Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1913,13 +1913,21 @@
                                                    unsigned,
                                                    bool *Fast) const {
   if (Fast) {
-    if (VT.getSizeInBits() == 256)
-      *Fast = !Subtarget->isUnalignedMem32Slow();
-    else
-      // FIXME: We should always return that 8-byte and under accesses are fast.
-      // That is what other x86 lowering code assumes.
+    switch (VT.getSizeInBits()) {
+    default:
+      // 8-byte and under are always assumed to be fast.
+      *Fast = true;
+      break;
+    case 128:
       *Fast = !Subtarget->isUnalignedMem16Slow();
+      break;
+    case 256:
+      *Fast = !Subtarget->isUnalignedMem32Slow();
+      break;
+    // TODO: What about AVX-512 (512-bit) accesses?
+    }
   }
+  // Misaligned accesses of any size are always allowed.
   return true;
 }
 
Index: test/CodeGen/X86/memcpy-2.ll
===================================================================
--- test/CodeGen/X86/memcpy-2.ll
+++ test/CodeGen/X86/memcpy-2.ll
@@ -5,15 +5,6 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core2 | FileCheck %s -check-prefix=X86-64
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=nehalem | FileCheck %s -check-prefix=NHM_64
 
-;;; TODO: The last run line chooses cpu=nehalem to reveal possible bugs in the "t4" test case.
-;;;
-;;; Nehalem has a 'fast unaligned memory' attribute, so (1) some of the loads and stores
-;;; are certainly unaligned and (2) the first load and first store overlap with the second
-;;; load and second store respectively.
-;;;
-;;; Is either of the sequences ideal?
-;;; Is the ideal code being generated for all CPU models?
-
 @.str = internal constant [25 x i8] c"image\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
 @.str2 = internal constant [30 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 4
 
@@ -190,13 +181,18 @@
 ; NOSSE: movl $2021161080
 ; NOSSE: movl $2021161080
 
+;;; TODO: (1) Some of the loads and stores are certainly unaligned and (2) the first load and first
+;;; store overlap with the second load and second store respectively.
+;;;
+;;; Is either of the sequences ideal?
+
 ; X86-64-LABEL: t4:
-; X86-64: movabsq $8680820740569200760, %rax
-; X86-64: movq %rax
-; X86-64: movq %rax
-; X86-64: movq %rax
-; X86-64: movw $120
-; X86-64: movl $2021161080
+; X86-64: movabsq $33909456017848440, %rax ## imm = 0x78787878787878
+; X86-64: movq %rax, -10(%rsp)
+; X86-64: movabsq $8680820740569200760, %rax ## imm = 0x7878787878787878
+; X86-64: movq %rax, -16(%rsp)
+; X86-64: movq %rax, -24(%rsp)
+; X86-64: movq %rax, -32(%rsp)
 
 ; NHM_64-LABEL: t4:
 ; NHM_64: movups _.str2+14(%rip), %xmm0
Index: test/CodeGen/X86/pr11985.ll
===================================================================
--- test/CodeGen/X86/pr11985.ll
+++ test/CodeGen/X86/pr11985.ll
@@ -1,26 +1,20 @@
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=prescott | FileCheck %s --check-prefix=PRESCOTT
 ; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=nehalem | FileCheck %s --check-prefix=NEHALEM
 
-;;; TODO: The last run line chooses cpu=nehalem to reveal possible bugs in the "foo" test case.
-;;;
-;;; Nehalem has a 'fast unaligned memory' attribute, so (1) some of the loads and stores
-;;; are certainly unaligned and (2) the first load and first store overlap with the second
-;;; load and second store respectively.
+;;; TODO: (1) Some of the loads and stores are certainly unaligned and (2) the first load and first
+;;; store overlap with the second load and second store respectively.
 ;;;
 ;;; Is either of these sequences ideal?
-;;; Is the ideal code being generated for all CPU models?
 
 define float @foo(i8* nocapture %buf, float %a, float %b) nounwind uwtable {
 ; PRESCOTT-LABEL: foo:
 ; PRESCOTT: # BB#0: # %entry
-; PRESCOTT-NEXT: movw .Ltmp0+20(%rip), %ax
-; PRESCOTT-NEXT: movw %ax, 20(%rdi)
-; PRESCOTT-NEXT: movl .Ltmp0+16(%rip), %eax
-; PRESCOTT-NEXT: movl %eax, 16(%rdi)
-; PRESCOTT-NEXT: movq .Ltmp0+8(%rip), %rax
-; PRESCOTT-NEXT: movq %rax, 8(%rdi)
-; PRESCOTT-NEXT: movq .Ltmp0(%rip), %rax
-; PRESCOTT-NEXT: movq %rax, (%rdi)
+; PRESCOTT-NEXT: movq .Ltmp0+14(%rip), %rax
+; PRESCOTT-NEXT: movq %rax, 14(%rdi)
+; PRESCOTT-NEXT: movq .Ltmp0+8(%rip), %rax
+; PRESCOTT-NEXT: movq %rax, 8(%rdi)
+; PRESCOTT-NEXT: movq .Ltmp0(%rip), %rax
+; PRESCOTT-NEXT: movq %rax, (%rdi)
 ;
 ; NEHALEM-LABEL: foo:
 ; NEHALEM: # BB#0: # %entry
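
Note for reviewers: the TODO comments above ask whether the overlapping sequences
are ideal. As a minimal, self-contained C++ sketch of the idiom the new PRESCOTT
checks encode (this sketch is not part of the patch; the name copy22 and the fixed
22-byte length are made up to mirror the test), the copy is done as three 8-byte
moves, with the final move anchored at offset len - 8 = 14 so that it overlaps the
middle move on bytes 14 and 15:

#include <cstdint>
#include <cstring>

// Copy exactly 22 bytes with three 8-byte moves, mirroring the movq
// offsets (0, 8, 14) in the PRESCOTT checks. The move covering bytes
// [14,22) overlaps the move covering [8,16) on bytes 14 and 15.
static void copy22(char *dst, const char *src) {
  uint64_t a, b, c;
  std::memcpy(&a, src, 8);       // load bytes [0,8)
  std::memcpy(&b, src + 8, 8);   // load bytes [8,16)
  std::memcpy(&c, src + 14, 8);  // load bytes [14,22); rereads 14-15
  std::memcpy(dst, &a, 8);       // store bytes [0,8)
  std::memcpy(dst + 8, &b, 8);   // store bytes [8,16)
  std::memcpy(dst + 14, &c, 8);  // store bytes [14,22); bytes 14-15 are
                                 // rewritten with identical data
}

The overlap is harmless because the doubly-written bytes receive the same values,
and it replaces the movl/movw tail of the old sequence with one wide, possibly
misaligned access, which the X86ISelLowering.cpp change above now reports as fast
for 8-byte and smaller accesses.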