Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5397,11 +5397,9 @@
       // If the new VT cannot cover all of the remaining bits, then consider
       // issuing a (or a pair of) unaligned and overlapping load / store.
-      // FIXME: Only does this for 64-bit or more since we don't have proper
-      // cost model for unaligned load / store.
       bool Fast;
       if (NumMemOps && AllowOverlap &&
-          VTSize >= 8 && NewVTSize < Size &&
+          NewVTSize < Size &&
           TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
         VTSize = Size;
       else {
Index: test/CodeGen/AArch64/arm64-memcpy-inline.ll
===================================================================
--- test/CodeGen/AArch64/arm64-memcpy-inline.ll
+++ test/CodeGen/AArch64/arm64-memcpy-inline.ll
@@ -16,10 +16,8 @@
 define i32 @t0() {
 entry:
 ; CHECK-LABEL: t0:
-; CHECK: ldrb [[REG0:w[0-9]+]], [x[[BASEREG:[0-9]+]], #10]
-; CHECK: strb [[REG0]], [x[[BASEREG2:[0-9]+]], #10]
-; CHECK: ldrh [[REG1:w[0-9]+]], [x[[BASEREG]], #8]
-; CHECK: strh [[REG1]], [x[[BASEREG2]], #8]
+; CHECK: ldur [[REG0:w[0-9]+]], [x[[BASEREG:[0-9]+]], #7]
+; CHECK: stur [[REG0]], [x[[BASEREG2:[0-9]+]], #7]
 ; CHECK: ldr [[REG2:x[0-9]+]],
 ; CHECK: str [[REG2]],
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @dst, i32 0, i32 0), i8* align 8 getelementptr inbounds (%struct.x, %struct.x* @src, i32 0, i32 0), i32 11, i1 false)
@@ -74,9 +72,9 @@
 define void @t5(i8* nocapture %C) nounwind {
 entry:
 ; CHECK-LABEL: t5:
-; CHECK: strb wzr, [x0, #6]
-; CHECK: mov [[REG7:w[0-9]+]], #21587
-; CHECK: strh [[REG7]], [x0, #4]
+; CHECK: mov [[REG7:w[0-9]+]], #21337
+; CHECK: movk [[REG7]],
+; CHECK: stur [[REG7]], [x0, #3]
 ; CHECK: mov [[REG8:w[0-9]+]],
 ; CHECK: movk [[REG8]],
 ; CHECK: str [[REG8]], [x0]
Index: test/CodeGen/PowerPC/jaggedstructs.ll
===================================================================
--- test/CodeGen/PowerPC/jaggedstructs.ll
+++ test/CodeGen/PowerPC/jaggedstructs.ll
@@ -34,11 +34,9 @@
 ; CHECK-DAG: lwz {{[0-9]+}}, 178(1)
 ; CHECK-DAG: sth {{[0-9]+}}, 70(1)
 ; CHECK-DAG: stw {{[0-9]+}}, 66(1)
-; CHECK-DAG: lbz {{[0-9]+}}, 191(1)
-; CHECK-DAG: lhz {{[0-9]+}}, 189(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 188(1)
 ; CHECK-DAG: lwz {{[0-9]+}}, 185(1)
-; CHECK-DAG: stb {{[0-9]+}}, 79(1)
-; CHECK-DAG: sth {{[0-9]+}}, 77(1)
+; CHECK-DAG: stw {{[0-9]+}}, 76(1)
 ; CHECK-DAG: stw {{[0-9]+}}, 73(1)
 ; CHECK-DAG: ld 6, 72(1)
 ; CHECK-DAG: ld 5, 64(1)
Index: test/CodeGen/PowerPC/structsinmem.ll
===================================================================
--- test/CodeGen/PowerPC/structsinmem.ll
+++ test/CodeGen/PowerPC/structsinmem.ll
@@ -157,8 +157,7 @@
 ; CHECK: stw {{[0-9]+}}, 147(1)
 ; CHECK: sth {{[0-9]+}}, 158(1)
 ; CHECK: stw {{[0-9]+}}, 154(1)
-; CHECK: stb {{[0-9]+}}, 167(1)
-; CHECK: sth {{[0-9]+}}, 165(1)
+; CHECK: stw {{[0-9]+}}, 164(1)
 ; CHECK: stw {{[0-9]+}}, 161(1)
 }
Index: test/CodeGen/PowerPC/structsinregs.ll
===================================================================
--- test/CodeGen/PowerPC/structsinregs.ll
+++ test/CodeGen/PowerPC/structsinregs.ll
@@ -148,8 +148,7 @@
 ; CHECK: stw {{[0-9]+}}, 83(1)
 ; CHECK: sth {{[0-9]+}}, 94(1)
 ; CHECK: stw {{[0-9]+}}, 90(1)
-; CHECK: stb {{[0-9]+}}, 103(1)
-; CHECK: sth {{[0-9]+}}, 101(1)
+; CHECK: stw {{[0-9]+}}, 100(1)
 ; CHECK: stw {{[0-9]+}}, 97(1)
 ; CHECK: ld 9, 96(1)
 ; CHECK: ld 8, 88(1)
Index: test/CodeGen/X86/memcpy-from-string.ll
===================================================================
--- test/CodeGen/X86/memcpy-from-string.ll
+++ test/CodeGen/X86/memcpy-from-string.ll
@@ -16,8 +16,7 @@
 define void @foo(i8* %tmp2) {
 ; X86-LABEL: foo:
 ; X86:       # %bb.0:
-; X86-NEXT:    movb $0, 6(%rdi)
-; X86-NEXT:    movw $15212, 4(%rdi) # imm = 0x3B6C
+; X86-NEXT:    movl $3894379, 3(%rdi) # imm = 0x3B6C6B
 ; X86-NEXT:    movl $1802117222, (%rdi) # imm = 0x6B6A2066
 ; X86-NEXT:    retq
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @0, i64 0, i64 3), i64 7, i1 false)
Index: test/CodeGen/X86/memset-2.ll
===================================================================
--- test/CodeGen/X86/memset-2.ll
+++ test/CodeGen/X86/memset-2.ll
@@ -51,11 +51,10 @@
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT:    imull $16843009, %ecx, %ecx ## imm = 0x1010101
+; CHECK-NEXT:    movl %ecx, 11(%eax)
 ; CHECK-NEXT:    movl %ecx, 8(%eax)
 ; CHECK-NEXT:    movl %ecx, 4(%eax)
 ; CHECK-NEXT:    movl %ecx, (%eax)
-; CHECK-NEXT:    movw %cx, 12(%eax)
-; CHECK-NEXT:    movb %cl, 14(%eax)
 ; CHECK-NEXT:    retl
 entry:
   tail call void @llvm.memset.p0i8.i32(i8* %s, i8 %a, i32 15, i1 false)
Index: test/CodeGen/X86/memset-zero.ll
===================================================================
--- test/CodeGen/X86/memset-zero.ll
+++ test/CodeGen/X86/memset-zero.ll
@@ -71,22 +71,19 @@
 ; X86-LABEL: memset_7:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movb $0, 6(%eax)
-; X86-NEXT:    movw $0, 4(%eax)
+; X86-NEXT:    movl $0, 3(%eax)
 ; X86-NEXT:    movl $0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; CORE2-LABEL: memset_7:
 ; CORE2:       # %bb.0: # %entry
-; CORE2-NEXT:    movb $0, 6(%rdi)
-; CORE2-NEXT:    movw $0, 4(%rdi)
+; CORE2-NEXT:    movl $0, 3(%rdi)
 ; CORE2-NEXT:    movl $0, (%rdi)
 ; CORE2-NEXT:    retq
 ;
 ; NEHALEM-LABEL: memset_7:
 ; NEHALEM:       # %bb.0: # %entry
-; NEHALEM-NEXT:    movb $0, 6(%rdi)
-; NEHALEM-NEXT:    movw $0, 4(%rdi)
+; NEHALEM-NEXT:    movl $0, 3(%rdi)
 ; NEHALEM-NEXT:    movl $0, (%rdi)
 ; NEHALEM-NEXT:    retq
 entry:
@@ -120,23 +117,20 @@
 ; X86-LABEL: memset_11:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movb $0, 10(%eax)
-; X86-NEXT:    movw $0, 8(%eax)
+; X86-NEXT:    movl $0, 7(%eax)
 ; X86-NEXT:    movl $0, 4(%eax)
 ; X86-NEXT:    movl $0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; CORE2-LABEL: memset_11:
 ; CORE2:       # %bb.0: # %entry
-; CORE2-NEXT:    movb $0, 10(%rdi)
-; CORE2-NEXT:    movw $0, 8(%rdi)
+; CORE2-NEXT:    movl $0, 7(%rdi)
 ; CORE2-NEXT:    movq $0, (%rdi)
 ; CORE2-NEXT:    retq
 ;
 ; NEHALEM-LABEL: memset_11:
 ; NEHALEM:       # %bb.0: # %entry
-; NEHALEM-NEXT:    movb $0, 10(%rdi)
-; NEHALEM-NEXT:    movw $0, 8(%rdi)
+; NEHALEM-NEXT:    movl $0, 7(%rdi)
 ; NEHALEM-NEXT:    movq $0, (%rdi)
 ; NEHALEM-NEXT:    retq
 entry:
@@ -174,8 +168,7 @@
 ; X86-LABEL: memset_15:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movb $0, 14(%eax)
-; X86-NEXT:    movw $0, 12(%eax)
+; X86-NEXT:    movl $0, 11(%eax)
 ; X86-NEXT:    movl $0, 8(%eax)
 ; X86-NEXT:    movl $0, 4(%eax)
 ; X86-NEXT:    movl $0, (%eax)
@@ -256,8 +249,7 @@
 ; X86-LABEL: memset_19:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movb $0, 18(%eax)
-; X86-NEXT:    movw $0, 16(%eax)
+; X86-NEXT:    movl $0, 15(%eax)
 ; X86-NEXT:    movl $0, 12(%eax)
 ; X86-NEXT:    movl $0, 8(%eax)
 ; X86-NEXT:    movl $0, 4(%eax)
@@ -266,8 +258,7 @@
 ;
 ; CORE2-LABEL: memset_19:
 ; CORE2:       # %bb.0: # %entry
-; CORE2-NEXT:    movb $0, 18(%rdi)
-; CORE2-NEXT:    movw $0, 16(%rdi)
+; CORE2-NEXT:    movl $0, 15(%rdi)
 ; CORE2-NEXT:    movq $0, 8(%rdi)
 ; CORE2-NEXT:    movq $0, (%rdi)
 ; CORE2-NEXT:    retq
@@ -276,8 +267,7 @@
 ; NEHALEM:       # %bb.0: # %entry
 ; NEHALEM-NEXT:    xorps %xmm0, %xmm0
 ; NEHALEM-NEXT:    movups %xmm0, (%rdi)
-; NEHALEM-NEXT:    movb $0, 18(%rdi)
-; NEHALEM-NEXT:    movw $0, 16(%rdi)
+; NEHALEM-NEXT:    movl $0, 15(%rdi)
 ; NEHALEM-NEXT:    retq
 entry:
   call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 19, i1 false)
@@ -288,8 +278,7 @@
 ; X86-LABEL: memset_31:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movb $0, 30(%eax)
-; X86-NEXT:    movw $0, 28(%eax)
+; X86-NEXT:    movl $0, 27(%eax)
 ; X86-NEXT:    movl $0, 24(%eax)
 ; X86-NEXT:    movl $0, 20(%eax)
 ; X86-NEXT:    movl $0, 16(%eax)
@@ -322,8 +311,7 @@
 ; X86-LABEL: memset_35:
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movb $0, 34(%eax)
-; X86-NEXT:    movw $0, 32(%eax)
+; X86-NEXT:    movl $0, 31(%eax)
 ; X86-NEXT:    movl $0, 28(%eax)
 ; X86-NEXT:    movl $0, 24(%eax)
 ; X86-NEXT:    movl $0, 20(%eax)
@@ -336,8 +324,7 @@
 ;
 ; CORE2-LABEL: memset_35:
 ; CORE2:       # %bb.0: # %entry
-; CORE2-NEXT:    movb $0, 34(%rdi)
-; CORE2-NEXT:    movw $0, 32(%rdi)
+; CORE2-NEXT:    movl $0, 31(%rdi)
 ; CORE2-NEXT:    movq $0, 24(%rdi)
 ; CORE2-NEXT:    movq $0, 16(%rdi)
 ; CORE2-NEXT:    movq $0, 8(%rdi)
@@ -349,8 +336,7 @@
 ; NEHALEM-NEXT:    xorps %xmm0, %xmm0
 ; NEHALEM-NEXT:    movups %xmm0, 16(%rdi)
 ; NEHALEM-NEXT:    movups %xmm0, (%rdi)
-; NEHALEM-NEXT:    movb $0, 34(%rdi)
-; NEHALEM-NEXT:    movw $0, 32(%rdi)
+; NEHALEM-NEXT:    movl $0, 31(%rdi)
 ; NEHALEM-NEXT:    retq
 entry:
   call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 35, i1 false)
Index: test/CodeGen/X86/unaligned-load.ll
===================================================================
--- test/CodeGen/X86/unaligned-load.ll
+++ test/CodeGen/X86/unaligned-load.ll
@@ -9,18 +9,18 @@
 define void @func() nounwind ssp {
 ; I386-LABEL: func:
 ; I386:       ## %bb.0: ## %entry
-; I386-NEXT:    pushl %esi
-; I386-NEXT:    subl $40, %esp
-; I386-NEXT:    leal {{[0-9]+}}(%esp), %esi
+; I386-NEXT:    subl $32, %esp
 ; I386-NEXT:    .p2align 4, 0x90
 ; I386-NEXT:  LBB0_1: ## %bb
 ; I386-NEXT:    ## =>This Inner Loop Header: Depth=1
-; I386-NEXT:    subl $4, %esp
-; I386-NEXT:    pushl $31
-; I386-NEXT:    pushl $_.str3
-; I386-NEXT:    pushl %esi
-; I386-NEXT:    calll _memcpy
-; I386-NEXT:    addl $16, %esp
+; I386-NEXT:    movl $4673097, {{[0-9]+}}(%esp) ## imm = 0x474E49
+; I386-NEXT:    movl $1230132307, {{[0-9]+}}(%esp) ## imm = 0x49525453
+; I386-NEXT:    movl $541347367, {{[0-9]+}}(%esp) ## imm = 0x20444E27
+; I386-NEXT:    movl $840969293, {{[0-9]+}}(%esp) ## imm = 0x32202C4D
+; I386-NEXT:    movl $1095911247, {{[0-9]+}}(%esp) ## imm = 0x4152474F
+; I386-NEXT:    movl $1380982853, {{[0-9]+}}(%esp) ## imm = 0x52502045
+; I386-NEXT:    movl $1313821779, {{[0-9]+}}(%esp) ## imm = 0x4E4F5453
+; I386-NEXT:    movl $1498564676, (%esp) ## imm = 0x59524844
 ; I386-NEXT:    jmp LBB0_1
 ;
 ; CORE2-LABEL: func: