Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -4446,6 +4446,8 @@ : calculateByteProvider(Op->getOperand(0), Index - ByteShift, Depth + 1); } + case ISD::ANY_EXTEND: + case ISD::SIGN_EXTEND: case ISD::ZERO_EXTEND: { SDValue NarrowOp = Op->getOperand(0); unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits(); @@ -4453,22 +4455,32 @@ return None; uint64_t NarrowByteWidth = NarrowBitWidth / 8; - return Index >= NarrowByteWidth - ? ByteProvider::getConstantZero() - : calculateByteProvider(NarrowOp, Index, Depth + 1); + if (Index >= NarrowByteWidth) + return Op.getOpcode() == ISD::ZERO_EXTEND + ? Optional(ByteProvider::getConstantZero()) + : None; + else + return calculateByteProvider(NarrowOp, Index, Depth + 1); } case ISD::BSWAP: return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1, Depth + 1); case ISD::LOAD: { auto L = cast(Op.getNode()); + if (L->isVolatile() || L->isIndexed()) + return None; - // TODO: support ext loads - if (L->isVolatile() || L->isIndexed() || - L->getExtensionType() != ISD::NON_EXTLOAD) + unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits(); + if (NarrowBitWidth % 8 != 0) return None; + uint64_t NarrowByteWidth = NarrowBitWidth / 8; - return ByteProvider::getMemory(L, Index); + if (Index >= NarrowByteWidth) + return L->getExtensionType() == ISD::ZEXTLOAD + ? 
Optional<ByteProvider>(ByteProvider::getConstantZero())
@load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: -; CHECK: ldrh w8, [x0] -; CHECK-NEXT: ldrh w9, [x0, #2] -; CHECK-NEXT: bfi w8, w9, #16, #16 -; CHECK-NEXT: mov w0, w8 +; CHECK: ldr w0, [x0] ; CHECK-NEXT: ret - %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 4 %tmp2 = zext i16 %tmp1 to i32 @@ -384,15 +380,8 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK: add x8, x0, w1, uxtw -; CHECK-NEXT: ldrb w0, [x8, #13] -; CHECK-NEXT: ldrb w9, [x8, #14] -; CHECK-NEXT: ldrb w10, [x8, #15] -; CHECK-NEXT: ldrb w8, [x8, #16] -; CHECK-NEXT: bfi w0, w9, #8, #8 -; CHECK-NEXT: bfi w0, w10, #16, #8 -; CHECK-NEXT: bfi w0, w8, #24, #8 +; CHECK-NEXT: ldur w0, [x8, #13] ; CHECK-NEXT: ret - %tmp = add nuw nsw i32 %i, 4 %tmp2 = add nuw nsw i32 %i, 3 %tmp3 = add nuw nsw i32 %i, 2 Index: test/CodeGen/ARM/fp16-promote.ll =================================================================== --- test/CodeGen/ARM/fp16-promote.ll +++ test/CodeGen/ARM/fp16-promote.ll @@ -847,21 +847,15 @@ } ; CHECK-ALL-LABEL: test_extractelement: +; CHECK-VFP: push {{{.*}}, lr} ; CHECK-VFP: sub sp, sp, #8 -; CHECK-VFP: ldrh -; CHECK-VFP: ldrh -; CHECK-VFP: orr -; CHECK-VFP: str -; CHECK-VFP: ldrh -; CHECK-VFP: ldrh -; CHECK-VFP: orr -; CHECK-VFP: str +; CHECK-VFP: ldrd ; CHECK-VFP: mov ; CHECK-VFP: orr ; CHECK-VFP: ldrh ; CHECK-VFP: strh ; CHECK-VFP: add sp, sp, #8 -; CHECK-VFP: bx lr +; CHECK-VFP: pop {{{.*}}, pc} ; CHECK-NOVFP: ldrh ; CHECK-NOVFP: strh ; CHECK-NOVFP: ldrh Index: test/CodeGen/ARM/load-combine-big-endian.ll =================================================================== --- test/CodeGen/ARM/load-combine-big-endian.ll +++ test/CodeGen/ARM/load-combine-big-endian.ll @@ -456,17 +456,12 @@ ; (i32) p[1] | (sext(p[0] << 16) to i32) define i32 @load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: -; CHECK: ldrh r1, [r0] -; CHECK-NEXT: ldrh r0, [r0, #2] -; 
CHECK-NEXT: orr r0, r0, r1, lsl #16 +; CHECK: ldr r0, [r0] ; CHECK-NEXT: mov pc, lr - -; CHECK-ARMv6-LABEL: load_i32_by_sext_i16: -; CHECK-ARMv6: ldrh r1, [r0] -; CHECK-ARMv6-NEXT: ldrh r0, [r0, #2] -; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16 +; +; CHECK-ARMv6-LABEL: +; CHECK-ARMv6: ldr r0, [r0] ; CHECK-ARMv6-NEXT: bx lr - %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 4 %tmp2 = sext i16 %tmp1 to i32 @@ -534,24 +529,20 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK: add r0, r0, r1 -; CHECK-NEXT: ldrb r1, [r0, #13] -; CHECK-NEXT: ldrb r2, [r0, #14] -; CHECK-NEXT: ldrb r3, [r0, #15] -; CHECK-NEXT: ldrb r0, [r0, #16] -; CHECK-NEXT: orr r1, r1, r2, lsl #8 -; CHECK-NEXT: orr r1, r1, r3, lsl #16 -; CHECK-NEXT: orr r0, r1, r0, lsl #24 +; CHECK-NEXT: mov r1, #65280 +; CHECK-NEXT: mov r2, #16711680 +; CHECK-NEXT: ldr r0, [r0, #13] +; CHECK-NEXT: and r1, r1, r0, lsr #8 +; CHECK-NEXT: and r2, r2, r0, lsl #8 +; CHECK-NEXT: orr r1, r1, r0, lsr #24 +; CHECK-NEXT: orr r0, r2, r0, lsl #24 +; CHECK-NEXT: orr r0, r0, r1 ; CHECK-NEXT: mov pc, lr ; ; CHECK-ARMv6-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK-ARMv6: add r0, r0, r1 -; CHECK-ARMv6-NEXT: ldrb r1, [r0, #13] -; CHECK-ARMv6-NEXT: ldrb r2, [r0, #14] -; CHECK-ARMv6-NEXT: ldrb r3, [r0, #15] -; CHECK-ARMv6-NEXT: ldrb r0, [r0, #16] -; CHECK-ARMv6-NEXT: orr r1, r1, r2, lsl #8 -; CHECK-ARMv6-NEXT: orr r1, r1, r3, lsl #16 -; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #24 +; CHECK-ARMv6-NEXT: ldr r0, [r0, #13] +; CHECK-ARMv6-NEXT: rev r0, r0 ; CHECK-ARMv6-NEXT: bx lr %tmp = add nuw nsw i32 %i, 4 Index: test/CodeGen/ARM/load-combine.ll =================================================================== --- test/CodeGen/ARM/load-combine.ll +++ test/CodeGen/ARM/load-combine.ll @@ -414,17 +414,12 @@ ; (i32) p[0] | (sext(p[1] << 16) to i32) define i32 @load_i32_by_sext_i16(i32* %arg) { ; CHECK-LABEL: load_i32_by_sext_i16: -; CHECK: ldrh 
r1, [r0, #2] -; CHECK-NEXT: ldrh r0, [r0] -; CHECK-NEXT: orr r0, r0, r1, lsl #16 +; CHECK: ldr r0, [r0] ; CHECK-NEXT: mov pc, lr ; ; CHECK-ARMv6-LABEL: load_i32_by_sext_i16: -; CHECK-ARMv6: ldrh r1, [r0, #2] -; CHECK-ARMv6-NEXT: ldrh r0, [r0] -; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16 -; CHECK-ARMv6-NEXT: bx lr - +; CHECK-ARMv6: ldr r0, [r0] +; CHECK-ARMv6-NEXT: bx lr %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 4 %tmp2 = zext i16 %tmp1 to i32 @@ -485,26 +480,13 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK: add r0, r0, r1 -; CHECK-NEXT: ldrb r1, [r0, #13] -; CHECK-NEXT: ldrb r2, [r0, #14] -; CHECK-NEXT: ldrb r3, [r0, #15] -; CHECK-NEXT: ldrb r0, [r0, #16] -; CHECK-NEXT: orr r1, r1, r2, lsl #8 -; CHECK-NEXT: orr r1, r1, r3, lsl #16 -; CHECK-NEXT: orr r0, r1, r0, lsl #24 +; CHECK-NEXT: ldr r0, [r0, #13] ; CHECK-NEXT: mov pc, lr ; ; CHECK-ARMv6-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK-ARMv6: add r0, r0, r1 -; CHECK-ARMv6-NEXT: ldrb r1, [r0, #13] -; CHECK-ARMv6-NEXT: ldrb r2, [r0, #14] -; CHECK-ARMv6-NEXT: ldrb r3, [r0, #15] -; CHECK-ARMv6-NEXT: ldrb r0, [r0, #16] -; CHECK-ARMv6-NEXT: orr r1, r1, r2, lsl #8 -; CHECK-ARMv6-NEXT: orr r1, r1, r3, lsl #16 -; CHECK-ARMv6-NEXT: orr r0, r1, r0, lsl #24 +; CHECK-ARMv6-NEXT: ldr r0, [r0, #13] ; CHECK-ARMv6-NEXT: bx lr - %tmp = add nuw nsw i32 %i, 4 %tmp2 = add nuw nsw i32 %i, 3 %tmp3 = add nuw nsw i32 %i, 2 Index: test/CodeGen/X86/load-combine.ll =================================================================== --- test/CodeGen/X86/load-combine.ll +++ test/CodeGen/X86/load-combine.ll @@ -733,16 +733,8 @@ ; CHECK64-LABEL: load_i32_by_i8_bswap_base_index_offset: ; CHECK64: # BB#0: ; CHECK64-NEXT: movslq %esi, %rax -; CHECK64-NEXT: movzbl (%rdi,%rax), %ecx -; CHECK64-NEXT: shll $24, %ecx -; CHECK64-NEXT: movzbl 1(%rdi,%rax), %edx -; CHECK64-NEXT: shll $16, %edx -; CHECK64-NEXT: orl %ecx, %edx -; CHECK64-NEXT: 
movzbl 2(%rdi,%rax), %ecx -; CHECK64-NEXT: shll $8, %ecx -; CHECK64-NEXT: orl %edx, %ecx -; CHECK64-NEXT: movzbl 3(%rdi,%rax), %eax -; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl (%rdi,%rax), %eax +; CHECK64-NEXT: bswapl %eax ; CHECK64-NEXT: retq %tmp = bitcast i32* %arg to i8* %tmp2 = getelementptr inbounds i8, i8* %tmp, i32 %arg1 @@ -835,18 +827,12 @@ ; CHECK-LABEL: load_i32_by_sext_i16: ; CHECK: # BB#0: ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: movzwl (%eax), %ecx -; CHECK-NEXT: movzwl 2(%eax), %eax -; CHECK-NEXT: shll $16, %eax -; CHECK-NEXT: orl %ecx, %eax +; CHECK-NEXT: movl (%eax), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_sext_i16: ; CHECK64: # BB#0: -; CHECK64-NEXT: movzwl (%rdi), %ecx -; CHECK64-NEXT: movzwl 2(%rdi), %eax -; CHECK64-NEXT: shll $16, %eax -; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl (%rdi), %eax ; CHECK64-NEXT: retq %tmp = bitcast i32* %arg to i16* %tmp1 = load i16, i16* %tmp, align 1 @@ -865,24 +851,9 @@ define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) { ; CHECK-LABEL: load_i32_by_i8_base_offset_index: ; CHECK: # BB#0: -; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi4: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi5: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx -; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movzbl 15(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi +; CHECK-NEXT: movl 12(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_base_offset_index: @@ -925,39 +896,15 @@ define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) { ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK: # BB#0: -; 
CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi6: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi7: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 13(%eax,%ecx), %edx -; CHECK-NEXT: movzbl 14(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 15(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movzbl 16(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi +; CHECK-NEXT: movl 13(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_base_offset_index_2: ; CHECK64: # BB#0: ; CHECK64-NEXT: movl %esi, %eax -; CHECK64-NEXT: movzbl 13(%rdi,%rax), %ecx -; CHECK64-NEXT: movzbl 14(%rdi,%rax), %edx -; CHECK64-NEXT: shll $8, %edx -; CHECK64-NEXT: orl %ecx, %edx -; CHECK64-NEXT: movzbl 15(%rdi,%rax), %ecx -; CHECK64-NEXT: shll $16, %ecx -; CHECK64-NEXT: orl %edx, %ecx -; CHECK64-NEXT: movzbl 16(%rdi,%rax), %eax -; CHECK64-NEXT: shll $24, %eax -; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl 13(%rdi,%rax), %eax ; CHECK64-NEXT: retq %tmp = add nuw nsw i32 %i, 4 %tmp2 = add nuw nsw i32 %i, 3 @@ -1005,39 +952,15 @@ define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) { ; CHECK-LABEL: load_i32_by_i8_zaext_loads: ; CHECK: # BB#0: -; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi8: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi9: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx -; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movzbl 15(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi 
+; CHECK-NEXT: movl 12(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_zaext_loads: ; CHECK64: # BB#0: ; CHECK64-NEXT: movl %esi, %eax -; CHECK64-NEXT: movzbl 12(%rdi,%rax), %ecx -; CHECK64-NEXT: movzbl 13(%rdi,%rax), %edx -; CHECK64-NEXT: shll $8, %edx -; CHECK64-NEXT: orl %ecx, %edx -; CHECK64-NEXT: movzbl 14(%rdi,%rax), %ecx -; CHECK64-NEXT: shll $16, %ecx -; CHECK64-NEXT: orl %edx, %ecx -; CHECK64-NEXT: movzbl 15(%rdi,%rax), %eax -; CHECK64-NEXT: shll $24, %eax -; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl 12(%rdi,%rax), %eax ; CHECK64-NEXT: retq %tmp = add nuw nsw i32 %arg1, 3 %tmp2 = add nuw nsw i32 %arg1, 2 @@ -1085,39 +1008,15 @@ define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) { ; CHECK-LABEL: load_i32_by_i8_zsext_loads: ; CHECK: # BB#0: -; CHECK-NEXT: pushl %esi -; CHECK-NEXT: .Lcfi10: -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: .Lcfi11: -; CHECK-NEXT: .cfi_offset %esi, -8 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx -; CHECK-NEXT: movzbl 12(%eax,%ecx), %edx -; CHECK-NEXT: movzbl 13(%eax,%ecx), %esi -; CHECK-NEXT: shll $8, %esi -; CHECK-NEXT: orl %edx, %esi -; CHECK-NEXT: movzbl 14(%eax,%ecx), %edx -; CHECK-NEXT: shll $16, %edx -; CHECK-NEXT: orl %esi, %edx -; CHECK-NEXT: movsbl 15(%eax,%ecx), %eax -; CHECK-NEXT: shll $24, %eax -; CHECK-NEXT: orl %edx, %eax -; CHECK-NEXT: popl %esi +; CHECK-NEXT: movl 12(%eax,%ecx), %eax ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_zsext_loads: ; CHECK64: # BB#0: ; CHECK64-NEXT: movl %esi, %eax -; CHECK64-NEXT: movzbl 12(%rdi,%rax), %ecx -; CHECK64-NEXT: movzbl 13(%rdi,%rax), %edx -; CHECK64-NEXT: shll $8, %edx -; CHECK64-NEXT: orl %ecx, %edx -; CHECK64-NEXT: movzbl 14(%rdi,%rax), %ecx -; CHECK64-NEXT: shll $16, %ecx -; CHECK64-NEXT: orl %edx, %ecx -; CHECK64-NEXT: movsbl 15(%rdi,%rax), %eax -; CHECK64-NEXT: shll $24, %eax -; CHECK64-NEXT: orl %ecx, %eax +; CHECK64-NEXT: movl 12(%rdi,%rax), %eax ; CHECK64-NEXT: retq %tmp = add 
nuw nsw i32 %arg1, 3 %tmp2 = add nuw nsw i32 %arg1, 2