diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp --- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -132,20 +132,10 @@ for (const Instruction &I : BB) { if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) { Type *Ty = AI->getAllocatedType(); - Align TyPrefAlign = MF->getDataLayout().getPrefTypeAlign(Ty); + // The "specified" alignment is the alignment written on the alloca, // or the preferred alignment of the type if none is specified. - // - // (Unspecified alignment on allocas will be going away soon.) - Align SpecifiedAlign = AI->getAlign(); - - // If the preferred alignment of the type is higher than the specified - // alignment of the alloca, promote the alignment, as long as it doesn't - // require realigning the stack. - // - // FIXME: Do we really want to second-guess the IR in isel? - Align Alignment = - std::max(std::min(TyPrefAlign, StackAlign), SpecifiedAlign); + Align Alignment = AI->getAlign(); // Static allocas can be folded into the initial stack frame // adjustment. 
For targets that don't realign the stack, don't diff --git a/llvm/test/CodeGen/AArch64/preferred-alignment.ll b/llvm/test/CodeGen/AArch64/preferred-alignment.ll --- a/llvm/test/CodeGen/AArch64/preferred-alignment.ll +++ b/llvm/test/CodeGen/AArch64/preferred-alignment.ll @@ -3,11 +3,11 @@ ; Function Attrs: nounwind define i32 @foo() #0 { entry: - %c = alloca i8, align 1 + %c = alloca i8 ; CHECK: add x0, sp, #12 - %s = alloca i16, align 2 + %s = alloca i16 ; CHECK-NEXT: add x1, sp, #8 - %i = alloca i32, align 4 + %i = alloca i32 ; CHECK-NEXT: add x2, sp, #4 %call = call i32 @bar(i8* %c, i16* %s, i32* %i) %0 = load i8, i8* %c, align 1 diff --git a/llvm/test/CodeGen/AArch64/seh-finally.ll b/llvm/test/CodeGen/AArch64/seh-finally.ll --- a/llvm/test/CodeGen/AArch64/seh-finally.ll +++ b/llvm/test/CodeGen/AArch64/seh-finally.ll @@ -42,7 +42,7 @@ ; CHECK: ldur w0, [x29, #-8] ; CHECK: bl foo - %o = alloca %struct.S, align 4 + %o = alloca %struct.S, align 8 call void (...) @llvm.localescape(%struct.S* %o) %x = getelementptr inbounds %struct.S, %struct.S* %o, i32 0, i32 0 %0 = load i32, i32* %x, align 4 diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll --- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll +++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll @@ -671,7 +671,7 @@ ; GCN-NEXT: s_swappc_b64 ; GCN-NOT: [[SP]] define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0 { - %val = alloca { i8, i32 }, align 4, addrspace(5) + %val = alloca { i8, i32 }, align 8, addrspace(5) %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %val, i32 0, i32 0 %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %val, i32 0, i32 1 store i8 3, i8 addrspace(5)* %gep0 @@ -702,8 +702,8 @@ ; GCN: buffer_store_byte [[LOAD_OUT_VAL0]], off ; GCN: buffer_store_dword [[LOAD_OUT_VAL1]], off define amdgpu_kernel void 
@test_call_external_void_func_sret_struct_i8_i32_byval_struct_i8_i32(i32) #0 { - %in.val = alloca { i8, i32 }, align 4, addrspace(5) - %out.val = alloca { i8, i32 }, align 4, addrspace(5) + %in.val = alloca { i8, i32 }, align 8, addrspace(5) + %out.val = alloca { i8, i32 }, align 8, addrspace(5) %in.gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %in.val, i32 0, i32 0 %in.gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %in.val, i32 0, i32 1 store i8 3, i8 addrspace(5)* %in.gep0 diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll --- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll +++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll @@ -291,7 +291,7 @@ ; GCN: ds_write_b32 v{{[0-9]+}}, [[PTR]] define void @alloca_ptr_nonentry_block(i32 %arg0) #0 { - %alloca0 = alloca { i8, i32 }, align 4, addrspace(5) + %alloca0 = alloca { i8, i32 }, align 8, addrspace(5) %cmp = icmp eq i32 %arg0, 0 br i1 %cmp, label %bb, label %ret diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll --- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll +++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll @@ -123,7 +123,7 @@ %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo) ; allocate enough scratch to go beyond 2^12 addressing - %scratch = alloca <1280 x i32>, align 8, addrspace(5) + %scratch = alloca <1280 x i32>, align 16, addrspace(5) ; load VGPR data %aptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %in, i32 %tid diff --git a/llvm/test/CodeGen/ARM/ssp-data-layout.ll b/llvm/test/CodeGen/ARM/ssp-data-layout.ll --- a/llvm/test/CodeGen/ARM/ssp-data-layout.ll +++ b/llvm/test/CodeGen/ARM/ssp-data-layout.ll @@ -452,8 +452,8 @@ ; CHECK: bl get_struct_large_char2 ; CHECK: strb r0, [sp, #106] ; CHECK: bl end_struct_large_char2 - %a = alloca %struct.struct_small_char, align 1 - %b = alloca 
%struct.struct_large_char2, align 1 + %a = alloca %struct.struct_small_char, align 4 + %b = alloca %struct.struct_large_char2, align 4 %d1 = alloca %struct.struct_large_nonchar, align 8 %d2 = alloca %struct.struct_small_nonchar, align 2 %call = call signext i8 @get_struct_small_char() diff --git a/llvm/test/CodeGen/BPF/undef.ll b/llvm/test/CodeGen/BPF/undef.ll --- a/llvm/test/CodeGen/BPF/undef.ll +++ b/llvm/test/CodeGen/BPF/undef.ll @@ -40,7 +40,7 @@ ; CHECK: r1 = routing ; CHECK: call bpf_map_lookup_elem ; CHECK: exit - %key = alloca %struct.routing_key_2, align 1 + %key = alloca %struct.routing_key_2, align 8 %1 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 0 store i8 5, i8* %1, align 1 %2 = getelementptr inbounds %struct.routing_key_2, %struct.routing_key_2* %key, i64 0, i32 0, i64 1 diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll --- a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll +++ b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll @@ -10,8 +10,8 @@ ; CHECK-LABEL: foobar: %retval = alloca i32, align 4 %x.addr = alloca i32, align 4 - %a = alloca %struct.x, align 4 - %c = alloca %struct.x*, align 4 + %a = alloca %struct.x, align 8 + %c = alloca %struct.x*, align 8 store i32 %x, i32* %x.addr, align 4 %x1 = getelementptr inbounds %struct.x, %struct.x* %a, i32 0, i32 0 %0 = load i32, i32* %x.addr, align 4 diff --git a/llvm/test/CodeGen/Mips/atomic64.ll b/llvm/test/CodeGen/Mips/atomic64.ll --- a/llvm/test/CodeGen/Mips/atomic64.ll +++ b/llvm/test/CodeGen/Mips/atomic64.ll @@ -1145,7 +1145,7 @@ ; MIPS64EB-NEXT: jr $ra ; MIPS64EB-NEXT: daddiu $sp, $sp, 16 entry: - %newval.addr = alloca i64, align 4 + %newval.addr = alloca i64, align 8 store i64 %newval, i64* %newval.addr, align 4 %tmp = load i64, i64* %newval.addr, align 4 %0 = atomicrmw xchg i64* @x, i64 %tmp monotonic @@ -1359,7 +1359,7 @@ ; MIPS64EB-NEXT: jr $ra ; MIPS64EB-NEXT: daddiu $sp, $sp, 16 entry: - 
%newval.addr = alloca i64, align 4 + %newval.addr = alloca i64, align 8 store i64 %newval, i64* %newval.addr, align 4 %tmp = load i64, i64* %newval.addr, align 4 %0 = cmpxchg i64* @x, i64 %oldval, i64 %tmp monotonic monotonic diff --git a/llvm/test/CodeGen/Mips/cconv/byval.ll b/llvm/test/CodeGen/Mips/cconv/byval.ll --- a/llvm/test/CodeGen/Mips/cconv/byval.ll +++ b/llvm/test/CodeGen/Mips/cconv/byval.ll @@ -151,7 +151,7 @@ ; N64-NEXT: jr $ra ; N64-NEXT: daddu $sp, $sp, $1 entry: - %a = alloca %struct.S1, align 4 + %a = alloca %struct.S1, align 8 call void @f2(%struct.S1* byval(%struct.S1) align 4 %a) ret void } @@ -340,8 +340,8 @@ ; N64-NEXT: jr $ra ; N64-NEXT: daddu $sp, $sp, $1 entry: - %a.addr = alloca %struct.S1*, align 4 - %byval-temp = alloca %struct.S1, align 4 + %a.addr = alloca %struct.S1* + %byval-temp = alloca %struct.S1, align 8 store %struct.S1* %a, %struct.S1** %a.addr, align 4 %0 = load %struct.S1*, %struct.S1** %a.addr, align 4 %1 = bitcast %struct.S1* %byval-temp to i8* @@ -412,8 +412,8 @@ ; N64-NEXT: jr $ra ; N64-NEXT: daddiu $sp, $sp, 32 entry: - %a.addr = alloca %struct.S1*, align 4 - %b.addr = alloca %struct.S1*, align 4 + %a.addr = alloca %struct.S1* + %b.addr = alloca %struct.S1* store %struct.S1* %a, %struct.S1** %a.addr, align 4 store %struct.S1* %b, %struct.S1** %b.addr, align 4 %0 = load %struct.S1*, %struct.S1** %a.addr, align 4 diff --git a/llvm/test/CodeGen/Mips/cconv/return-struct.ll b/llvm/test/CodeGen/Mips/cconv/return-struct.ll --- a/llvm/test/CodeGen/Mips/cconv/return-struct.ll +++ b/llvm/test/CodeGen/Mips/cconv/return-struct.ll @@ -56,7 +56,7 @@ ; packed into a single register. 
define inreg {i16} @ret_struct_i16() nounwind { entry: - %retval = alloca {i8,i8}, align 1 + %retval = alloca {i8,i8}, align 8 %0 = bitcast {i8,i8}* %retval to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}, {i8,i8}* @struct_2byte, i32 0, i32 0), i64 2, i1 false) %1 = bitcast {i8,i8}* %retval to {i16}* diff --git a/llvm/test/CodeGen/Mips/largeimmprinting.ll b/llvm/test/CodeGen/Mips/largeimmprinting.ll --- a/llvm/test/CodeGen/Mips/largeimmprinting.ll +++ b/llvm/test/CodeGen/Mips/largeimmprinting.ll @@ -24,7 +24,7 @@ ; 64: daddu $[[R1]], $sp, $[[R1]] ; 64: sd $ra, 24($[[R1]]) - %agg.tmp = alloca %struct.S1, align 1 + %agg.tmp = alloca %struct.S1, align 8 %tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp, i8* align 1 getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i1 false) call void @f2(%struct.S1* byval(%struct.S1) %agg.tmp) nounwind diff --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll --- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll +++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll @@ -80,7 +80,7 @@ ; CHECK-NEXT: jr $ra ; CHECK-NEXT: addiu $sp, $sp, 64 entry: - %agg.tmp10 = alloca %struct.S3, align 4 + %agg.tmp10 = alloca %struct.S3, align 8 call void @callee1(float 2.000000e+01, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind call void @callee2(%struct.S2* byval(%struct.S2) @f1.s2) nounwind %tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0 diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll --- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll +++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll @@ -120,7 +120,7 @@ ; Verify that if the pointer escapes, then we do fall back onto using a temp copy. 
; CHECK-LABEL: .visible .entry pointer_escapes -; CHECK: .local .align 8 .b8 __local_depot{{.*}} +; CHECK: .local .align 4 .b8 __local_depot{{.*}} ; CHECK64: ld.param.u64 [[result_addr:%rd[0-9]+]], [{{.*}}_param_0] ; CHECK64: add.u64 %[[copy_addr:rd[0-9]+]], %SPL, 0; ; CHECK32: ld.param.u32 [[result_addr:%r[0-9]+]], [{{.*}}_param_0] diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll --- a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll +++ b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll @@ -353,7 +353,7 @@ define void @call_test_byval_4Byte() { entry: %s0 = alloca %struct.S0, align 8 - %s4a = alloca %struct.S4A, align 4 + %s4a = alloca %struct.S4A, align 8 %call = call signext i32 @test_byval_4Byte(%struct.S4* byval(%struct.S4) align 1 @gS4, %struct.S0* byval(%struct.S0) align 1 %s0, %struct.S4A* byval(%struct.S4A) align 4 %s4a) ret void } @@ -945,7 +945,7 @@ define i32 @call_test_byval_homogeneous_float_struct() { entry: - %s = alloca %struct.F, align 4 + %s = alloca %struct.F, align 8 %0 = bitcast %struct.F* %s to i8* call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 12, i1 false) %call = call i32 @test_byval_homogeneous_float_struct(%struct.F* byval(%struct.F) align 4 %s) diff --git a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll --- a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll +++ b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll @@ -17,7 +17,7 @@ define void @test1() { entry: - %s = alloca %struct.S, align 4 + %s = alloca %struct.S, align 8 call void @foo(%struct.S* sret(%struct.S) %s) ret void } diff --git a/llvm/test/CodeGen/PowerPC/byval.ll b/llvm/test/CodeGen/PowerPC/byval.ll --- a/llvm/test/CodeGen/PowerPC/byval.ll +++ b/llvm/test/CodeGen/PowerPC/byval.ll @@ -34,7 +34,7 @@ ; CHECK-NEXT: mtlr 0 ; CHECK-NEXT: blr entry: - %x = alloca %struct, align 4 + %x = alloca %struct, align 8 call void @foo(%struct* %x) %r = call i32 @foo1(%struct* byval(%struct) %x) ret i32 %r diff 
--git a/llvm/test/CodeGen/PowerPC/structsinregs.ll b/llvm/test/CodeGen/PowerPC/structsinregs.ll --- a/llvm/test/CodeGen/PowerPC/structsinregs.ll +++ b/llvm/test/CodeGen/PowerPC/structsinregs.ll @@ -35,13 +35,13 @@ define i32 @caller1() nounwind { entry: - %p1 = alloca %struct.s1, align 1 - %p2 = alloca %struct.s2, align 2 - %p3 = alloca %struct.s3, align 2 - %p4 = alloca %struct.s4, align 4 - %p5 = alloca %struct.s5, align 4 - %p6 = alloca %struct.s6, align 4 - %p7 = alloca %struct.s7, align 4 + %p1 = alloca %struct.s1 + %p2 = alloca %struct.s2 + %p3 = alloca %struct.s3 + %p4 = alloca %struct.s4 + %p5 = alloca %struct.s5 + %p6 = alloca %struct.s6 + %p7 = alloca %struct.s7 %0 = bitcast %struct.s1* %p1 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.s1, %struct.s1* @caller1.p1, i32 0, i32 0), i64 1, i1 false) %1 = bitcast %struct.s2* %p2 to i8* @@ -117,13 +117,13 @@ define i32 @caller2() nounwind { entry: - %p1 = alloca %struct.t1, align 1 - %p2 = alloca %struct.t2, align 1 - %p3 = alloca %struct.t3, align 1 - %p4 = alloca %struct.t4, align 1 - %p5 = alloca %struct.t5, align 1 - %p6 = alloca %struct.t6, align 1 - %p7 = alloca %struct.t7, align 1 + %p1 = alloca %struct.t1 + %p2 = alloca %struct.t2 + %p3 = alloca %struct.t3 + %p4 = alloca %struct.t4 + %p5 = alloca %struct.t5 + %p6 = alloca %struct.t6 + %p7 = alloca %struct.t7 %0 = bitcast %struct.t1* %p1 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds (%struct.t1, %struct.t1* @caller2.p1, i32 0, i32 0), i64 1, i1 false) %1 = bitcast %struct.t2* %p2 to i8* diff --git a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll --- a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll +++ b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll @@ -7,7 +7,7 @@ define void @foo(float inreg %s.coerce) nounwind { entry: - %s = alloca %struct.Sf1, align 4 + %s = alloca %struct.Sf1, align 8 %coerce.dive = getelementptr 
%struct.Sf1, %struct.Sf1* %s, i32 0, i32 0 store float %s.coerce, float* %coerce.dive, align 1 %coerce.dive1 = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll @@ -595,7 +595,7 @@ ; RV32I-WITHFP-NEXT: lw s0, 40(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: addi sp, sp, 48 ; RV32I-WITHFP-NEXT: ret - %ls = alloca %struct.large, align 4 + %ls = alloca %struct.large, align 8 %1 = bitcast %struct.large* %ls to i8* %a = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 0 store i32 1, i32* %a diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll --- a/llvm/test/CodeGen/RISCV/frame.ll +++ b/llvm/test/CodeGen/RISCV/frame.ll @@ -41,7 +41,7 @@ ; RV32I-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-WITHFP-NEXT: addi sp, sp, 32 ; RV32I-WITHFP-NEXT: ret - %key = alloca %struct.key_t, align 4 + %key = alloca %struct.key_t, align 8 %1 = bitcast %struct.key_t* %key to i8* call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 20, i1 false) %2 = getelementptr inbounds %struct.key_t, %struct.key_t* %key, i64 0, i32 1, i64 0 diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll --- a/llvm/test/CodeGen/RISCV/mem64.ll +++ b/llvm/test/CodeGen/RISCV/mem64.ll @@ -368,7 +368,7 @@ ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret bb: - %tmp = alloca %struct.quux, align 4 + %tmp = alloca %struct.quux, align 8 %tmp1 = getelementptr inbounds %struct.quux, %struct.quux* %tmp, i64 0, i32 1 %tmp2 = getelementptr inbounds %struct.quux, %struct.quux* %tmp, i64 0, i32 1, i64 %arg store i8 0, i8* %tmp2, align 1 diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll --- 
a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -138,7 +138,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret - %va = alloca i8*, align 4 + %va = alloca i8* %1 = bitcast i8** %va to i8* call void @llvm.va_start(i8* %1) %argp.cur = load i8*, i8** %va, align 4 @@ -603,7 +603,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret - %va = alloca i8*, align 4 + %va = alloca i8* %1 = bitcast i8** %va to i8* call void @llvm.va_start(i8* %1) %2 = bitcast i8** %va to i32* @@ -725,7 +725,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret - %va = alloca i8*, align 4 + %va = alloca i8* %1 = bitcast i8** %va to i8* call void @llvm.va_start(i8* %1) %2 = va_arg i8** %va, double @@ -923,7 +923,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret - %va = alloca i8*, align 4 + %va = alloca i8* %1 = bitcast i8** %va to i8* call void @llvm.va_start(i8* %1) %2 = bitcast i8** %va to i32* @@ -1050,7 +1050,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret - %va = alloca i8*, align 4 + %va = alloca i8* %1 = bitcast i8** %va to i8* call void @llvm.va_start(i8* %1) %2 = va_arg i8** %va, double @@ -1351,8 +1351,8 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 112 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret - %vargs = alloca i8*, align 4 - %wargs = alloca i8*, align 4 + %vargs = alloca i8* + %wargs = alloca i8* %1 = bitcast i8** %vargs to i8* %2 = bitcast i8** %wargs to i8* call 
void @llvm.va_start(i8* %1) @@ -1672,7 +1672,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret - %va = alloca i8*, align 4 + %va = alloca i8* %1 = bitcast i8** %va to i8* call void @llvm.va_start(i8* %1) %2 = va_arg i8** %va, i32 @@ -1886,7 +1886,7 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 2032 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret %large = alloca [ 100000000 x i8 ] - %va = alloca i8*, align 4 + %va = alloca i8* %1 = bitcast i8** %va to i8* call void @llvm.va_start(i8* %1) %argp.cur = load i8*, i8** %va, align 4 diff --git a/llvm/test/CodeGen/Thumb2/mve-stack.ll b/llvm/test/CodeGen/Thumb2/mve-stack.ll --- a/llvm/test/CodeGen/Thumb2/mve-stack.ll +++ b/llvm/test/CodeGen/Thumb2/mve-stack.ll @@ -15,7 +15,7 @@ ; CHECK-NEXT: add sp, #16 ; CHECK-NEXT: pop {r7, pc} entry: - %d = alloca [4 x i32], align 2 + %d = alloca [4 x i32], align 4 %g = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 2 %b = bitcast i32* %g to <4 x i32>* store <4 x i32> zeroinitializer, <4 x i32>* %b, align 2 @@ -153,7 +153,7 @@ ; CHECK-NEXT: add sp, #16 ; CHECK-NEXT: pop {r7, pc} entry: - %d = alloca [4 x i32], align 2 + %d = alloca [4 x i32], align 4 %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 0 call arm_aapcs_vfpcc void bitcast (void (...)* @func to void (i32*)*)(i32* %arraydecay) %g = getelementptr inbounds [4 x i32], [4 x i32]* %d, i32 0, i32 2 diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll --- a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll +++ b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll @@ -1455,7 +1455,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %3 = alloca %"struct.std::__1::atomic", align 1 + %3 = alloca %"struct.std::__1::atomic", align 8 %4 = getelementptr inbounds %"struct.std::__1::atomic", 
%"struct.std::__1::atomic"* %3, i64 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %4) %5 = zext i1 %1 to i8 @@ -1515,7 +1515,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %3 = alloca %"struct.std::__1::atomic.0", align 1 + %3 = alloca %"struct.std::__1::atomic.0", align 8 %4 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %4) %5 = load i8, i8* %0, align 1 @@ -1568,7 +1568,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %3 = alloca %"struct.std::__1::atomic.5", align 1 + %3 = alloca %"struct.std::__1::atomic.5", align 8 %4 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %4) %5 = load i8, i8* %0, align 1 @@ -1622,7 +1622,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %3 = alloca %"struct.std::__1::atomic.10", align 2 + %3 = alloca %"struct.std::__1::atomic.10", align 8 %4 = bitcast %"struct.std::__1::atomic.10"* %3 to i8* call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %4) %5 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %3 = alloca %"struct.std::__1::atomic.15", align 2 + %3 = alloca %"struct.std::__1::atomic.15", align 8 %4 = bitcast %"struct.std::__1::atomic.15"* %3 to i8* call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %4) %5 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %3, i64 0, i32 0, i32 0, i32 0, 
i32 0, i32 0 @@ -1724,7 +1724,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %3 = alloca %"struct.std::__1::atomic.20", align 4 + %3 = alloca %"struct.std::__1::atomic.20", align 8 %4 = bitcast %"struct.std::__1::atomic.20"* %3 to i8* call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4) %5 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %3 = alloca %"struct.std::__1::atomic.25", align 4 + %3 = alloca %"struct.std::__1::atomic.25", align 8 %4 = bitcast %"struct.std::__1::atomic.25"* %3 to i8* call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %4) %5 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %3, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll --- a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll +++ b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll @@ -560,7 +560,7 @@ ; CHECK-NEXT: ld1b.zx %s0, 248(, %s11) ; CHECK-NEXT: and %s0, 1, %s0 ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic", align 1 + %1 = alloca %"struct.std::__1::atomic", align 8 %2 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %1, i64 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2) call void @_Z6fun_i1RNSt3__16atomicIbEE(%"struct.std::__1::atomic"* nonnull align 1 dereferenceable(1) %1) @@ -590,7 +590,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld1b.sx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.0", align 1 + %1 = alloca %"struct.std::__1::atomic.0", align 8 %2 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %1, 
i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2) call void @_Z6fun_i8RNSt3__16atomicIcEE(%"struct.std::__1::atomic.0"* nonnull align 1 dereferenceable(1) %1) @@ -612,7 +612,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld1b.zx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.5", align 1 + %1 = alloca %"struct.std::__1::atomic.5", align 8 %2 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %1, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %2) call void @_Z6fun_u8RNSt3__16atomicIhEE(%"struct.std::__1::atomic.5"* nonnull align 1 dereferenceable(1) %1) @@ -634,7 +634,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld2b.sx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.10", align 2 + %1 = alloca %"struct.std::__1::atomic.10", align 8 %2 = bitcast %"struct.std::__1::atomic.10"* %1 to i8* call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %2) call void @_Z7fun_i16RNSt3__16atomicIsEE(%"struct.std::__1::atomic.10"* nonnull align 2 dereferenceable(2) %1) @@ -657,7 +657,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld2b.zx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.15", align 2 + %1 = alloca %"struct.std::__1::atomic.15", align 8 %2 = bitcast %"struct.std::__1::atomic.15"* %1 to i8* call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %2) call void @_Z7fun_u16RNSt3__16atomicItEE(%"struct.std::__1::atomic.15"* nonnull align 2 dereferenceable(2) %1) @@ -680,7 +680,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ldl.sx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.20", align 4 + %1 = alloca %"struct.std::__1::atomic.20", align 8 %2 = bitcast %"struct.std::__1::atomic.20"* %1 to i8* call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2) call void 
@_Z7fun_i32RNSt3__16atomicIiEE(%"struct.std::__1::atomic.20"* nonnull align 4 dereferenceable(4) %1) @@ -703,7 +703,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ldl.zx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.25", align 4 + %1 = alloca %"struct.std::__1::atomic.25", align 8 %2 = bitcast %"struct.std::__1::atomic.25"* %1 to i8* call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2) call void @_Z7fun_u32RNSt3__16atomicIjEE(%"struct.std::__1::atomic.25"* nonnull align 4 dereferenceable(4) %1) diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll --- a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll +++ b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll @@ -768,7 +768,7 @@ ; CHECK-NEXT: and %s0, 1, %s0 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %2 = alloca %"struct.std::__1::atomic", align 1 + %2 = alloca %"struct.std::__1::atomic", align 8 %3 = getelementptr inbounds %"struct.std::__1::atomic", %"struct.std::__1::atomic"* %2, i64 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3) %4 = zext i1 %0 to i8 @@ -797,7 +797,7 @@ ; CHECK-NEXT: sra.l %s0, %s0, 56 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %2 = alloca %"struct.std::__1::atomic.0", align 1 + %2 = alloca %"struct.std::__1::atomic.0", align 8 %3 = getelementptr inbounds %"struct.std::__1::atomic.0", %"struct.std::__1::atomic.0"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3) %4 = atomicrmw volatile xchg i8* %3, i8 %0 monotonic @@ -816,7 +816,7 @@ ; CHECK-NEXT: and %s0, %s0, (56)0 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %2 = alloca %"struct.std::__1::atomic.5", align 1 + %2 = alloca %"struct.std::__1::atomic.5", align 8 %3 = getelementptr inbounds %"struct.std::__1::atomic.5", %"struct.std::__1::atomic.5"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 call 
void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %3) %4 = atomicrmw volatile xchg i8* %3, i8 %0 monotonic @@ -836,7 +836,7 @@ ; CHECK-NEXT: sra.l %s0, %s0, 48 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %2 = alloca %"struct.std::__1::atomic.10", align 2 + %2 = alloca %"struct.std::__1::atomic.10", align 8 %3 = bitcast %"struct.std::__1::atomic.10"* %2 to i8* call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3) %4 = getelementptr inbounds %"struct.std::__1::atomic.10", %"struct.std::__1::atomic.10"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 @@ -856,7 +856,7 @@ ; CHECK-NEXT: and %s0, %s0, (48)0 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %2 = alloca %"struct.std::__1::atomic.15", align 2 + %2 = alloca %"struct.std::__1::atomic.15", align 8 %3 = bitcast %"struct.std::__1::atomic.15"* %2 to i8* call void @llvm.lifetime.start.p0i8(i64 2, i8* nonnull %3) %4 = getelementptr inbounds %"struct.std::__1::atomic.15", %"struct.std::__1::atomic.15"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 @@ -873,7 +873,7 @@ ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %2 = alloca %"struct.std::__1::atomic.20", align 4 + %2 = alloca %"struct.std::__1::atomic.20", align 8 %3 = bitcast %"struct.std::__1::atomic.20"* %2 to i8* call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3) %4 = getelementptr inbounds %"struct.std::__1::atomic.20", %"struct.std::__1::atomic.20"* %2, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0 @@ -890,7 +890,7 @@ ; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 ; CHECK-NEXT: adds.l %s11, 16, %s11 ; CHECK-NEXT: b.l.t (, %s10) - %2 = alloca %"struct.std::__1::atomic.25", align 4 + %2 = alloca %"struct.std::__1::atomic.25", align 8 %3 = bitcast %"struct.std::__1::atomic.25"* %2 to i8* call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %3) %4 = getelementptr inbounds %"struct.std::__1::atomic.25", %"struct.std::__1::atomic.25"* %2, i64 0, i32 0, i32 0, i32 0, i32 
0, i32 0 diff --git a/llvm/test/CodeGen/WebAssembly/PR40172.ll b/llvm/test/CodeGen/WebAssembly/PR40172.ll --- a/llvm/test/CodeGen/WebAssembly/PR40172.ll +++ b/llvm/test/CodeGen/WebAssembly/PR40172.ll @@ -15,7 +15,7 @@ ; CHECK: i32.store8 8($[[BASE]]), $[[A1]]{{$}} define void @test(i8 %byte) { - %t = alloca { i8, i8 }, align 1 + %t = alloca { i8, i8 }, align 8 %x4 = and i8 %byte, 1 %x5 = icmp eq i8 %x4, 1 %x6 = and i8 %byte, 2 diff --git a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll --- a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll +++ b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll @@ -50,8 +50,8 @@ ; Function Attrs: uwtable define void @_Z3barii(i32 %param1, i32 %param2) #0 !dbg !24 { entry: - %var1 = alloca %struct.AAA3, align 1 - %var2 = alloca %struct.AAA3, align 1 + %var1 = alloca %struct.AAA3, align 8 + %var2 = alloca %struct.AAA3, align 8 tail call void @llvm.dbg.value(metadata i32 %param1, i64 0, metadata !29, metadata !46), !dbg !47 tail call void @llvm.dbg.value(metadata i32 %param2, i64 0, metadata !30, metadata !46), !dbg !48 tail call void @llvm.dbg.value(metadata ptr null, i64 0, metadata !31, metadata !46), !dbg !49 diff --git a/llvm/test/CodeGen/X86/fast-isel-call.ll b/llvm/test/CodeGen/X86/fast-isel-call.ll --- a/llvm/test/CodeGen/X86/fast-isel-call.ll +++ b/llvm/test/CodeGen/X86/fast-isel-call.ll @@ -59,7 +59,7 @@ %struct.S = type { i8 } define void @test5() { entry: - %s = alloca %struct.S, align 1 + %s = alloca %struct.S, align 8 ; CHECK-LABEL: test5: ; CHECK: subl $12, %esp ; CHECK: leal 8(%esp), %ecx diff --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll --- a/llvm/test/CodeGen/X86/load-local-v3i129.ll +++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll @@ -29,7 +29,7 @@ ; SLOW-SHLD-NEXT: movq $-1, -48(%rsp) ; SLOW-SHLD-NEXT: retq Entry: - %y = alloca <3 x i129>, align 4 + %y = alloca <3 x 
i129>, align 16 %L = load <3 x i129>, ptr %y %I1 = insertelement <3 x i129> %L, i129 340282366920938463463374607431768211455, i32 1 store <3 x i129> %I1, ptr %y diff --git a/llvm/test/CodeGen/X86/pr44140.ll b/llvm/test/CodeGen/X86/pr44140.ll --- a/llvm/test/CodeGen/X86/pr44140.ll +++ b/llvm/test/CodeGen/X86/pr44140.ll @@ -59,7 +59,7 @@ %dummy1 = alloca [22 x i64], align 8 %dummy2 = alloca [22 x i64], align 8 - %data = alloca <2 x i64>, align 8 + %data = alloca <2 x i64>, align 16 br label %fake-loop diff --git a/llvm/test/CodeGen/X86/ssp-data-layout.ll b/llvm/test/CodeGen/X86/ssp-data-layout.ll --- a/llvm/test/CodeGen/X86/ssp-data-layout.ll +++ b/llvm/test/CodeGen/X86/ssp-data-layout.ll @@ -93,14 +93,14 @@ %y = alloca i32, align 4 %z = alloca i32, align 4 %ptr = alloca i32, align 4 - %small2 = alloca [2 x i16], align 2 + %small2 = alloca [2 x i16], align 4 %large2 = alloca [8 x i32], align 16 - %small = alloca [2 x i8], align 1 - %large = alloca [8 x i8], align 1 - %a = alloca %struct.struct_large_char, align 1 - %b = alloca %struct.struct_small_char, align 1 + %small = alloca [2 x i8], align 2 + %large = alloca [8 x i8], align 8 + %a = alloca %struct.struct_large_char, align 8 + %b = alloca %struct.struct_small_char, align 8 %c = alloca %struct.struct_large_nonchar, align 8 - %d = alloca %struct.struct_small_nonchar, align 2 + %d = alloca %struct.struct_small_nonchar, align 8 %call = call i32 @get_scalar1() store i32 %call, ptr %x, align 4 call void @end_scalar1() @@ -217,12 +217,12 @@ %ptr = alloca i32, align 4 %small2 = alloca [2 x i16], align 2 %large2 = alloca [8 x i32], align 16 - %small = alloca [2 x i8], align 1 - %large = alloca [8 x i8], align 1 - %a = alloca %struct.struct_large_char, align 1 - %b = alloca %struct.struct_small_char, align 1 + %small = alloca [2 x i8], align 2 + %large = alloca [8 x i8], align 8 + %a = alloca %struct.struct_large_char, align 8 + %b = alloca %struct.struct_small_char, align 8 %c = alloca %struct.struct_large_nonchar, align 
8 - %d = alloca %struct.struct_small_nonchar, align 2 + %d = alloca %struct.struct_small_nonchar, align 8 %call = call i32 @get_scalar1() store i32 %call, ptr %x, align 4 call void @end_scalar1() @@ -325,14 +325,14 @@ %y = alloca i32, align 4 %z = alloca i32, align 4 %ptr = alloca i32, align 4 - %small2 = alloca [2 x i16], align 2 + %small2 = alloca [2 x i16], align 4 %large2 = alloca [8 x i32], align 16 - %small = alloca [2 x i8], align 1 - %large = alloca [8 x i8], align 1 - %a = alloca %struct.struct_large_char, align 1 - %b = alloca %struct.struct_small_char, align 1 + %small = alloca [2 x i8], align 2 + %large = alloca [8 x i8], align 8 + %a = alloca %struct.struct_large_char, align 8 + %b = alloca %struct.struct_small_char, align 8 %c = alloca %struct.struct_large_nonchar, align 8 - %d = alloca %struct.struct_small_nonchar, align 2 + %d = alloca %struct.struct_small_nonchar, align 8 %call = call i32 @get_scalar1() store i32 %call, ptr %x, align 4 call void @end_scalar1() diff --git a/llvm/test/CodeGen/X86/win-cleanuppad.ll b/llvm/test/CodeGen/X86/win-cleanuppad.ll --- a/llvm/test/CodeGen/X86/win-cleanuppad.ll +++ b/llvm/test/CodeGen/X86/win-cleanuppad.ll @@ -58,8 +58,8 @@ define void @nested_cleanup() #0 personality ptr @__CxxFrameHandler3 { entry: - %o1 = alloca %struct.Dtor, align 1 - %o2 = alloca %struct.Dtor, align 1 + %o1 = alloca %struct.Dtor, align 8 + %o2 = alloca %struct.Dtor, align 8 invoke void @f(i32 1) to label %invoke.cont unwind label %cleanup.outer diff --git a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll --- a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll +++ b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll @@ -13,8 +13,8 @@ ; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: testb $1, %dil -; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax -; CHECK-NEXT: movq %rsp, %rcx +; CHECK-NEXT: movq %rsp, %rax +; CHECK-NEXT: leaq 
{{[0-9]+}}(%rsp), %rcx ; CHECK-NEXT: cmovneq %rax, %rcx ; CHECK-NEXT: movups (%rcx), %xmm0 ; CHECK-NEXT: callq _sink @@ -36,8 +36,8 @@ ; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: testb $1, %dil -; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax -; CHECK-NEXT: movq %rsp, %rcx +; CHECK-NEXT: movq %rsp, %rax +; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx ; CHECK-NEXT: cmovneq %rax, %rcx ; CHECK-NEXT: movaps (%rcx), %xmm0 ; CHECK-NEXT: callq _sink diff --git a/llvm/test/DebugInfo/AArch64/frameindices.ll b/llvm/test/DebugInfo/AArch64/frameindices.ll --- a/llvm/test/DebugInfo/AArch64/frameindices.ll +++ b/llvm/test/DebugInfo/AArch64/frameindices.ll @@ -86,7 +86,7 @@ define void @_Z3f16v() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !68 { entry: %agg.tmp.i.i = alloca %struct.A, align 8 - %d = alloca %struct.B, align 1 + %d = alloca %struct.B, align 8 %agg.tmp.sroa.2 = alloca [15 x i8], align 1 %agg.tmp.sroa.4 = alloca [7 x i8], align 1 tail call void @llvm.dbg.declare(metadata [15 x i8]* %agg.tmp.sroa.2, metadata !56, metadata !74), !dbg !75 diff --git a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll --- a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll +++ b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll @@ -221,7 +221,7 @@ ; Function Attrs: noinline nounwind uwtable define void @use_dbg_declare() #0 !dbg !7 { entry: - %o = alloca %struct.Foo, align 4 + %o = alloca %struct.Foo, align 8 call void @llvm.dbg.declare(metadata %struct.Foo* %o, metadata !10, metadata !15), !dbg !16 call void @escape_foo(%struct.Foo* %o), !dbg !17 ret void, !dbg !18 diff --git a/llvm/test/DebugInfo/X86/dbg-addr.ll b/llvm/test/DebugInfo/X86/dbg-addr.ll --- a/llvm/test/DebugInfo/X86/dbg-addr.ll +++ b/llvm/test/DebugInfo/X86/dbg-addr.ll @@ -44,7 +44,7 @@ ; Function Attrs: noinline nounwind uwtable define void @use_dbg_addr() #0 !dbg !7 { entry: - %o = alloca %struct.Foo, align 4 + %o = alloca 
%struct.Foo, align 8 call void @llvm.dbg.addr(metadata %struct.Foo* %o, metadata !10, metadata !15), !dbg !16 call void @escape_foo(%struct.Foo* %o), !dbg !17 ret void, !dbg !18 @@ -52,7 +52,7 @@ define void @test_dbg_addr_and_dbg_val_undef() #0 !dbg !117 { entry: - %o = alloca %struct.Foo, align 4 + %o = alloca %struct.Foo, align 8 call void @llvm.dbg.addr(metadata %struct.Foo* %o, metadata !1110, metadata !1115), !dbg !1116 call void @escape_foo(%struct.Foo* %o), !dbg !1117 call void @llvm.dbg.value(metadata %struct.Foo* undef, metadata !1110, metadata !1115), !dbg !1116 diff --git a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll --- a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll +++ b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll @@ -23,7 +23,7 @@ ; Function Attrs: noinline nounwind uwtable define void @use_dbg_declare() #0 !dbg !7 { entry: - %o = alloca %struct.Foo, align 4 + %o = alloca %struct.Foo, align 8 call void @llvm.dbg.declare(metadata %struct.Foo* %o, metadata !10, metadata !15), !dbg !16 call void @escape_foo(%struct.Foo* %o), !dbg !17 ret void, !dbg !18 diff --git a/llvm/test/DebugInfo/X86/sret.ll b/llvm/test/DebugInfo/X86/sret.ll --- a/llvm/test/DebugInfo/X86/sret.ll +++ b/llvm/test/DebugInfo/X86/sret.ll @@ -104,7 +104,7 @@ define void @_ZN1B9AInstanceEv(%class.A* noalias sret(%class.A) %agg.result, %class.B* %this) #2 align 2 !dbg !53 { entry: %this.addr = alloca %class.B*, align 8 - %nrvo = alloca i1 + %nrvo = alloca i1, align 1 %cleanup.dest.slot = alloca i32 store %class.B* %this, %class.B** %this.addr, align 8 call void @llvm.dbg.declare(metadata %class.B** %this.addr, metadata !89, metadata !DIExpression()), !dbg !91 @@ -141,13 +141,13 @@ %retval = alloca i32, align 4 %argc.addr = alloca i32, align 4 %argv.addr = alloca i8**, align 8 - %b = alloca %class.B, align 1 + %b = alloca %class.B, align 8 %return_val = alloca i32, align 4 %temp.lvalue = alloca %class.A, align 8 - %exn.slot = alloca i8* - 
%ehselector.slot = alloca i32 + %exn.slot = alloca i8*, align 8 + %ehselector.slot = alloca i32, align 4 %a = alloca %class.A, align 8 - %cleanup.dest.slot = alloca i32 + %cleanup.dest.slot = alloca i32, align 4 store i32 0, i32* %retval store i32 %argc, i32* %argc.addr, align 4 call void @llvm.dbg.declare(metadata i32* %argc.addr, metadata !104, metadata !DIExpression()), !dbg !105 @@ -227,8 +227,8 @@ define linkonce_odr void @_ZN1AD0Ev(%class.A* %this) unnamed_addr #2 align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !61 { entry: %this.addr = alloca %class.A*, align 8 - %exn.slot = alloca i8* - %ehselector.slot = alloca i32 + %exn.slot = alloca i8*, align 8 + %ehselector.slot = alloca i32, align 4 store %class.A* %this, %class.A** %this.addr, align 8 call void @llvm.dbg.declare(metadata %class.A** %this.addr, metadata !126, metadata !DIExpression()), !dbg !127 %this1 = load %class.A*, %class.A** %this.addr