diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -128,20 +128,7 @@
     for (const Instruction &I : BB) {
       if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
         Type *Ty = AI->getAllocatedType();
-        Align TyPrefAlign = MF->getDataLayout().getPrefTypeAlign(Ty);
-        // The "specified" alignment is the alignment written on the alloca,
-        // or the preferred alignment of the type if none is specified.
-        //
-        // (Unspecified alignment on allocas will be going away soon.)
-        Align SpecifiedAlign = AI->getAlign();
-
-        // If the preferred alignment of the type is higher than the specified
-        // alignment of the alloca, promote the alignment, as long as it doesn't
-        // require realigning the stack.
-        //
-        // FIXME: Do we really want to second-guess the IR in isel?
-        Align Alignment =
-            std::max(std::min(TyPrefAlign, StackAlign), SpecifiedAlign);
+        Align Alignment = AI->getAlign();
 
         // Static allocas can be folded into the initial stack frame
         // adjustment. For targets that don't realign the stack, don't
diff --git a/llvm/test/CodeGen/AArch64/preferred-alignment.ll b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
--- a/llvm/test/CodeGen/AArch64/preferred-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/preferred-alignment.ll
@@ -3,11 +3,11 @@
 ; Function Attrs: nounwind
 define i32 @foo() #0 {
 entry:
-  %c = alloca i8, align 1
+  %c = alloca i8
 ; CHECK: add x0, sp, #12
-  %s = alloca i16, align 2
+  %s = alloca i16
 ; CHECK-NEXT: add x1, sp, #8
-  %i = alloca i32, align 4
+  %i = alloca i32
 ; CHECK-NEXT: add x2, sp, #4
   %call = call i32 @bar(ptr %c, ptr %s, ptr %i)
   %0 = load i8, ptr %c, align 1
diff --git a/llvm/test/CodeGen/AArch64/seh-finally.ll b/llvm/test/CodeGen/AArch64/seh-finally.ll
--- a/llvm/test/CodeGen/AArch64/seh-finally.ll
+++ b/llvm/test/CodeGen/AArch64/seh-finally.ll
@@ -42,7 +42,7 @@
 ; CHECK: ldur w0, [x29, #-8]
 ; CHECK: bl foo
 
-  %o = alloca %struct.S, align 4
+  %o = alloca %struct.S, align 8
   call void (...) @llvm.localescape(ptr %o)
  %0 = load i32, ptr %o, align 4
  invoke void @foo(i32 %0) #5
diff --git a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
--- a/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-argument-types.ll
@@ -671,7 +671,7 @@
 ; GCN-NEXT: s_swappc_b64
 ; GCN-NOT: [[SP]]
 define amdgpu_kernel void @test_call_external_void_func_byval_struct_i8_i32() #0 {
-  %val = alloca { i8, i32 }, align 4, addrspace(5)
+  %val = alloca { i8, i32 }, align 8, addrspace(5)
   %gep0 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %val, i32 0, i32 0
   %gep1 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %val, i32 0, i32 1
   store i8 3, ptr addrspace(5) %gep0
@@ -702,8 +702,8 @@
 ; GCN: buffer_store_byte [[LOAD_OUT_VAL0]], off
 ; GCN: buffer_store_dword [[LOAD_OUT_VAL1]], off
 define amdgpu_kernel void @test_call_external_void_func_sret_struct_i8_i32_byval_struct_i8_i32(i32) #0 {
-  %in.val = alloca { i8, i32 }, align 4, addrspace(5)
-  %out.val = alloca { i8, i32 }, align 4, addrspace(5)
+  %in.val = alloca { i8, i32 }, align 8, addrspace(5)
+  %out.val = alloca { i8, i32 }, align 8, addrspace(5)
   %in.gep0 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %in.val, i32 0, i32 0
   %in.gep1 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %in.val, i32 0, i32 1
   store i8 3, ptr addrspace(5) %in.gep0
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -289,7 +289,7 @@
 ; GCN: ds_write_b32 v{{[0-9]+}}, [[PTR]]
 define void @alloca_ptr_nonentry_block(i32 %arg0) #0 {
-  %alloca0 = alloca { i8, i32 }, align 4, addrspace(5)
+  %alloca0 = alloca { i8, i32 }, align 8, addrspace(5)
   %cmp = icmp eq i32 %arg0, 0
   br i1 %cmp, label %bb, label %ret
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -11098,7 +11098,7 @@
   %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
 ; allocate enough scratch to go beyond 2^12 addressing
-  %scratch = alloca <1280 x i32>, align 8, addrspace(5)
+  %scratch = alloca <1280 x i32>, align 16, addrspace(5)
 ; load VGPR data
   %aptr = getelementptr <64 x i32>, ptr addrspace(1) %in, i32 %tid
diff --git a/llvm/test/CodeGen/ARM/ssp-data-layout.ll b/llvm/test/CodeGen/ARM/ssp-data-layout.ll
--- a/llvm/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/ARM/ssp-data-layout.ll
@@ -386,8 +386,8 @@
 ; CHECK: bl get_struct_large_char2
 ; CHECK: strb r0, [sp, #106]
 ; CHECK: bl end_struct_large_char2
-  %a = alloca %struct.struct_small_char, align 1
-  %b = alloca %struct.struct_large_char2, align 1
+  %a = alloca %struct.struct_small_char, align 4
+  %b = alloca %struct.struct_large_char2, align 4
   %d1 = alloca %struct.struct_large_nonchar, align 8
   %d2 = alloca %struct.struct_small_nonchar, align 2
   %call = call signext i8 @get_struct_small_char()
diff --git a/llvm/test/CodeGen/BPF/pr57872.ll b/llvm/test/CodeGen/BPF/pr57872.ll
--- a/llvm/test/CodeGen/BPF/pr57872.ll
+++ b/llvm/test/CodeGen/BPF/pr57872.ll
@@ -180,7 +180,7 @@
 ; CHECK-NEXT: call bar
 ; CHECK-NEXT: exit
 entry:
-  %event = alloca %struct.event, align 1
+  %event = alloca %struct.event, align 8
   %hostname = getelementptr inbounds %struct.event, ptr %event, i64 0, i32 1
   %0 = load ptr, ptr %g, align 8
  call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 1 dereferenceable(84) %hostname, ptr noundef nonnull align 1 dereferenceable(84) %0, i64 84, i1 false)
diff --git a/llvm/test/CodeGen/BPF/undef.ll b/llvm/test/CodeGen/BPF/undef.ll
--- a/llvm/test/CodeGen/BPF/undef.ll
+++ b/llvm/test/CodeGen/BPF/undef.ll
@@ -40,7 +40,7 @@
 ; CHECK: r1 = routing
 ; CHECK: call bpf_map_lookup_elem
 ; CHECK: exit
-  %key = alloca %struct.routing_key_2, align 1
+  %key = alloca %struct.routing_key_2, align 8
   store i8 5, ptr %key, align 1
   %1 = getelementptr inbounds %struct.routing_key_2, ptr %key, i64 0, i32 0, i64 1
   store i8 6, ptr %1, align 1
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
@@ -10,8 +10,8 @@
 ; CHECK-LABEL: foobar:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
-  %a = alloca %struct.x, align 4
-  %c = alloca ptr, align 4
+  %a = alloca %struct.x, align 8
+  %c = alloca ptr, align 8
   store i32 %x, ptr %x.addr, align 4
   %0 = load i32, ptr %x.addr, align 4
   store i32 %0, ptr %a, align 4
diff --git a/llvm/test/CodeGen/Mips/atomic64.ll b/llvm/test/CodeGen/Mips/atomic64.ll
--- a/llvm/test/CodeGen/Mips/atomic64.ll
+++ b/llvm/test/CodeGen/Mips/atomic64.ll
@@ -1145,7 +1145,7 @@
 ; MIPS64EB-NEXT: jr $ra
 ; MIPS64EB-NEXT: daddiu $sp, $sp, 16
 entry:
-  %newval.addr = alloca i64, align 4
+  %newval.addr = alloca i64, align 8
   store i64 %newval, ptr %newval.addr, align 4
   %tmp = load i64, ptr %newval.addr, align 4
   %0 = atomicrmw xchg ptr @x, i64 %tmp monotonic
@@ -1359,7 +1359,7 @@
 ; MIPS64EB-NEXT: jr $ra
 ; MIPS64EB-NEXT: daddiu $sp, $sp, 16
 entry:
-  %newval.addr = alloca i64, align 4
+  %newval.addr = alloca i64, align 8
   store i64 %newval, ptr %newval.addr, align 4
   %tmp = load i64, ptr %newval.addr, align 4
   %0 = cmpxchg ptr @x, i64 %oldval, i64 %tmp monotonic monotonic
diff --git a/llvm/test/CodeGen/Mips/cconv/byval.ll b/llvm/test/CodeGen/Mips/cconv/byval.ll
--- a/llvm/test/CodeGen/Mips/cconv/byval.ll
+++ b/llvm/test/CodeGen/Mips/cconv/byval.ll
@@ -151,7 +151,7 @@
 ; N64-NEXT: jr $ra
 ; N64-NEXT: daddu $sp, $sp, $1
 entry:
-  %a = alloca %struct.S1, align 4
+  %a = alloca %struct.S1, align 8
   call void @f2(ptr byval(%struct.S1) align 4 %a)
   ret void
 }
@@ -340,8 +340,8 @@
 ; N64-NEXT: jr $ra
 ; N64-NEXT: daddu $sp, $sp, $1
 entry:
-  %a.addr = alloca ptr, align 4
-  %byval-temp = alloca %struct.S1, align 4
+  %a.addr = alloca ptr
+  %byval-temp = alloca %struct.S1, align 8
   store ptr %a, ptr %a.addr, align 4
   %0 = load ptr, ptr %a.addr, align 4
   call void @llvm.memcpy.p0.p0.i32(ptr align 4 %byval-temp, ptr align 1 %0, i32 65520, i1 false)
@@ -410,8 +410,8 @@
 ; N64-NEXT: jr $ra
 ; N64-NEXT: daddiu $sp, $sp, 32
 entry:
-  %a.addr = alloca ptr, align 4
-  %b.addr = alloca ptr, align 4
+  %a.addr = alloca ptr
+  %b.addr = alloca ptr
   store ptr %a, ptr %a.addr, align 4
   store ptr %b, ptr %b.addr, align 4
   %0 = load ptr, ptr %a.addr, align 4
diff --git a/llvm/test/CodeGen/Mips/cconv/return-struct.ll b/llvm/test/CodeGen/Mips/cconv/return-struct.ll
--- a/llvm/test/CodeGen/Mips/cconv/return-struct.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return-struct.ll
@@ -139,7 +139,7 @@
 ; N64-LE-NEXT: jr $ra
 ; N64-LE-NEXT: daddiu $sp, $sp, 16
 entry:
-  %retval = alloca {i8,i8}, align 1
+  %retval = alloca {i8,i8}, align 8
   call void @llvm.memcpy.p0.p0.i64(ptr %retval, ptr @struct_2byte, i64 2, i1 false)
   %0 = load volatile {i16}, ptr %retval
   ret {i16} %0
diff --git a/llvm/test/CodeGen/Mips/largeimmprinting.ll b/llvm/test/CodeGen/Mips/largeimmprinting.ll
--- a/llvm/test/CodeGen/Mips/largeimmprinting.ll
+++ b/llvm/test/CodeGen/Mips/largeimmprinting.ll
@@ -24,7 +24,7 @@
 ; 64: daddu $[[R1]], $sp, $[[R1]]
 ; 64: sd $ra, 24($[[R1]])
 
-  %agg.tmp = alloca %struct.S1, align 1
+  %agg.tmp = alloca %struct.S1, align 8
   call void @llvm.memcpy.p0.p0.i32(ptr align 1 %agg.tmp, ptr align 1 @s1, i32 65536, i1 false)
   call void @f2(ptr byval(%struct.S1) %agg.tmp) nounwind
   ret void
diff --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
--- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
@@ -80,7 +80,7 @@
 ; CHECK-NEXT: jr $ra
 ; CHECK-NEXT: addiu $sp, $sp, 64
 entry:
-  %agg.tmp10 = alloca %struct.S3, align 4
+  %agg.tmp10 = alloca %struct.S3, align 8
   call void @callee1(float 2.000000e+01, ptr byval(%struct.S1) @f1.s1) nounwind
   call void @callee2(ptr byval(%struct.S2) @f1.s2) nounwind
   store i8 11, ptr %agg.tmp10, align 4
diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
--- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
@@ -118,7 +118,7 @@
 ; Verify that if the pointer escapes, then we do fall back onto using a temp copy.
 ; CHECK-LABEL: .visible .entry pointer_escapes
-; CHECK: .local .align 8 .b8 __local_depot{{.*}}
+; CHECK: .local .align 4 .b8 __local_depot{{.*}}
 ; CHECK64: ld.param.u64 [[result_addr:%rd[0-9]+]], [{{.*}}_param_0]
 ; CHECK64: add.u64 %[[copy_addr:rd[0-9]+]], %SPL, 0;
 ; CHECK32: ld.param.u32 [[result_addr:%r[0-9]+]], [{{.*}}_param_0]
diff --git a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
--- a/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-cc-byval.ll
@@ -353,7 +353,7 @@
 define void @call_test_byval_4Byte() {
 entry:
   %s0 = alloca %struct.S0, align 8
-  %s4a = alloca %struct.S4A, align 4
+  %s4a = alloca %struct.S4A, align 8
   %call = call signext i32 @test_byval_4Byte(ptr byval(%struct.S4) align 1 @gS4, ptr byval(%struct.S0) align 1 %s0, ptr byval(%struct.S4A) align 4 %s4a)
   ret void
 }
@@ -945,7 +945,7 @@
 define i32 @call_test_byval_homogeneous_float_struct() {
 entry:
-  %s = alloca %struct.F, align 4
+  %s = alloca %struct.F, align 8
   call void @llvm.memset.p0.i32(ptr align 4 %s, i8 0, i32 12, i1 false)
   %call = call i32 @test_byval_homogeneous_float_struct(ptr byval(%struct.F) align 4 %s)
   ret i32 %call
diff --git a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
--- a/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-sret-param.ll
@@ -17,7 +17,7 @@
 define void @test1() {
 entry:
-  %s = alloca %struct.S, align 4
+  %s = alloca %struct.S, align 8
   call void @foo(ptr sret(%struct.S) %s)
   ret void
 }
diff --git a/llvm/test/CodeGen/PowerPC/byval.ll b/llvm/test/CodeGen/PowerPC/byval.ll
--- a/llvm/test/CodeGen/PowerPC/byval.ll
+++ b/llvm/test/CodeGen/PowerPC/byval.ll
@@ -34,7 +34,7 @@
 ; CHECK-NEXT: mtlr 0
 ; CHECK-NEXT: blr
 entry:
-  %x = alloca %struct, align 4
+  %x = alloca %struct, align 8
   call void @foo(ptr %x)
   %r = call i32 @foo1(ptr byval(%struct) %x)
   ret i32 %r
diff --git a/llvm/test/CodeGen/PowerPC/structsinregs.ll b/llvm/test/CodeGen/PowerPC/structsinregs.ll
--- a/llvm/test/CodeGen/PowerPC/structsinregs.ll
+++ b/llvm/test/CodeGen/PowerPC/structsinregs.ll
@@ -35,13 +35,13 @@
 define i32 @caller1() nounwind {
 entry:
-  %p1 = alloca %struct.s1, align 1
-  %p2 = alloca %struct.s2, align 2
-  %p3 = alloca %struct.s3, align 2
-  %p4 = alloca %struct.s4, align 4
-  %p5 = alloca %struct.s5, align 4
-  %p6 = alloca %struct.s6, align 4
-  %p7 = alloca %struct.s7, align 4
+  %p1 = alloca %struct.s1
+  %p2 = alloca %struct.s2
+  %p3 = alloca %struct.s3
+  %p4 = alloca %struct.s4
+  %p5 = alloca %struct.s5
+  %p6 = alloca %struct.s6
+  %p7 = alloca %struct.s7
   call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller1.p1, i64 1, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p2, ptr align 2 @caller1.p2, i64 2, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr align 2 %p3, ptr align 2 @caller1.p3, i64 4, i1 false)
@@ -103,13 +103,13 @@
 define i32 @caller2() nounwind {
 entry:
-  %p1 = alloca %struct.t1, align 1
-  %p2 = alloca %struct.t2, align 1
-  %p3 = alloca %struct.t3, align 1
-  %p4 = alloca %struct.t4, align 1
-  %p5 = alloca %struct.t5, align 1
-  %p6 = alloca %struct.t6, align 1
-  %p7 = alloca %struct.t7, align 1
+  %p1 = alloca %struct.t1
+  %p2 = alloca %struct.t2
+  %p3 = alloca %struct.t3
+  %p4 = alloca %struct.t4
+  %p5 = alloca %struct.t5
+  %p6 = alloca %struct.t6
+  %p7 = alloca %struct.t7
   call void @llvm.memcpy.p0.p0.i64(ptr %p1, ptr @caller2.p1, i64 1, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr %p2, ptr @caller2.p2, i64 2, i1 false)
   call void @llvm.memcpy.p0.p0.i64(ptr %p3, ptr @caller2.p3, i64 3, i1 false)
diff --git a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
--- a/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
+++ b/llvm/test/CodeGen/PowerPC/varargs-struct-float.ll
@@ -7,7 +7,7 @@
 define void @foo(float inreg %s.coerce) nounwind {
 entry:
-  %s = alloca %struct.Sf1, align 4
+  %s = alloca %struct.Sf1, align 8
   store float %s.coerce, ptr %s, align 1
   %0 = load float, ptr %s, align 1
   call void (i32, ...) @testvaSf1(i32 1, float inreg %0)
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -594,7 +594,7 @@
 ; RV32I-WITHFP-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT: addi sp, sp, 48
 ; RV32I-WITHFP-NEXT: ret
-  %ls = alloca %struct.large, align 4
+  %ls = alloca %struct.large, align 8
   store i32 1, ptr %ls
   %b = getelementptr inbounds %struct.large, ptr %ls, i32 0, i32 1
   store i32 2, ptr %b
diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll
--- a/llvm/test/CodeGen/RISCV/frame.ll
+++ b/llvm/test/CodeGen/RISCV/frame.ll
@@ -41,7 +41,7 @@
 ; RV32I-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
 ; RV32I-WITHFP-NEXT: addi sp, sp, 32
 ; RV32I-WITHFP-NEXT: ret
-  %key = alloca %struct.key_t, align 4
+  %key = alloca %struct.key_t, align 8
   call void @llvm.memset.p0.i64(ptr align 4 %key, i8 0, i64 20, i1 false)
   %1 = getelementptr inbounds %struct.key_t, ptr %key, i64 0, i32 1, i64 0
   call void @test1(ptr %1)
diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -368,7 +368,7 @@
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
 bb:
-  %tmp = alloca %struct.quux, align 4
+  %tmp = alloca %struct.quux, align 8
   %tmp1 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1
   %tmp2 = getelementptr inbounds %struct.quux, ptr %tmp, i64 0, i32 1, i64 %arg
   store i8 0, ptr %tmp2, align 1
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -138,7 +138,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load ptr, ptr %va, align 4
   %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
@@ -238,7 +238,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i32
   call void @llvm.va_end(ptr %va)
@@ -401,7 +401,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i32
   %2 = alloca i8, i32 %1
@@ -599,7 +599,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load i32, ptr %va, align 4
   %1 = add i32 %argp.cur, 7
@@ -719,7 +719,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, double
   call void @llvm.va_end(ptr %va)
@@ -916,7 +916,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load i32, ptr %va, align 4
   %1 = add i32 %argp.cur, 7
@@ -1041,7 +1041,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, double
   call void @llvm.va_end(ptr %va)
@@ -1341,8 +1341,8 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 112
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %vargs = alloca ptr, align 4
-  %wargs = alloca ptr, align 4
+  %vargs = alloca ptr
+  %wargs = alloca ptr
   call void @llvm.va_start(ptr %vargs)
   %1 = va_arg ptr %vargs, i32
   call void @llvm.va_copy(ptr %wargs, ptr %vargs)
@@ -1660,7 +1660,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %1 = va_arg ptr %va, i32
   call void @llvm.va_end(ptr %va)
@@ -1849,7 +1849,7 @@
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 2032
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
   %large = alloca [ 100000000 x i8 ]
-  %va = alloca ptr, align 4
+  %va = alloca ptr
   call void @llvm.va_start(ptr %va)
   %argp.cur = load ptr, ptr %va, align 4
   %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
diff --git a/llvm/test/CodeGen/Thumb2/mve-stack.ll b/llvm/test/CodeGen/Thumb2/mve-stack.ll
--- a/llvm/test/CodeGen/Thumb2/mve-stack.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-stack.ll
@@ -15,7 +15,7 @@
 ; CHECK-NEXT: add sp, #16
 ; CHECK-NEXT: pop {r7, pc}
 entry:
-  %d = alloca [4 x i32], align 2
+  %d = alloca [4 x i32], align 4
   %g = getelementptr inbounds [4 x i32], ptr %d, i32 0, i32 2
   store <4 x i32> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -57,7 +57,7 @@
 ; CHECK-NEXT: add sp, #16
 ; CHECK-NEXT: pop {r7, pc}
 entry:
-  %d = alloca [16 x i8], align 2
+  %d = alloca [16 x i8], align 4
   %g = getelementptr inbounds [16 x i8], ptr %d, i32 0, i32 2
   store <16 x i8> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -78,7 +78,7 @@
 ; CHECK-NEXT: add sp, #8
 ; CHECK-NEXT: pop {r7, pc}
 entry:
-  %d = alloca [4 x i16], align 2
+  %d = alloca [4 x i16], align 4
   %g = getelementptr inbounds [4 x i16], ptr %d, i32 0, i32 2
   store <4 x i16> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -99,7 +99,7 @@
 ; CHECK-NEXT: add sp, #8
 ; CHECK-NEXT: pop {r7, pc}
 entry:
-  %d = alloca [4 x i8], align 2
+  %d = alloca [4 x i8], align 4
   %g = getelementptr inbounds [4 x i8], ptr %d, i32 0, i32 2
   store <4 x i8> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -120,7 +120,7 @@
 ; CHECK-NEXT: add sp, #8
 ; CHECK-NEXT: pop {r7, pc}
 entry:
-  %d = alloca [8 x i8], align 2
+  %d = alloca [8 x i8], align 4
   %g = getelementptr inbounds [8 x i8], ptr %d, i32 0, i32 2
   store <8 x i8> zeroinitializer, ptr %g, align 2
   call arm_aapcs_vfpcc void @func(ptr %d)
@@ -141,7 +141,7 @@
 ; CHECK-NEXT: add sp, #16
 ; CHECK-NEXT: pop {r7, pc}
 entry:
-  %d = alloca [4 x i32], align 2
+  %d = alloca [4 x i32], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [4 x i32], ptr %d, i32 0, i32 2
   %l = load <4 x i32>, ptr %g, align 2
@@ -181,7 +181,7 @@
 ; CHECK-NEXT: add sp, #16
 ; CHECK-NEXT: pop {r7, pc}
 entry:
-  %d = alloca [16 x i8], align 2
+  %d = alloca [16 x i8], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [16 x i8], ptr %d, i32 0, i32 2
   %l = load <16 x i8>, ptr %g, align 2
@@ -202,7 +202,7 @@
 ; CHECK-NEXT: add sp, #8
 ; CHECK-NEXT: pop {r4, pc}
 entry:
-  %d = alloca [4 x i16], align 2
+  %d = alloca [4 x i16], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [4 x i16], ptr %d, i32 0, i32 2
   %l = load <4 x i16>, ptr %g, align 2
@@ -223,7 +223,7 @@
 ; CHECK-NEXT: add sp, #8
 ; CHECK-NEXT: pop {r4, pc}
 entry:
-  %d = alloca [4 x i8], align 2
+  %d = alloca [4 x i8], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [4 x i8], ptr %d, i32 0, i32 2
   %l = load <4 x i8>, ptr %g, align 2
@@ -244,7 +244,7 @@
 ; CHECK-NEXT: add sp, #8
 ; CHECK-NEXT: pop {r4, pc}
 entry:
-  %d = alloca [8 x i8], align 2
+  %d = alloca [8 x i8], align 4
   call arm_aapcs_vfpcc void @func(ptr %d)
   %g = getelementptr inbounds [8 x i8], ptr %d, i32 0, i32 2
   %l = load <8 x i8>, ptr %g, align 2
diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
--- a/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
+++ b/llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll
@@ -1462,7 +1462,7 @@
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic", align 1
+  %i = alloca %"struct.std::__1::atomic", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %i)
   %i3 = zext i1 %arg1 to i8
   %i4 = load i8, ptr %arg, align 1
@@ -1525,7 +1525,7 @@
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.0", align 1
+  %i = alloca %"struct.std::__1::atomic.0", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %i)
   %i3 = load i8, ptr %arg, align 1
   %i4 = cmpxchg weak volatile ptr %i, i8 %i3, i8 %arg1 monotonic monotonic, align 1
@@ -1581,7 +1581,7 @@
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.5", align 1
+  %i = alloca %"struct.std::__1::atomic.5", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %i)
   %i3 = load i8, ptr %arg, align 1
   %i4 = cmpxchg weak volatile ptr %i, i8 %i3, i8 %arg1 monotonic monotonic, align 1
@@ -1638,7 +1638,7 @@
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.10", align 2
+  %i = alloca %"struct.std::__1::atomic.10", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %i)
   %i4 = load i16, ptr %arg, align 2
   %i5 = cmpxchg weak volatile ptr %i, i16 %i4, i16 %arg1 monotonic monotonic, align 2
@@ -1694,7 +1694,7 @@
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.15", align 2
+  %i = alloca %"struct.std::__1::atomic.15", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %i)
   %i4 = load i16, ptr %arg, align 2
   %i5 = cmpxchg weak volatile ptr %i, i16 %i4, i16 %arg1 monotonic monotonic, align 2
@@ -1741,7 +1741,7 @@
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.20", align 4
+  %i = alloca %"struct.std::__1::atomic.20", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i)
   %i4 = load i32, ptr %arg, align 4
   %i5 = cmpxchg weak volatile ptr %i, i32 %i4, i32 %arg1 monotonic monotonic, align 4
@@ -1788,7 +1788,7 @@
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
 bb:
-  %i = alloca %"struct.std::__1::atomic.25", align 4
+  %i = alloca %"struct.std::__1::atomic.25", align 8
= alloca %"struct.std::__1::atomic.25", align 8 call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %i) %i4 = load i32, ptr %arg, align 4 %i5 = cmpxchg weak volatile ptr %i, i32 %i4, i32 %arg1 monotonic monotonic, align 4 diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll --- a/llvm/test/CodeGen/VE/Scalar/atomic_load.ll +++ b/llvm/test/CodeGen/VE/Scalar/atomic_load.ll @@ -521,7 +521,7 @@ ; CHECK-NEXT: ld1b.zx %s0, 248(, %s11) ; CHECK-NEXT: and %s0, 1, %s0 ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic", align 1 + %1 = alloca %"struct.std::__1::atomic", align 8 call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) call void @_Z6fun_i1RNSt3__16atomicIbEE(ptr nonnull align 1 dereferenceable(1) %1) %2 = load atomic i8, ptr %1 monotonic, align 1 @@ -550,7 +550,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld1b.sx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.0", align 1 + %1 = alloca %"struct.std::__1::atomic.0", align 8 call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) call void @_Z6fun_i8RNSt3__16atomicIcEE(ptr nonnull align 1 dereferenceable(1) %1) %2 = load atomic i8, ptr %1 monotonic, align 1 @@ -571,7 +571,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld1b.zx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.5", align 1 + %1 = alloca %"struct.std::__1::atomic.5", align 8 call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %1) call void @_Z6fun_u8RNSt3__16atomicIhEE(ptr nonnull align 1 dereferenceable(1) %1) %2 = load atomic i8, ptr %1 monotonic, align 1 @@ -592,7 +592,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld2b.sx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.10", align 2 + %1 = alloca %"struct.std::__1::atomic.10", align 8 call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %1) call void @_Z7fun_i16RNSt3__16atomicIsEE(ptr nonnull align 2 dereferenceable(2) %1) %2 = load atomic i16, ptr %1 monotonic, align 2 @@ -613,7 +613,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ld2b.zx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.15", align 2 + %1 = alloca %"struct.std::__1::atomic.15", align 8 call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %1) call void @_Z7fun_u16RNSt3__16atomicItEE(ptr nonnull align 2 dereferenceable(2) %1) %2 = load atomic i16, ptr %1 monotonic, align 2 @@ -634,7 +634,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ldl.sx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.20", align 4 + %1 = alloca %"struct.std::__1::atomic.20", align 8 call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1) call void @_Z7fun_i32RNSt3__16atomicIiEE(ptr nonnull align 4 dereferenceable(4) %1) %2 = load atomic i32, ptr %1 monotonic, align 4 @@ -655,7 +655,7 @@ ; CHECK-NEXT: bsic %s10, (, %s12) ; CHECK-NEXT: ldl.zx %s0, 248(, %s11) ; CHECK-NEXT: or %s11, 0, %s9 - %1 = alloca %"struct.std::__1::atomic.25", align 4 + %1 = alloca %"struct.std::__1::atomic.25", align 8 call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %1) call void @_Z7fun_u32RNSt3__16atomicIjEE(ptr nonnull align 4 dereferenceable(4) %1) %2 = load atomic i32, ptr %1 monotonic, align 4 diff --git a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll --- a/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll +++ b/llvm/test/CodeGen/VE/Scalar/atomic_swap.ll @@ -723,7 +723,7 @@ ; 
 ; CHECK-NEXT: and %s0, 1, %s0
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic", align 1
+  %2 = alloca %"struct.std::__1::atomic", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
   %3 = zext i1 %0 to i8
   %4 = atomicrmw volatile xchg ptr %2, i8 %3 monotonic
@@ -751,7 +751,7 @@
 ; CHECK-NEXT: sra.l %s0, %s0, 56
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.0", align 1
+  %2 = alloca %"struct.std::__1::atomic.0", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i8 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
@@ -769,7 +769,7 @@
 ; CHECK-NEXT: and %s0, %s0, (56)0
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.5", align 1
+  %2 = alloca %"struct.std::__1::atomic.5", align 8
   call void @llvm.lifetime.start.p0(i64 1, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i8 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 1, ptr nonnull %2)
@@ -788,7 +788,7 @@
 ; CHECK-NEXT: sra.l %s0, %s0, 48
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.10", align 2
+  %2 = alloca %"struct.std::__1::atomic.10", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i16 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
@@ -806,7 +806,7 @@
 ; CHECK-NEXT: and %s0, %s0, (48)0
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.15", align 2
+  %2 = alloca %"struct.std::__1::atomic.15", align 8
   call void @llvm.lifetime.start.p0(i64 2, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i16 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 2, ptr nonnull %2)
@@ -821,7 +821,7 @@
 ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.20", align 4
+  %2 = alloca %"struct.std::__1::atomic.20", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i32 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
@@ -836,7 +836,7 @@
 ; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1
 ; CHECK-NEXT: adds.l %s11, 16, %s11
 ; CHECK-NEXT: b.l.t (, %s10)
-  %2 = alloca %"struct.std::__1::atomic.25", align 4
+  %2 = alloca %"struct.std::__1::atomic.25", align 8
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %2)
   %3 = atomicrmw volatile xchg ptr %2, i32 %0 monotonic
   call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %2)
diff --git a/llvm/test/CodeGen/WebAssembly/PR40172.ll b/llvm/test/CodeGen/WebAssembly/PR40172.ll
--- a/llvm/test/CodeGen/WebAssembly/PR40172.ll
+++ b/llvm/test/CodeGen/WebAssembly/PR40172.ll
@@ -15,7 +15,7 @@
 ; CHECK: i32.store8 8($[[BASE]]), $[[A1]]{{$}}
 define void @test(i8 %byte) {
-  %t = alloca { i8, i8 }, align 1
+  %t = alloca { i8, i8 }, align 8
   %x4 = and i8 %byte, 1
   %x5 = icmp eq i8 %x4, 1
   %x6 = and i8 %byte, 2
diff --git a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
--- a/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
+++ b/llvm/test/CodeGen/X86/dbg-changes-codegen-branch-folding.ll
@@ -50,8 +50,8 @@
 ; Function Attrs: uwtable
 define void @_Z3barii(i32 %param1, i32 %param2) #0 !dbg !24 {
 entry:
-  %var1 = alloca %struct.AAA3, align 1
-  %var2 = alloca %struct.AAA3, align 1
+  %var1 = alloca %struct.AAA3, align 8
+  %var2 = alloca %struct.AAA3, align 8
   tail call void @llvm.dbg.value(metadata i32 %param1, i64 0, metadata !29, metadata !46), !dbg !47
   tail call void @llvm.dbg.value(metadata i32 %param2, i64 0, metadata !30, metadata !46), !dbg !48
   tail call void @llvm.dbg.value(metadata ptr null, i64 0, metadata !31, metadata !46), !dbg !49
diff --git a/llvm/test/CodeGen/X86/fast-isel-call.ll b/llvm/test/CodeGen/X86/fast-isel-call.ll
--- a/llvm/test/CodeGen/X86/fast-isel-call.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-call.ll
@@ -59,7 +59,7 @@
 %struct.S = type { i8 }
 define void @test5() {
 entry:
-  %s = alloca %struct.S, align 1
+  %s = alloca %struct.S, align 8
 ; CHECK-LABEL: test5:
 ; CHECK: subl $12, %esp
 ; CHECK: leal 8(%esp), %ecx
diff --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll
--- a/llvm/test/CodeGen/X86/load-local-v3i129.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll
@@ -29,7 +29,7 @@
 ; SLOW-SHLD-NEXT: movq $-1, -48(%rsp)
 ; SLOW-SHLD-NEXT: retq
 Entry:
-  %y = alloca <3 x i129>, align 4
+  %y = alloca <3 x i129>, align 16
   %L = load <3 x i129>, ptr %y
   %I1 = insertelement <3 x i129> %L, i129 340282366920938463463374607431768211455, i32 1
   store <3 x i129> %I1, ptr %y
diff --git a/llvm/test/CodeGen/X86/pr44140.ll b/llvm/test/CodeGen/X86/pr44140.ll
--- a/llvm/test/CodeGen/X86/pr44140.ll
+++ b/llvm/test/CodeGen/X86/pr44140.ll
@@ -59,7 +59,7 @@
   %dummy1 = alloca [22 x i64], align 8
   %dummy2 = alloca [22 x i64], align 8
 
-  %data = alloca <2 x i64>, align 8
+  %data = alloca <2 x i64>, align 16
 
   br label %fake-loop
diff --git a/llvm/test/CodeGen/X86/ssp-data-layout.ll b/llvm/test/CodeGen/X86/ssp-data-layout.ll
--- a/llvm/test/CodeGen/X86/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/X86/ssp-data-layout.ll
@@ -93,14 +93,14 @@
   %y = alloca i32, align 4
   %z = alloca i32, align 4
   %ptr = alloca i32, align 4
-  %small2 = alloca [2 x i16], align 2
+  %small2 = alloca [2 x i16], align 4
   %large2 = alloca [8 x i32], align 16
-  %small = alloca [2 x i8], align 1
-  %large = alloca [8 x i8], align 1
-  %a = alloca %struct.struct_large_char, align 1
-  %b = alloca %struct.struct_small_char, align 1
+  %small = alloca [2 x i8], align 2
+  %large = alloca [8 x i8], align 8
+  %a = alloca %struct.struct_large_char, align 8
+  %b = alloca %struct.struct_small_char, align 8
   %c = alloca %struct.struct_large_nonchar, align 8
-  %d = alloca %struct.struct_small_nonchar, align 2
+  %d = alloca %struct.struct_small_nonchar, align 8
   %call = call i32 @get_scalar1()
   store i32 %call, ptr %x, align 4
   call void @end_scalar1()
@@ -217,12 +217,12 @@
   %ptr = alloca i32, align 4
   %small2 = alloca [2 x i16], align 2
   %large2 = alloca [8 x i32], align 16
-  %small = alloca [2 x i8], align 1
-  %large = alloca [8 x i8], align 1
-  %a = alloca %struct.struct_large_char, align 1
-  %b = alloca %struct.struct_small_char, align 1
+  %small = alloca [2 x i8], align 2
+  %large = alloca [8 x i8], align 8
+  %a = alloca %struct.struct_large_char, align 8
+  %b = alloca %struct.struct_small_char, align 8
   %c = alloca %struct.struct_large_nonchar, align 8
-  %d = alloca %struct.struct_small_nonchar, align 2
+  %d = alloca %struct.struct_small_nonchar, align 8
   %call = call i32 @get_scalar1()
   store i32 %call, ptr %x, align 4
   call void @end_scalar1()
@@ -325,14 +325,14 @@
   %y = alloca i32, align 4
   %z = alloca i32, align 4
   %ptr = alloca i32, align 4
-  %small2 = alloca [2 x i16], align 2
+  %small2 = alloca [2 x i16], align 4
   %large2 = alloca [8 x i32], align 16
-  %small = alloca [2 x i8], align 1
-  %large = alloca [8 x i8], align 1
-  %a = alloca %struct.struct_large_char, align 1
-  %b = alloca %struct.struct_small_char, align 1
+  %small = alloca [2 x i8], align 2
+  %large = alloca [8 x i8], align 8
+  %a = alloca %struct.struct_large_char, align 8
+  %b = alloca %struct.struct_small_char, align 8
   %c = alloca %struct.struct_large_nonchar, align 8
-  %d = alloca %struct.struct_small_nonchar, align 2
+  %d = alloca %struct.struct_small_nonchar, align 8
   %call = call i32 @get_scalar1()
   store i32 %call, ptr %x, align 4
   call void @end_scalar1()
diff --git a/llvm/test/CodeGen/X86/win-cleanuppad.ll b/llvm/test/CodeGen/X86/win-cleanuppad.ll
--- a/llvm/test/CodeGen/X86/win-cleanuppad.ll
+++ b/llvm/test/CodeGen/X86/win-cleanuppad.ll
@@ -58,8 +58,8 @@
 define void @nested_cleanup() #0 personality ptr @__CxxFrameHandler3 {
 entry:
-  %o1 = alloca %struct.Dtor, align 1
-  %o2 = alloca %struct.Dtor, align 1
+  %o1 = alloca %struct.Dtor, align 8
+  %o2 = alloca %struct.Dtor, align 8
   invoke void @f(i32 1)
           to label %invoke.cont unwind label %cleanup.outer
diff --git a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
--- a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
+++ b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
@@ -13,8 +13,8 @@
 ; CHECK-NEXT: subq $40, %rsp
 ; CHECK-NEXT: .cfi_def_cfa_offset 48
 ; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT: movq %rsp, %rcx
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
 ; CHECK-NEXT: cmovneq %rax, %rcx
 ; CHECK-NEXT: movups (%rcx), %xmm0
 ; CHECK-NEXT: callq _sink
@@ -36,8 +36,8 @@
 ; CHECK-NEXT: subq $40, %rsp
 ; CHECK-NEXT: .cfi_def_cfa_offset 48
 ; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax
-; CHECK-NEXT: movq %rsp, %rcx
+; CHECK-NEXT: movq %rsp, %rax
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
 ; CHECK-NEXT: cmovneq %rax, %rcx
 ; CHECK-NEXT: movaps (%rcx), %xmm0
 ; CHECK-NEXT: callq _sink
diff --git a/llvm/test/DebugInfo/AArch64/frameindices.ll b/llvm/test/DebugInfo/AArch64/frameindices.ll
--- a/llvm/test/DebugInfo/AArch64/frameindices.ll
+++ b/llvm/test/DebugInfo/AArch64/frameindices.ll
@@ -86,7 +86,7 @@
 define void @_Z3f16v() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !68 {
 entry:
   %agg.tmp.i.i = alloca %struct.A, align 8
-  %d = alloca %struct.B, align 1
+  %d = alloca %struct.B, align 8
   %agg.tmp.sroa.2 = alloca [15 x i8], align 1
   %agg.tmp.sroa.4 = alloca [7 x i8], align 1
   tail call void @llvm.dbg.declare(metadata [15 x i8]* %agg.tmp.sroa.2, metadata !56, metadata !74), !dbg !75
diff --git a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
--- a/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
+++ b/llvm/test/DebugInfo/NVPTX/dbg-declare-alloca.ll
@@ -221,7 +221,7 @@
 ; Function Attrs: noinline nounwind uwtable
 define void @use_dbg_declare() #0 !dbg !7 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.declare(metadata ptr %o, metadata !10, metadata !15), !dbg !16
   call void @escape_foo(ptr %o), !dbg !17
   ret void, !dbg !18
diff --git a/llvm/test/DebugInfo/X86/dbg-addr.ll b/llvm/test/DebugInfo/X86/dbg-addr.ll
--- a/llvm/test/DebugInfo/X86/dbg-addr.ll
+++ b/llvm/test/DebugInfo/X86/dbg-addr.ll
@@ -44,7 +44,7 @@
 ; Function Attrs: noinline nounwind uwtable
 define void @use_dbg_addr() #0 !dbg !7 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.addr(metadata ptr %o, metadata !10, metadata !15), !dbg !16
   call void @escape_foo(ptr %o), !dbg !17
   ret void, !dbg !18
@@ -52,7 +52,7 @@
 define void @test_dbg_addr_and_dbg_val_undef() #0 !dbg !117 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.addr(metadata ptr %o, metadata !1110, metadata !1115), !dbg !1116
   call void @escape_foo(ptr %o), !dbg !1117
   call void @llvm.dbg.value(metadata ptr undef, metadata !1110, metadata !1115), !dbg !1116
diff --git a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
--- a/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
+++ b/llvm/test/DebugInfo/X86/dbg-declare-alloca.ll
@@ -23,7 +23,7 @@
 ; Function Attrs: noinline nounwind uwtable
 define void @use_dbg_declare() #0 !dbg !7 {
 entry:
-  %o = alloca %struct.Foo, align 4
+  %o = alloca %struct.Foo, align 8
   call void @llvm.dbg.declare(metadata ptr %o, metadata !10, metadata !15), !dbg !16
   call void @escape_foo(ptr %o), !dbg !17
   ret void, !dbg !18
diff --git a/llvm/test/DebugInfo/X86/sret.ll b/llvm/test/DebugInfo/X86/sret.ll
--- a/llvm/test/DebugInfo/X86/sret.ll
+++ b/llvm/test/DebugInfo/X86/sret.ll
@@ -102,7 +102,7 @@
 define void @_ZN1B9AInstanceEv(ptr noalias sret(%class.A) %agg.result, ptr %this) #2 align 2 !dbg !53 {
 entry:
   %this.addr = alloca ptr, align 8
-  %nrvo = alloca i1
+  %nrvo = alloca i1, align 1
   %cleanup.dest.slot = alloca i32
   store ptr %this, ptr %this.addr, align 8
   call void @llvm.dbg.declare(metadata ptr %this.addr, metadata !89, metadata !DIExpression()), !dbg !91
@@ -139,7 +139,7 @@
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
   %argv.addr = alloca ptr, align 8
-  %b = alloca %class.B, align 1
+  %b = alloca %class.B, align 8
   %return_val = alloca i32, align 4
   %temp.lvalue = alloca %class.A, align 8
   %exn.slot = alloca ptr
@@ -226,7 +226,7 @@
 entry:
   %this.addr = alloca ptr, align 8
   %exn.slot = alloca ptr
-  %ehselector.slot = alloca i32
+  %ehselector.slot = alloca i32, align 4
   store ptr %this, ptr %this.addr, align 8
   call void @llvm.dbg.declare(metadata ptr %this.addr, metadata !126, metadata !DIExpression()), !dbg !127
   %this1 = load ptr, ptr %this.addr
diff --git a/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll b/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
--- a/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
+++ b/llvm/test/DebugInfo/assignment-tracking/X86/nested-loop-frags.ll
@@ -86,17 +86,17 @@
 define dso_local noundef i32 @_Z3funii(i32 noundef %a, i32 noundef %b) local_unnamed_addr #0 !dbg !17 {
 entry:
-  %a.addr = alloca i64, align 4, !DIAssignID !58 ; VAR:a
+  %a.addr = alloca i64, align 8, !DIAssignID !58 ; VAR:a
   call void @llvm.dbg.assign(metadata i1 undef, metadata !21, metadata !DIExpression(), metadata !58, metadata ptr %a.addr, metadata !DIExpression()), !dbg !27 ; VAR:a
-  %b.addr = alloca i64, align 4, !DIAssignID !64 ; VAR:b
+  %b.addr = alloca i64, align 8, !DIAssignID !64 ; VAR:b
   call void @llvm.dbg.assign(metadata i1 undef, metadata !22, metadata !DIExpression(), metadata !64, metadata ptr %b.addr, metadata !DIExpression()), !dbg !27 ; VAR:b
-  %c.addr = alloca i64, align 4, !DIAssignID !68 ; VAR:c
+  %c.addr = alloca i64, align 8, !DIAssignID !68 ; VAR:c
   call void @llvm.dbg.assign(metadata i1 undef, metadata !67, metadata !DIExpression(), metadata !68, metadata ptr %c.addr, metadata !DIExpression()), !dbg !27 ; VAR:c
-  %d.addr = alloca i64, align 4, !DIAssignID !73 ; VAR:d
+  %d.addr = alloca i64, align 8, !DIAssignID !73 ; VAR:d
   call void @llvm.dbg.assign(metadata i1 undef, metadata !72, metadata !DIExpression(), metadata !73, metadata ptr %d.addr, metadata !DIExpression()), !dbg !27 ; VAR:d
-  %e.addr = alloca i64, align 4, !DIAssignID !76 ; VAR:e
+  %e.addr = alloca i64, align 8, !DIAssignID !76 ; VAR:e
   call void @llvm.dbg.assign(metadata i1 undef, metadata !75, metadata !DIExpression(), metadata !76, metadata ptr %e.addr, metadata !DIExpression()), !dbg !27 ; VAR:e
-  ;%f.addr = alloca i64, align 4, !DIAssignID !80 ; VAR:f
+  ;%f.addr = alloca i64, align 8, !DIAssignID !80 ; VAR:f
   ;call void @llvm.dbg.assign(metadata i1 undef, metadata !79, metadata !DIExpression(), metadata !80, metadata ptr %f.addr, metadata !DIExpression()), !dbg !27 ; VAR:f
   store i64 1, ptr %a.addr, !DIAssignID !70 ; VAR:a
   call void @llvm.dbg.assign(metadata i64 1, metadata !21, metadata !DIExpression(), metadata !70, metadata ptr %a.addr, metadata !DIExpression()), !dbg !27 ; VAR:a
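
A minimal before/after sketch of the lowering change, using a hypothetical module (the names @f and @g and the datalayout assumptions are illustrative, not part of the patch). On a target where i64 has preferred alignment 8 and the stack is 16-byte aligned, the deleted isel code computed max(min(TyPrefAlign, StackAlign), SpecifiedAlign) = max(min(8, 16), 4) = 8 for the alloca below, silently raising its stack-slot alignment; with this change the alloca's own alignment is used as written:

    define void @f() {
      ; before: isel promoted this slot to align 8 (preferred alignment of i64)
      ; after:  the slot keeps the IR-specified align 4
      %p = alloca i64, align 4
      call void @g(ptr %p)
      ret void
    }

    declare void @g(ptr)

This is why the test updates above either spell the previously promoted alignment explicitly on the alloca (typically align 4 raised to align 8 on 64-bit targets) or, as in preferred-alignment.ll and structsinregs.ll, drop the align annotation so the test exercises the default alignment that codegen now sees.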