Index: cfe/trunk/lib/CodeGen/CGBuilder.h
===================================================================
--- cfe/trunk/lib/CodeGen/CGBuilder.h
+++ cfe/trunk/lib/CodeGen/CGBuilder.h
@@ -258,23 +258,23 @@
   using CGBuilderBaseTy::CreateMemCpy;
   llvm::CallInst *CreateMemCpy(Address Dest, Address Src, llvm::Value *Size,
                                bool IsVolatile = false) {
-    auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
-    return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
-                        Align.getQuantity(), IsVolatile);
+    return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getQuantity(),
+                        Src.getPointer(), Src.getAlignment().getQuantity(),
+                        Size, IsVolatile);
   }
   llvm::CallInst *CreateMemCpy(Address Dest, Address Src, uint64_t Size,
                                bool IsVolatile = false) {
-    auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
-    return CreateMemCpy(Dest.getPointer(), Src.getPointer(), Size,
-                        Align.getQuantity(), IsVolatile);
+    return CreateMemCpy(Dest.getPointer(), Dest.getAlignment().getQuantity(),
+                        Src.getPointer(), Src.getAlignment().getQuantity(),
+                        Size, IsVolatile);
   }

   using CGBuilderBaseTy::CreateMemMove;
   llvm::CallInst *CreateMemMove(Address Dest, Address Src, llvm::Value *Size,
                                 bool IsVolatile = false) {
-    auto Align = std::min(Dest.getAlignment(), Src.getAlignment());
-    return CreateMemMove(Dest.getPointer(), Src.getPointer(), Size,
-                         Align.getQuantity(), IsVolatile);
+    return CreateMemMove(Dest.getPointer(), Dest.getAlignment().getQuantity(),
+                         Src.getPointer(), Src.getAlignment().getQuantity(),
+                         Size, IsVolatile);
   }

   using CGBuilderBaseTy::CreateMemSet;
Index: cfe/trunk/test/CodeGen/arm-arguments.c
===================================================================
--- cfe/trunk/test/CodeGen/arm-arguments.c
+++ cfe/trunk/test/CodeGen/arm-arguments.c
@@ -215,6 +215,6 @@
 // AAPCS: %[[a:.*]] = alloca %struct.s35, align 16
 // AAPCS: %[[b:.*]] = bitcast %struct.s35* %[[a]] to i8*
 // AAPCS: %[[c:.*]] = bitcast %struct.s35* %0 to i8*
-// AAPCS: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %[[b]], i8* align 8 %[[c]]
+// AAPCS: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %[[b]], i8* align 8 %[[c]]
 // AAPCS: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>*
 // AAPCS: load <4 x float>, <4 x float>* %[[d]], align 16
Index: cfe/trunk/test/CodeGen/arm64-be-bitfield.c
===================================================================
--- cfe/trunk/test/CodeGen/arm64-be-bitfield.c
+++ cfe/trunk/test/CodeGen/arm64-be-bitfield.c
@@ -7,6 +7,6 @@
 // IR: callee_b0f(i64 [[ARG:%.*]])
 // IR: store i64 [[ARG]], i64* [[PTR:%.*]], align 8
 // IR: [[BITCAST:%.*]] = bitcast i64* [[PTR]] to i8*
-// IR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* align 4 [[BITCAST]], i64 4
+// IR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* align 8 [[BITCAST]], i64 4
   return bp11.b2;
 }
Index: cfe/trunk/test/CodeGen/block-byref-aggr.c
===================================================================
--- cfe/trunk/test/CodeGen/block-byref-aggr.c
+++ cfe/trunk/test/CodeGen/block-byref-aggr.c
@@ -24,7 +24,7 @@
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF]], [[BYREF]]* [[T0]], i32 0, i32 4
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
 // Verify that there's nothing else significant in the function.
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[BYREF]]* [[A]] to i8*
 // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8)
@@ -50,14 +50,14 @@
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[B_BYREF]], [[B_BYREF]]* [[T0]], i32 0, i32 4
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
 // Then for 'a':
 // CHECK-NEXT: [[A_FORWARDING:%.*]] = getelementptr inbounds [[A_BYREF]], [[A_BYREF]]* [[A]], i32 0, i32 1
 // CHECK-NEXT: [[T0:%.*]] = load [[A_BYREF]]*, [[A_BYREF]]** [[A_FORWARDING]]
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[A_BYREF]], [[A_BYREF]]* [[T0]], i32 0, i32 4
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[AGG]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[AGG]]* [[TEMP]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T2]], i8* align 4 [[T3]], i64 4, i1 false)
 // Verify that there's nothing else significant in the function.
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[B_BYREF]]* [[B]] to i8*
 // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T0]], i32 8)
Index: cfe/trunk/test/CodeGen/builtin-memfns.c
===================================================================
--- cfe/trunk/test/CodeGen/builtin-memfns.c
+++ cfe/trunk/test/CodeGen/builtin-memfns.c
@@ -73,7 +73,7 @@
 struct PS ps;
 void test8(int *arg) {
   // CHECK: @test8
-  // CHECK: call void @llvm.memcpy{{.*}} align 1 {{.*}} align 1 {{.*}} 16, i1 false)
+  // CHECK: call void @llvm.memcpy{{.*}} align 4 {{.*}} align 1 {{.*}} 16, i1 false)
   __builtin_memcpy(arg, ps.modes, sizeof(struct PS));
 }
Index: cfe/trunk/test/CodeGen/c11atomics-ios.c
===================================================================
--- cfe/trunk/test/CodeGen/c11atomics-ios.c
+++ cfe/trunk/test/CodeGen/c11atomics-ios.c
@@ -132,7 +132,7 @@
 // CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]]
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[TMP0]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 2 [[T2]], i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T1]], i8* align 2 [[T2]], i32 8, i1 false)
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[TMP0]] to i64*
 // CHECK-NEXT: [[T4:%.*]] = load i64, i64* [[T3]], align 8
 // CHECK-NEXT: [[T5:%.*]] = bitcast [[S]]* [[T0]] to i64*
@@ -183,7 +183,7 @@
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 2 [[T2]], i32 6, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 8 [[T2]], i32 6, i1 false)
   PS f = *fp;
 // CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
@@ -192,7 +192,7 @@
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP1]], i32 0, i32 0
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T2]], i8* align 2 [[T3]], i32 6, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T2]], i8* align 2 [[T3]], i32 6, i1 false)
 // CHECK-NEXT: [[T4:%.*]] = bitcast [[APS]]* [[TMP1]] to i64*
 // CHECK-NEXT: [[T5:%.*]] = load i64, i64* [[T4]], align 8
 // CHECK-NEXT: [[T6:%.*]] = bitcast [[APS]]* [[T0]] to i64*
@@ -215,7 +215,7 @@
   // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
   // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
   // CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 2 [[ATOMIC_RES8]], i32 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)
   return __c11_atomic_load(addr, 5);
 }
@@ -236,7 +236,7 @@
   // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
   // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
   // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
   // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
   // CHECK: store atomic i64 [[VAL64]], i64* [[ADDR64]] seq_cst, align 8
@@ -261,7 +261,7 @@
   // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
   // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
   // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
   // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
   // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
@@ -270,7 +270,7 @@
   // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
   // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
   // CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 2 [[ATOMIC_RES8]], i32 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)
   return __c11_atomic_exchange(addr, *val, 5);
 }
@@ -295,11 +295,11 @@
   // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
   // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i8*
   // CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i64*
   // CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8*
   // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64*
   // CHECK: [[ATOMIC_DESIRED_VAL64:%.*]] = load i64, i64* [[ATOMIC_DESIRED64]], align 8
   // CHECK: [[ATOMIC_NEW_VAL64:%.*]] = load i64, i64* [[ATOMIC_NEW64]], align 8
Index: cfe/trunk/test/CodeGen/c11atomics.c
===================================================================
--- cfe/trunk/test/CodeGen/c11atomics.c
+++ cfe/trunk/test/CodeGen/c11atomics.c
@@ -282,7 +282,7 @@
 // CHECK-NEXT: [[T0:%.*]] = load [[S]]*, [[S]]** [[FP]]
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[S]]* [[TMP0]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[S]]* [[F]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 2 [[T2]], i32 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T1]], i8* align 2 [[T2]], i32 8, i1 false)
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[S]]* [[T0]] to i8*
 // CHECK-NEXT: [[T4:%.*]] = bitcast [[S]]* [[TMP0]] to i8*
 // CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 8, i8* [[T3]], i8* [[T4]], i32 5)
@@ -335,7 +335,7 @@
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP0]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = bitcast [[PS]]* [[F]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T0]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 2 [[T2]], i32 6, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 8 [[T2]], i32 6, i1 false)
   PS f = *fp;
 // CHECK-NEXT: [[T0:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
@@ -344,7 +344,7 @@
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP1]], i32 0, i32 0
 // CHECK-NEXT: [[T2:%.*]] = bitcast [[PS]]* [[T1]] to i8*
 // CHECK-NEXT: [[T3:%.*]] = bitcast [[PS]]* [[F]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T2]], i8* align 2 [[T3]], i32 6, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[T2]], i8* align 2 [[T3]], i32 6, i1 false)
 // CHECK-NEXT: [[T4:%.*]] = bitcast [[APS]]* [[T0]] to i8*
 // CHECK-NEXT: [[T5:%.*]] = bitcast [[APS]]* [[TMP1]] to i8*
 // CHECK-NEXT: call arm_aapcscc void @__atomic_store(i32 8, i8* [[T4]], i8* [[T5]], i32 5)
@@ -357,7 +357,7 @@
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[TMP3]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = bitcast %struct.PS* [[TMP2]] to i8*
 // CHECK-NEXT: [[T2:%.*]] = bitcast %struct.PS* [[T0]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 2 [[T2]], i32 6, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[T1]], i8* align 8 [[T2]], i32 6, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds %struct.PS, %struct.PS* [[TMP2]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = load i16, i16* [[T0]], align 2
 // CHECK-NEXT: [[T2:%.*]] = sext i16 [[T1]] to i32
@@ -381,7 +381,7 @@
   // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
   // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
   // CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 2 [[ATOMIC_RES8]], i32 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)
   return __c11_atomic_load(addr, 5);
 }
@@ -402,7 +402,7 @@
   // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
   // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
   // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
   // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8*
   // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 2
@@ -427,7 +427,7 @@
   // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
   // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
   // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
   // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
   // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8*
@@ -437,7 +437,7 @@
   // CHECK: [[ATOMIC_RES_STRUCT:%.*]] = bitcast i64* [[ATOMIC_RES64]] to %struct.PS*
   // CHECK: [[AGG_RESULT8:%.*]] = bitcast %struct.PS* %agg.result to i8*
   // CHECK: [[ATOMIC_RES8:%.*]] = bitcast %struct.PS* [[ATOMIC_RES_STRUCT]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 2 [[ATOMIC_RES8]], i32 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 2 [[AGG_RESULT8]], i8* align 8 [[ATOMIC_RES8]], i32 6, i1 false)
   return __c11_atomic_exchange(addr, *val, 5);
 }
@@ -461,11 +461,11 @@
   // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
   // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED]] to i8*
   // CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED]] to i64*
   // CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8*
   // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
   // CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64*
   // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8*
   // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast i64* [[ATOMIC_DESIRED64]] to i8*
Index: cfe/trunk/test/CodeGen/packed-nest-unpacked.c
===================================================================
--- cfe/trunk/test/CodeGen/packed-nest-unpacked.c
+++ cfe/trunk/test/CodeGen/packed-nest-unpacked.c
@@ -42,7 +42,7 @@
 //
 void test6() {
   // CHECK: @test6
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 bitcast (%struct.X* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1) to i8*), i8* align 1 %{{.*}}, i64 24, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 bitcast (%struct.X* getelementptr inbounds (%struct.Y, %struct.Y* @g, i32 0, i32 1) to i8*), i8* align 4 %{{.*}}, i64 24, i1 false)
   g.y = foo();
 }
Index: cfe/trunk/test/CodeGen/ppc64-align-struct.c
===================================================================
--- cfe/trunk/test/CodeGen/ppc64-align-struct.c
+++ cfe/trunk/test/CodeGen/ppc64-align-struct.c
@@ -56,7 +56,7 @@
 // CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test1*
 // CHECK: [[DEST:%.*]] = bitcast %struct.test1* %y to i8*
 // CHECK: [[SRC:%.*]] = bitcast %struct.test1* [[T0]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i64 8, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 8 [[SRC]], i64 8, i1 false)
 struct test1 test1va (int x, ...)
 {
   struct test1 y;
@@ -102,7 +102,7 @@
 // CHECK: [[T0:%.*]] = bitcast i8* %[[ALIGN]] to %struct.test3*
 // CHECK: [[DEST:%.*]] = bitcast %struct.test3* %y to i8*
 // CHECK: [[SRC:%.*]] = bitcast %struct.test3* [[T0]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 16 [[SRC]], i64 32, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[DEST]], i8* align 16 [[SRC]], i64 32, i1 false)
 struct test3 test3va (int x, ...)
 {
   struct test3 y;
@@ -121,7 +121,7 @@
 // CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test4*
 // CHECK: [[DEST:%.*]] = bitcast %struct.test4* %y to i8*
 // CHECK: [[SRC:%.*]] = bitcast %struct.test4* [[T0]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i64 12, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST]], i8* align 8 [[SRC]], i64 12, i1 false)
 struct test4 test4va (int x, ...)
 {
   struct test4 y;
@@ -140,7 +140,7 @@
 // CHECK: [[T0:%.*]] = bitcast i8* %[[CUR]] to %struct.test_longdouble*
 // CHECK: [[DEST:%.*]] = bitcast %struct.test_longdouble* %y to i8*
 // CHECK: [[SRC:%.*]] = bitcast %struct.test_longdouble* [[T0]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i64 16, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[DEST]], i8* align 8 [[SRC]], i64 16, i1 false)
 struct test_longdouble { long double x; };
 struct test_longdouble testva_longdouble (int x, ...)
 {
Index: cfe/trunk/test/CodeGen/ppc64-soft-float.c
===================================================================
--- cfe/trunk/test/CodeGen/ppc64-soft-float.c
+++ cfe/trunk/test/CodeGen/ppc64-soft-float.c
@@ -92,7 +92,7 @@
 // CHECK-BE: %[[TMP0:[^ ]+]] = alloca %struct.f3, align 4
 // CHECK: %[[TMP1:[^ ]+]] = alloca [2 x i64]
 // CHECK: %[[TMP2:[^ ]+]] = bitcast [2 x i64]* %[[TMP1]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[TMP2]], i8* align 4 bitcast (%struct.f3* @global_f3 to i8*), i64 12, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f3* @global_f3 to i8*), i64 12, i1 false)
 // CHECK: %[[TMP3:[^ ]+]] = load [2 x i64], [2 x i64]* %[[TMP1]]
 // CHECK-LE: call { i64, i64 } @func_f3([2 x i64] %[[TMP3]])
 // CHECK-BE: call void @func_f3(%struct.f3* sret %[[TMP0]], [2 x i64] %[[TMP3]])
@@ -111,7 +111,7 @@
 // CHECK: %[[TMP0:[^ ]+]] = alloca %struct.f5, align 4
 // CHECK: %[[TMP1:[^ ]+]] = alloca [3 x i64]
 // CHECK: %[[TMP2:[^ ]+]] = bitcast [3 x i64]* %[[TMP1]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[TMP2]], i8* align 4 bitcast (%struct.f5* @global_f5 to i8*), i64 20, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f5* @global_f5 to i8*), i64 20, i1 false)
 // CHECK: %[[TMP3:[^ ]+]] = load [3 x i64], [3 x i64]* %[[TMP1]]
 // CHECK: call void @func_f5(%struct.f5* sret %[[TMP0]], [3 x i64] %[[TMP3]])
 struct f5 global_f5;
@@ -128,7 +128,7 @@
 // CHECK: %[[TMP0:[^ ]+]] = alloca %struct.f7, align 4
 // CHECK: %[[TMP1:[^ ]+]] = alloca [4 x i64], align 8
 // CHECK: %[[TMP2:[^ ]+]] = bitcast [4 x i64]* %[[TMP1]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[TMP2]], i8* align 4 bitcast (%struct.f7* @global_f7 to i8*), i64 28, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f7* @global_f7 to i8*), i64 28, i1 false)
 // CHECK: %[[TMP3:[^ ]+]] = load [4 x i64], [4 x i64]* %[[TMP1]], align 8
 // CHECK: call void @func_f7(%struct.f7* sret %[[TMP0]], [4 x i64] %[[TMP3]])
 struct f7 global_f7;
@@ -144,7 +144,7 @@
 // CHECK-LABEL: @call_f9
 // CHECK: %[[TMP1:[^ ]+]] = alloca [5 x i64]
 // CHECK: %[[TMP2:[^ ]+]] = bitcast [5 x i64]* %[[TMP1]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[TMP2]], i8* align 4 bitcast (%struct.f9* @global_f9 to i8*), i64 36, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f9* @global_f9 to i8*), i64 36, i1 false)
 // CHECK: %[[TMP3:[^ ]+]] = load [5 x i64], [5 x i64]* %[[TMP1]]
 // CHECK: call void @func_f9(%struct.f9* sret %{{[^ ]+}}, [5 x i64] %[[TMP3]])
 struct f9 global_f9;
@@ -162,7 +162,7 @@
 // CHECK-BE: %[[TMPX:[^ ]+]] = alloca %struct.fabc, align 4
 // CHECK: %[[TMP0:[^ ]+]] = alloca [2 x i64], align 8
 // CHECK: %[[TMP2:[^ ]+]] = bitcast [2 x i64]* %[[TMP0]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[TMP2]], i8* align 4 bitcast (%struct.fabc* @global_fabc to i8*), i64 12, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.fabc* @global_fabc to i8*), i64 12, i1 false)
 // CHECK: %[[TMP3:[^ ]+]] = load [2 x i64], [2 x i64]* %[[TMP0]], align 8
 // CHECK-LE: %call = call { i64, i64 } @func_fabc([2 x i64] %[[TMP3]])
 // CHECK-BE: call void @func_fabc(%struct.fabc* sret %[[TMPX]], [2 x i64] %[[TMP3]])
Index: cfe/trunk/test/CodeGen/ppc64le-aggregates.c
===================================================================
--- cfe/trunk/test/CodeGen/ppc64le-aggregates.c
+++ cfe/trunk/test/CodeGen/ppc64le-aggregates.c
@@ -104,7 +104,7 @@
 // CHECK-LABEL: @call_f9
 // CHECK: %[[TMP1:[^ ]+]] = alloca [5 x i64]
 // CHECK: %[[TMP2:[^ ]+]] = bitcast [5 x i64]* %[[TMP1]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[TMP2]], i8* align 4 bitcast (%struct.f9* @global_f9 to i8*), i64 36, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f9* @global_f9 to i8*), i64 36, i1 false)
 // CHECK: %[[TMP3:[^ ]+]] = load [5 x i64], [5 x i64]* %[[TMP1]]
 // CHECK: call void @func_f9(%struct.f9* sret %{{[^ ]+}}, [5 x i64] %[[TMP3]])
 struct f9 global_f9;
Index: cfe/trunk/test/CodeGen/riscv32-abi.c
===================================================================
--- cfe/trunk/test/CodeGen/riscv32-abi.c
+++ cfe/trunk/test/CodeGen/riscv32-abi.c
@@ -385,7 +385,7 @@
 // CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[ARGP_CUR4]] to %struct.tiny*
 // CHECK-NEXT: [[TMP6:%.*]] = bitcast %struct.tiny* [[TS]] to i8*
 // CHECK-NEXT: [[TMP7:%.*]] = bitcast %struct.tiny* [[TMP5]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[TMP6]], i8* align 1 [[TMP7]], i32 4, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[TMP6]], i8* align 4 [[TMP7]], i32 4, i1 false)
 // CHECK-NEXT: [[ARGP_CUR6:%.*]] = load i8*, i8** [[VA]], align 4
 // CHECK-NEXT: [[ARGP_NEXT7:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR6]], i32 8
 // CHECK-NEXT: store i8* [[ARGP_NEXT7]], i8** [[VA]], align 4
Index: cfe/trunk/test/CodeGen/riscv64-abi.c
===================================================================
--- cfe/trunk/test/CodeGen/riscv64-abi.c
+++ cfe/trunk/test/CodeGen/riscv64-abi.c
@@ -375,7 +375,7 @@
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[ARGP_CUR2]] to %struct.tiny*
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast %struct.tiny* [[TS]] to i8*
 // CHECK-NEXT: [[TMP4:%.*]] = bitcast %struct.tiny* [[TMP2]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[TMP3]], i8* align 2 [[TMP4]], i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
 // CHECK-NEXT: [[ARGP_CUR4:%.*]] = load i8*, i8** [[VA]], align 8
 // CHECK-NEXT: [[ARGP_NEXT5:%.*]] = getelementptr inbounds i8, i8* [[ARGP_CUR4]], i64 16
 // CHECK-NEXT: store i8* [[ARGP_NEXT5]], i8** [[VA]], align 8
Index: cfe/trunk/test/CodeGen/x86_32-arguments-realign.c
===================================================================
--- cfe/trunk/test/CodeGen/x86_32-arguments-realign.c
+++ cfe/trunk/test/CodeGen/x86_32-arguments-realign.c
@@ -2,7 +2,7 @@
 // RUN: FileCheck < %t %s

 // CHECK-LABEL: define void @f0(%struct.s0* byval align 4)
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %{{.*}}, i8* align 4 %{{.*}}, i32 16, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %{{.*}}, i8* align 4 %{{.*}}, i32 16, i1 false)
 // CHECK: }
 struct s0 { long double a; };
 void f0(struct s0 a0) {
Index: cfe/trunk/test/CodeGen/x86_64-arguments.c
===================================================================
--- cfe/trunk/test/CodeGen/x86_64-arguments.c
+++ cfe/trunk/test/CodeGen/x86_64-arguments.c
@@ -434,7 +434,7 @@
 // CHECK-NEXT: [[CASTED_VALUE_ADDR:%.*]] = bitcast i8* [[VALUE_ADDR]] to [[STRUCT_TEST51]]
 // CHECK-NEXT: [[CASTED_TMP_ADDR:%.*]] = bitcast [[STRUCT_TEST51]]* [[TMP_ADDR]] to i8*
 // CHECK-NEXT: [[RECASTED_VALUE_ADDR:%.*]] = bitcast [[STRUCT_TEST51]]* [[CASTED_VALUE_ADDR]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[CASTED_TMP_ADDR]], i8* align 8 [[RECASTED_VALUE_ADDR]], i64 16, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[CASTED_TMP_ADDR]], i8* align 8 [[RECASTED_VALUE_ADDR]], i64 16, i1 false)
 // CHECK-NEXT: add i32 {{.*}}, 16
 // CHECK-NEXT: store i32 {{.*}}, i32* {{.*}}
 // CHECK-NEXT: br label
Index: cfe/trunk/test/CodeGenCXX/alignment.cpp
===================================================================
--- cfe/trunk/test/CodeGenCXX/alignment.cpp
+++ cfe/trunk/test/CodeGenCXX/alignment.cpp
@@ -204,7 +204,7 @@
   // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
   // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
   // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
   AlignedArray result = a.aArray;
 }
@@ -223,7 +223,7 @@
   // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
   // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
   // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
   AlignedArray result = b.aArray;
 }
@@ -234,7 +234,7 @@
   // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
   // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
   // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
   AlignedArray result = b.bArray;
 }
@@ -245,7 +245,7 @@
   // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
   // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
   // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
   AlignedArray result = b->bArray;
 }
@@ -256,7 +256,7 @@
   // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
   // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
   // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
   B b;
   AlignedArray result = b.bArray;
 }
@@ -277,7 +277,7 @@
   // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[A]], [[A]]* [[A_P]], i32 0, i32 0
   // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
   // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 16 [[T1]], i64 16, i1 false)
   D d;
   AlignedArray result = d.aArray;
 }
@@ -292,7 +292,7 @@
   // CHECK: [[ARRAY_P:%.*]] = getelementptr inbounds [[B]], [[B]]* [[B_P]], i32 0, i32 2
   // CHECK: [[T0:%.*]] = bitcast [[ARRAY]]* [[RESULT]] to i8*
   // CHECK: [[T1:%.*]] = bitcast [[ARRAY]]* [[ARRAY_P]] to i8*
-  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 64 [[T0]], i8* align 8 [[T1]], i64 16, i1 false)
   D d;
   AlignedArray result = d.bArray;
 }
Index: cfe/trunk/test/CodeGenCXX/assign-construct-memcpy.cpp
===================================================================
--- cfe/trunk/test/CodeGenCXX/assign-construct-memcpy.cpp
+++ cfe/trunk/test/CodeGenCXX/assign-construct-memcpy.cpp
@@ -32,10 +32,10 @@
 foo *test2(const foo &x) {
   return new foo(x);
 // CHECK-POD: test2
-// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 8 {{.*}} align 8 {{.*}}i64 24
+// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 16 {{.*}} align 8 {{.*}}i64 24

 // CHECK-NONPOD: test2
-// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 8 {{.*}} align 8 {{.*}}i64 24
+// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 16 {{.*}} align 8 {{.*}}i64 24
 }

 foo test3(const foo &x) {
@@ -51,10 +51,10 @@
 foo *test4(foo &&x) {
   return new foo(x);
 // CHECK-POD: test4
-// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 8 {{.*}} align 8 {{.*}}i64 24
+// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 16 {{.*}} align 8 {{.*}}i64 24

 // CHECK-NONPOD: test4
-// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 8 {{.*}} align 8 {{.*}}i64 24
+// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 16 {{.*}} align 8 {{.*}}i64 24
 }

 void test5(foo &f, const foo &x) {
Index: cfe/trunk/test/CodeGenCXX/copy-constructor-elim.cpp
===================================================================
--- cfe/trunk/test/CodeGenCXX/copy-constructor-elim.cpp
+++ cfe/trunk/test/CodeGenCXX/copy-constructor-elim.cpp
@@ -56,4 +56,4 @@
 // Make sure that we obey the destination's alignment requirements when emitting
 // the copy.
 // CHECK-LABEL: define {{.*}} @f(
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.{{i64|i32}}({{.*}}align 4{{.*}}, i8* align 4 bitcast (%struct.V* @gv1 to i8*), {{i64|i32}} 4, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.{{i64|i32}}({{.*}}align 4{{.*}}, i8* align 8 bitcast (%struct.V* @gv1 to i8*), {{i64|i32}} 4, i1 false)
Index: cfe/trunk/test/CodeGenCXX/eh.cpp
===================================================================
--- cfe/trunk/test/CodeGenCXX/eh.cpp
+++ cfe/trunk/test/CodeGenCXX/eh.cpp
@@ -13,7 +13,7 @@
 // CHECK: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 8)
 // CHECK-NEXT: [[EXN:%.*]] = bitcast i8* [[EXNOBJ]] to [[DSTAR:%[^*]*\*]]
 // CHECK-NEXT: [[EXN2:%.*]] = bitcast [[DSTAR]] [[EXN]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[EXN2]], i8* align 8 bitcast ([[DSTAR]] @d1 to i8*), i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[EXN2]], i8* align 8 bitcast ([[DSTAR]] @d1 to i8*), i64 8, i1 false)
 // CHECK-NEXT: call void @__cxa_throw(i8* [[EXNOBJ]], i8* bitcast ({ i8*, i8* }* @_ZTI7test1_D to i8*), i8* null) [[NR:#[0-9]+]]
 // CHECK-NEXT: unreachable
Index: cfe/trunk/test/OpenMP/parallel_reduction_codegen.cpp
===================================================================
--- cfe/trunk/test/OpenMP/parallel_reduction_codegen.cpp
+++ cfe/trunk/test/OpenMP/parallel_reduction_codegen.cpp
@@ -737,7 +737,7 @@
 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)

 // var1 = var1.operator &&(var1_reduction);
 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_REF]])
@@ -752,7 +752,7 @@
 // CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)

 // t_var1 = min(t_var1, t_var1_reduction);
 // CHECK: [[T_VAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_REF]],
@@ -778,7 +778,7 @@
 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* dereferenceable(4) [[VAR_PRIV]])
 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_REF]] to i8*
 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
 // CHECK: call void @__kmpc_end_critical(

 // var1 = var1.operator &&(var1_reduction);
@@ -796,7 +796,7 @@
 // CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_REF]] to i8*
 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
 // CHECK: call void @__kmpc_end_critical(

 // t_var1 = min(t_var1, t_var1_reduction);
@@ -864,7 +864,7 @@
 // CHECK: [[UP:%.+]] = call dereferenceable(4) [[S_INT_TY]]* @{{.+}}([[S_INT_TY]]* [[VAR_LHS]], [[S_INT_TY]]* dereferenceable(4) [[VAR_RHS]])
 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR_LHS]] to i8*
 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[UP]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)

 // var1_lhs = var1_lhs.operator &&(var1_rhs);
 // CHECK: [[TO_INT:%.+]] = call i{{[0-9]+}} @{{.+}}([[S_INT_TY]]* [[VAR1_LHS]])
@@ -880,7 +880,7 @@
 // CHECK: call void @{{.+}}([[S_INT_TY]]* [[COND_LVALUE:%.+]], i32 [[CONV]])
 // CHECK: [[BC1:%.+]] = bitcast [[S_INT_TY]]* [[VAR1_LHS]] to i8*
 // CHECK: [[BC2:%.+]] = bitcast [[S_INT_TY]]* [[COND_LVALUE]] to i8*
-// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 128 [[BC1]], i8* align 4 [[BC2]], i64 4, i1 false)

 // t_var1_lhs = min(t_var1_lhs, t_var1_rhs);
 // CHECK: [[T_VAR1_LHS_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR1_LHS]],
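For readers skimming the hunks above: the old CGBuilder wrappers had to collapse the two alignments into one, std::min(Dest.getAlignment(), Src.getAlignment()), because the memcpy/memmove intrinsics used to carry a single alignment. With the two-alignment overloads, the destination and source pointers each keep their own "align" parameter attribute, which is why every FileCheck update raises the alignment on exactly one side of each call while the other side is unchanged. Below is a minimal standalone sketch of the underlying IRBuilder call (illustrative only, not part of the patch; it assumes the contemporaneous C++ API in which DstAlign/SrcAlign are plain unsigned values, whereas later LLVM versions take llvm::MaybeAlign):

    // Sketch: emit one memcpy whose destination and source keep distinct
    // alignments, mirroring what the new CGBuilder wrappers forward to.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("memcpy-align-demo", Ctx);
      Function *F = Function::Create(
          FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false),
          Function::ExternalLinkage, "f", &M);
      IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

      // Two 16-byte buffers with different (assumed) alignments.
      AllocaInst *Dst = B.CreateAlloca(B.getInt8Ty(), B.getInt32(16), "dst");
      Dst->setAlignment(16);
      AllocaInst *Src = B.CreateAlloca(B.getInt8Ty(), B.getInt32(16), "src");
      Src->setAlignment(8);

      // Old wrapper behavior: one alignment, min(16, 8) == 8, stamped on both
      // pointers. New behavior: each pointer keeps its own alignment.
      B.CreateMemCpy(Dst, /*DstAlign=*/16, Src, /*SrcAlign=*/8, /*Size=*/16);

      B.CreateRetVoid();
      M.print(outs(), nullptr);
      return 0;
    }

Printing the module shows the same shape the updated tests check for: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %dst, i8* align 8 %src, i64 16, i1 false).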