diff --git a/clang/test/CodeGen/c11atomics-ios.c b/clang/test/CodeGen/c11atomics-ios.c
--- a/clang/test/CodeGen/c11atomics-ios.c
+++ b/clang/test/CodeGen/c11atomics-ios.c
@@ -154,7 +154,7 @@
 // CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T0]], i8 0, i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T0]], i8 0, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T1]], align 8
@@ -236,7 +236,7 @@
  // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
  // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
  // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
  // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
  // CHECK: store atomic i64 [[VAL64]], i64* [[ADDR64]] seq_cst, align 8
@@ -261,7 +261,7 @@
  // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
  // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
  // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
  // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
  // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 8
@@ -295,11 +295,11 @@
  // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
  // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i8*
  // CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED:%.*]] to i64*
  // CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8*
  // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64*
  // CHECK: [[ATOMIC_DESIRED_VAL64:%.*]] = load i64, i64* [[ATOMIC_DESIRED64]], align 8
  // CHECK: [[ATOMIC_NEW_VAL64:%.*]] = load i64, i64* [[ATOMIC_NEW64]], align 8
diff --git a/clang/test/CodeGen/c11atomics.c b/clang/test/CodeGen/c11atomics.c
--- a/clang/test/CodeGen/c11atomics.c
+++ b/clang/test/CodeGen/c11atomics.c
@@ -307,7 +307,7 @@
 // CHECK-NEXT: [[P:%.*]] = load [[APS]]*, [[APS]]** [[FP]]
 // CHECK-NEXT: [[T0:%.*]] = bitcast [[APS]]* [[P]] to i8*
-// CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T0]], i8 0, i64 8, i1 false)
+// CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 8 [[T0]], i8 0, i32 8, i1 false)
 // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[APS]], [[APS]]* [[P]], i32 0, i32 0
 // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[PS]], [[PS]]* [[T0]], i32 0, i32 0
 // CHECK-NEXT: store i16 1, i16* [[T1]], align 8
@@ -402,7 +402,7 @@
  // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
  // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
  // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
  // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8*
  // CHECK: [[VAL64:%.*]] = load i64, i64* [[ATOMIC_VAL64]], align 2
@@ -427,7 +427,7 @@
  // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
  // CHECK: [[ATOMIC_VAL8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i8*
  // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_VAL8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_VAL64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_VAL]] to i64*
  // CHECK: [[ATOMIC_RES64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_RES]] to i64*
  // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8*
@@ -461,11 +461,11 @@
  // CHECK: [[ADDR64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ADDR]] to i64*
  // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED]] to i8*
  // CHECK: [[DESIRED8:%.*]] = bitcast %struct.PS* [[DESIRED]]to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_DESIRED8]], i8* align 2 [[DESIRED8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_DESIRED64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_DESIRED]] to i64*
  // CHECK: [[ATOMIC_NEW8:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i8*
  // CHECK: [[NONATOMIC_TMP8:%.*]] = bitcast %struct.PS* [[NONATOMIC_TMP]] to i8*
- // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i64 6, i1 false)
+ // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 [[ATOMIC_NEW8]], i8* align 2 [[NONATOMIC_TMP8]], i32 6, i1 false)
  // CHECK: [[ATOMIC_NEW64:%.*]] = bitcast { %struct.PS, [2 x i8] }* [[ATOMIC_NEW]] to i64*
  // CHECK: [[ADDR8:%.*]] = bitcast i64* [[ADDR64]] to i8*
  // CHECK: [[ATOMIC_DESIRED8:%.*]] = bitcast i64* [[ATOMIC_DESIRED64]] to i8*
diff --git a/clang/test/CodeGen/x86-atomic-long_double.c b/clang/test/CodeGen/x86-atomic-long_double.c
--- a/clang/test/CodeGen/x86-atomic-long_double.c
+++ b/clang/test/CodeGen/x86-atomic-long_double.c
@@ -46,10 +46,10 @@
  // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ]
  // CHECK32: [[INC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]],
  // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4
  // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4
  // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8*
@@ -108,10 +108,10 @@
  // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ]
  // CHECK32: [[DEC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]],
  // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4
  // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[DEC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4
  // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8*
@@ -177,10 +177,10 @@
  // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ]
  // CHECK32: [[INC_VALUE:%.+]] = fsub x86_fp80 [[OLD_VALUE]],
  // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4
  // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4
  // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8*
@@ -213,7 +213,7 @@
  // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4
  // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4
  // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 4
  // CHECK32: [[ADDR_VOID:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i8*
@@ -281,10 +281,10 @@
  // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ]
  // CHECK32: [[INC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]],
  // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4
  // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4
  // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8*
@@ -342,10 +342,10 @@
  // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[ORIG_LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ]
  // CHECK32: [[DEC_VALUE:%.+]] = fadd x86_fp80 [[OLD_VALUE]],
  // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4
  // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[DEC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4
  // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8*
@@ -409,10 +409,10 @@
  // CHECK32: [[OLD_VALUE:%.+]] = phi x86_fp80 [ [[LD_VALUE]], %{{.+}} ], [ [[LD_VALUE:%.+]], %[[ATOMIC_OP]] ]
  // CHECK32: [[INC_VALUE:%.+]] = fsub x86_fp80 [[OLD_VALUE]],
  // CHECK32: [[OLD_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[OLD_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[OLD_VALUE]], x86_fp80* [[OLD_VALUE_ADDR]], align 4
  // CHECK32: [[DESIRED_VALUE_VOID_ADDR:%.+]] = bitcast x86_fp80* [[DESIRED_VALUE_ADDR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[DESIRED_VALUE_VOID_ADDR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 [[INC_VALUE]], x86_fp80* [[DESIRED_VALUE_ADDR]], align 4
  // CHECK32: [[OBJ:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[EXPECTED:%.+]] = bitcast x86_fp80* [[OLD_VALUE_ADDR]] to i8*
@@ -445,7 +445,7 @@
  // CHECK32: store x86_fp80* %{{.+}}, x86_fp80** [[ADDR_ADDR:%.+]], align 4
  // CHECK32: [[ADDR:%.+]] = load x86_fp80*, x86_fp80** [[ADDR_ADDR]], align 4
  // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR:%.+]] to i8*
- // CHECK32: call void @llvm.memset.p0i8.i64(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i64 12, i1 false)
+ // CHECK32: call void @llvm.memset.p0i8.i32(i8* align 4 [[STORE_TEMP_VOID_PTR]], i8 0, i32 12, i1 false)
  // CHECK32: store x86_fp80 {{.+}}, x86_fp80* [[STORE_TEMP_PTR]], align 4
  // CHECK32: [[ADDR_VOID:%.+]] = bitcast x86_fp80* [[ADDR]] to i8*
  // CHECK32: [[STORE_TEMP_VOID_PTR:%.+]] = bitcast x86_fp80* [[STORE_TEMP_PTR]] to i8*
diff --git a/clang/test/CodeGenCXX/pod-member-memcpys.cpp b/clang/test/CodeGenCXX/pod-member-memcpys.cpp
--- a/clang/test/CodeGenCXX/pod-member-memcpys.cpp
+++ b/clang/test/CodeGenCXX/pod-member-memcpys.cpp
@@ -203,7 +203,7 @@
 CALL_CC(BitfieldMember2)
 // BitfieldMember2 copy-constructor:
 // CHECK-2-LABEL: define linkonce_odr void @_ZN15BitfieldMember2C2ERKS_(%struct.BitfieldMember2* %this, %struct.BitfieldMember2* nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) %0)
-// CHECK-2: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}} align 4 {{.*}} align 4 {{.*}}i64 16, i1 false)
+// CHECK-2: call void @llvm.memcpy.p0i8.p0i8.i32({{.*}} align 4 {{.*}} align 4 {{.*}}i32 16, i1 false)
 // CHECK-2: call void @_ZN6NonPODC1ERKS_
 // CHECK-2: ret void
diff --git a/clang/test/CodeGenCXX/pr20897.cpp b/clang/test/CodeGenCXX/pr20897.cpp
--- a/clang/test/CodeGenCXX/pr20897.cpp
+++ b/clang/test/CodeGenCXX/pr20897.cpp
@@ -9,7 +9,7 @@
 // CHECK: %[[dest_a_gep:.*]] = getelementptr inbounds %struct.Derived, %struct.Derived* %[[this]], i32 0, i32 1
 // CHECK-NEXT: %[[src_load:.*]] = load %struct.Derived*, %struct.Derived** {{.*}}
 // CHECK-NEXT: %[[src_a_gep:.*]] = getelementptr inbounds %struct.Derived, %struct.Derived* %[[src_load:.*]], i32 0, i32 1
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %[[dest_a_gep]], i8* align 4 %[[src_a_gep]], i64 1, i1 false)
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %[[dest_a_gep]], i8* align 4 %[[src_a_gep]], i32 1, i1 false)
 // CHECK-NEXT: %[[dest_this:.*]] = load %struct.Derived*, %struct.Derived** %[[retval]]
 // CHECK-NEXT: ret %struct.Derived* %[[dest_this]]
 bool a : 1;
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -435,6 +435,16 @@
     return ConstantInt::get(Context, AI);
   }
 
+  ConstantInt *getIntPtrSize(Value *Ptr, uint64_t Size) {
+    assert(BB && "Must have a basic block to retrieve the module!");
+
+    Module *M = BB->getParent()->getParent();
+    auto *PtrType = Ptr->getType();
+    unsigned PtrSize = M->getDataLayout().getPointerSizeInBits(
+        PtrType->getPointerAddressSpace());
+    return getIntN(PtrSize, Size);
+  }
+
   //===--------------------------------------------------------------------===//
   // Type creation methods
   //===--------------------------------------------------------------------===//
@@ -521,7 +531,7 @@
                          MaybeAlign Align, bool isVolatile = false,
                          MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr) {
-    return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
+    return CreateMemSet(Ptr, Val, getIntPtrSize(Ptr, Size), Align, isVolatile,
                         TBAATag, ScopeTag, NoAliasTag);
   }
 
@@ -565,7 +575,7 @@
                          MDNode *TBAAStructTag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr) {
-    return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
+    return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getIntPtrSize(Dst, Size),
                         isVolatile, TBAATag, TBAAStructTag, ScopeTag,
                         NoAliasTag);
   }
@@ -625,7 +635,7 @@
                           bool isVolatile = false, MDNode *TBAATag = nullptr,
                           MDNode *ScopeTag = nullptr,
                           MDNode *NoAliasTag = nullptr) {
-    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
+    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getIntPtrSize(Dst, Size),
                          isVolatile, TBAATag, ScopeTag, NoAliasTag);
   }
 
diff --git a/llvm/test/Transforms/MemCpyOpt/form-memset.ll b/llvm/test/Transforms/MemCpyOpt/form-memset.ll
--- a/llvm/test/Transforms/MemCpyOpt/form-memset.ll
+++ b/llvm/test/Transforms/MemCpyOpt/form-memset.ll
@@ -50,7 +50,7 @@
   ret void
 ; CHECK-LABEL: @test1(
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64
+; CHECK: call void @llvm.memset.p0i8.i32
 ; CHECK-NOT: store
 ; CHECK: ret
 }
@@ -152,11 +152,11 @@
 
 ; CHECK-LABEL: @test2(
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %tmp41, i8 -1, i64 8, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 1 %tmp41, i8 -1, i32 8, i1 false)
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 32, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 8 %0, i8 0, i32 32, i1 false)
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %1, i8 0, i64 32, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 8 %1, i8 0, i32 32, i1 false)
 ; CHECK-NOT: store
 ; CHECK: ret
 }
@@ -175,7 +175,7 @@
   ret void
 ; CHECK-LABEL: @test3(
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false)
 }
 
 ; store followed by memset, different offset scenario
@@ -188,7 +188,7 @@
   ret void
 ; CHECK-LABEL: @test4(
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false)
 }
 
 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
@@ -204,7 +204,7 @@
   ret void
 ; CHECK-LABEL: @test5(
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false)
 }
 
 ;; Memset followed by memset.
@@ -217,7 +217,7 @@
   tail call void @llvm.memset.p0i8.i64(i8* %1, i8 0, i64 12, i1 false)
   ret void
 ; CHECK-LABEL: @test6(
-; CHECK: call void @llvm.memset.p0i8.i64(i8* %2, i8 0, i64 24, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 24, i1 false)
 }
 
 ; More aggressive heuristic
@@ -233,7 +233,7 @@
   %4 = getelementptr inbounds i32, i32* %c, i32 4
   store i32 -1, i32* %4, align 4
 ; CHECK-LABEL: @test7(
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %5, i8 -1, i64 20, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %5, i8 -1, i32 20, i1 false)
   ret void
 }
@@ -270,7 +270,7 @@
   store i8 -1, i8* getelementptr (i8, i8* bitcast ([16 x i64]* @test9buf to i8*), i64 15), align 1
   ret void
 ; CHECK-LABEL: @test9(
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 16 bitcast ([16 x i64]* @test9buf to i8*), i8 -1, i64 16, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 16 bitcast ([16 x i64]* @test9buf to i8*), i8 -1, i32 16, i1 false)
 }
 
 ; PR19092
@@ -280,7 +280,7 @@
   ret void
 ; CHECK-LABEL: @test10(
 ; CHECK-NOT: memset
-; CHECK: call void @llvm.memset.p0i8.i64(i8* %P, i8 0, i64 42, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* %P, i8 0, i32 42, i1 false)
 ; CHECK-NOT: memset
 ; CHECK: ret void
 }
@@ -297,7 +297,7 @@
   ret void
 ; CHECK-LABEL: @test11(
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 1, i64 23, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 1, i32 23, i1 false)
 }
 
 ; Alignment should be preserved when there is a store with default align
@@ -310,5 +310,5 @@
   ret void
 ; CHECK-LABEL: @test12(
 ; CHECK-NOT: store
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %1, i8 0, i64 15, i1 false)
+; CHECK: call void @llvm.memset.p0i8.i32(i8* align 4 %1, i8 0, i32 15, i1 false)
 }
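
Note for reviewers (not part of the patch): a minimal standalone sketch of what the new getIntPtrSize-based overloads change. The module name "demo", the function name "zero8", and the "p:32:32" datalayout string below are made up for illustration; the IRBuilder, Module, and Function calls are the existing LLVM C++ API. With 32-bit pointers, the uint64_t-size CreateMemSet overload now emits @llvm.memset.p0i8.i32 with an i32 length argument, which is what the updated FileCheck lines above expect; the changed CreateMemCpy and CreateMemMove overloads pick their length type the same way.

// Illustrative sketch only -- not part of this patch.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  M.setDataLayout("p:32:32"); // pointers are 32 bits wide in this module

  auto *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                {Type::getInt8PtrTy(Ctx)}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "zero8", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

  // Size is still passed as uint64_t; the builder now derives the intrinsic's
  // length type from the pointer's address-space size in the DataLayout.
  B.CreateMemSet(F->getArg(0), B.getInt8(0), /*Size=*/8, MaybeAlign(8));
  B.CreateRetVoid();

  // The printed body contains: call void @llvm.memset.p0i8.i32(...), not .i64.
  M.print(outs(), nullptr);
  return 0;
}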