@@ -172,15 +172,15 @@ template <>
 __forceinline kmp_int32 test_then_add<kmp_int32>(volatile kmp_int32 *p,
                                                  kmp_int32 d) {
   kmp_int32 r;
-  r = KMP_TEST_THEN_ADD32(p, d);
+  r = KMP_TEST_THEN_ADD32(CCAST(kmp_int32 *, p), d);
   return r;
 }
 
 template <>
 __forceinline kmp_int64 test_then_add<kmp_int64>(volatile kmp_int64 *p,
                                                  kmp_int64 d) {
   kmp_int64 r;
-  r = KMP_TEST_THEN_ADD64(p, d);
+  r = KMP_TEST_THEN_ADD64(CCAST(kmp_int64 *, p), d);
   return r;
 }
 
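The CCAST added above strips the volatile qualifier before the pointer reaches the KMP_TEST_THEN_* primitives, which take unqualified pointers. For reference, a minimal sketch of how CCAST and RCAST are presumably defined (modeled on kmp_os.h; the exact definitions may differ):

// Presumed helper-macro definitions, following kmp_os.h. In C++ they map
// onto the named casts (const_cast also adds/removes volatile); in C both
// degrade to a plain cast.
#ifdef __cplusplus
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif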
@@ -190,14 +190,14 @@ template <typename T> static __forceinline T test_then_inc_acq(volatile T *p);
 template <>
 __forceinline kmp_int32 test_then_inc_acq<kmp_int32>(volatile kmp_int32 *p) {
   kmp_int32 r;
-  r = KMP_TEST_THEN_INC_ACQ32(p);
+  r = KMP_TEST_THEN_INC_ACQ32(CCAST(kmp_int32 *, p));
   return r;
 }
 
 template <>
 __forceinline kmp_int64 test_then_inc_acq<kmp_int64>(volatile kmp_int64 *p) {
   kmp_int64 r;
-  r = KMP_TEST_THEN_INC_ACQ64(p);
+  r = KMP_TEST_THEN_INC_ACQ64(CCAST(kmp_int64 *, p));
   return r;
 }
 
@@ -207,14 +207,14 @@ template <typename T> static __forceinline T test_then_inc(volatile T *p);
 template <>
 __forceinline kmp_int32 test_then_inc<kmp_int32>(volatile kmp_int32 *p) {
   kmp_int32 r;
-  r = KMP_TEST_THEN_INC32(p);
+  r = KMP_TEST_THEN_INC32(CCAST(kmp_int32 *, p));
   return r;
 }
 
 template <>
 __forceinline kmp_int64 test_then_inc<kmp_int64>(volatile kmp_int64 *p) {
   kmp_int64 r;
-  r = KMP_TEST_THEN_INC64(p);
+  r = KMP_TEST_THEN_INC64(CCAST(kmp_int64 *, p));
   return r;
 }
 
@@ -262,7 +262,7 @@ static UT // unsigned 4- or 8-byte type
   register kmp_uint32 (*f)(UT, UT) = pred;
   register UT r;
 
-  KMP_FSYNC_SPIN_INIT(obj, (void *)spin);
+  KMP_FSYNC_SPIN_INIT(obj, CCAST(UT *, spin));
   KMP_INIT_YIELD(spins);
   // main wait spin loop
   while (!f(r = *spin, check)) {
@@ -440,7 +440,7 @@ static void __kmp_dispatch_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
             th->th.th_dispatch->th_dispatch_pr_current);
   }
 
-  KMP_FSYNC_RELEASING(&sh->u.s.ordered_iteration);
+  KMP_FSYNC_RELEASING(CCAST(UT *, &sh->u.s.ordered_iteration));
 #if !defined(KMP_GOMP_COMPAT)
   if (__kmp_env_consistency_check) {
     if (pr->ordered_bumped != 0) {
@@ -1162,7 +1162,9 @@ __kmp_dispatch_init(ident_t *loc, int gtid, enum sched_type schedule, T lb,
                 gtid, my_buffer_index, sh->buffer_index));
 
   th->th.th_dispatch->th_dispatch_pr_current = (dispatch_private_info_t *)pr;
-  th->th.th_dispatch->th_dispatch_sh_current = (dispatch_shared_info_t *)sh;
+  th->th.th_dispatch->th_dispatch_sh_current =
+      RCAST(dispatch_shared_info_t *,
+            CCAST(dispatch_shared_info_template<UT> *, sh));
 #if USE_ITT_BUILD
   if (pr->ordered) {
     __kmp_itt_ordered_init(gtid);
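The nested RCAST(..., CCAST(...)) above is needed because reinterpret_cast alone cannot cast away volatile: the qualifier must first be dropped with const_cast, and only then can the pointee type be changed. A self-contained sketch of the same two-step pattern (the struct names are stand-ins, not the real dispatch types):

#include <cstdio>

struct SharedTemplate { int iteration; }; // stand-in for dispatch_shared_info_template<UT>
struct SharedPlain { int iteration; };    // stand-in for dispatch_shared_info_t

int main() {
  SharedTemplate storage = {0};
  volatile SharedTemplate *sh = &storage; // how the runtime views the buffer

  // reinterpret_cast<SharedPlain *>(sh) would not compile -- it cannot
  // drop volatile. const_cast removes the qualifier, reinterpret_cast
  // then changes the pointee type, mirroring RCAST(..., CCAST(..., sh)).
  SharedPlain *p =
      reinterpret_cast<SharedPlain *>(const_cast<SharedTemplate *>(sh));
  std::printf("%p\n", static_cast<void *>(p));
  return 0;
}

In the runtime this is presumably safe because the templated and plain shared-info structs are laid out to match each other.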
@@ -1978,7 +1980,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
         pr->u.p.parm2) { // compare with K*nproc*(chunk+1), K=2 by default
       // use dynamic-style schedule
       // atomically increment iterations, get old value
-      init = test_then_add<ST>((ST *)&sh->u.s.iteration, (ST)chunkspec);
+      init = test_then_add<ST>(
+          RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)), (ST)chunkspec);
       remaining = trip - init;
       if (remaining <= 0) {
         status = 0; // all iterations got by other threads
@@ -1995,8 +1998,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
       } // if
       limit = init + (UT)(remaining *
                           *(double *)&pr->u.p.parm3); // divide by K*nproc
-      if (compare_and_swap<ST>((ST *)&sh->u.s.iteration, (ST)init,
-                               (ST)limit)) {
+      if (compare_and_swap<ST>(RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)),
+                               (ST)init, (ST)limit)) {
         // CAS was successful, chunk obtained
         status = 1;
         --limit;
@@ -2056,7 +2059,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
       if ((T)remaining < pr->u.p.parm2) {
         // use dynamic-style schedule
         // atomically increment iterations, get old value
-        init = test_then_add<ST>((ST *)&sh->u.s.iteration, (ST)chunk);
+        init = test_then_add<ST>(
+            RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)), (ST)chunk);
         remaining = trip - init;
         if (remaining <= 0) {
           status = 0; // all iterations got by other threads
@@ -2078,8 +2082,8 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
         if (rem) // adjust so that span%chunk == 0
           span += chunk - rem;
         limit = init + span;
-        if (compare_and_swap<ST>((ST *)&sh->u.s.iteration, (ST)init,
-                                 (ST)limit)) {
+        if (compare_and_swap<ST>(RCAST(ST *, CCAST(UT *, &sh->u.s.iteration)),
+                                 (ST)init, (ST)limit)) {
           // CAS was successful, chunk obtained
           status = 1;
           --limit;
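Both guided-scheduling branches above share one lock-free shape: atomically grab iterations outright when few remain (test_then_add), otherwise read the shared counter, compute a tentative chunk end, and publish it with a CAS, retrying if another thread raced ahead. A condensed sketch of that claim loop using std::atomic in place of the runtime's compare_and_swap (all names here are illustrative):

#include <atomic>
#include <cstdint>

// Claim a guided-style chunk from a shared iteration counter over [0, trip).
// 'ratio' plays the role of *(double *)&pr->u.p.parm3 (the 1/(K*nproc)
// factor). On success, the caller owns iterations [init, limit].
bool claim_chunk(std::atomic<std::int64_t> &iteration, std::int64_t trip,
                 double ratio, std::int64_t &init, std::int64_t &limit) {
  for (;;) {
    init = iteration.load();
    std::int64_t remaining = trip - init;
    if (remaining <= 0)
      return false; // all iterations got by other threads
    std::int64_t next = init + static_cast<std::int64_t>(remaining * ratio);
    if (next == init)
      next = init + 1; // guarantee forward progress on tiny remainders
    if (iteration.compare_exchange_weak(init, next)) {
      limit = next - 1; // CAS was successful, chunk obtained
      return true;
    }
    // CAS failed: another thread advanced the counter; retry.
  }
}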
@@ -2716,7 +2720,7 @@ __kmp_wait_yield_4(volatile kmp_uint32 *spinner, kmp_uint32 checker,
   register kmp_uint32 (*f)(kmp_uint32, kmp_uint32) = pred;
   register kmp_uint32 r;
 
-  KMP_FSYNC_SPIN_INIT(obj, (void *)spin);
+  KMP_FSYNC_SPIN_INIT(obj, CCAST(kmp_uint32 *, spin));
   KMP_INIT_YIELD(spins);
   // main wait spin loop
   while (!f(r = TCR_4(*spin), check)) {
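The loop above reduces to a predicate-driven spin wait: poll *spin, test the observed value with pred against checker, and yield between polls. A minimal portable analogue (the KMP_INIT_YIELD/KMP_YIELD throttling collapsed into std::this_thread::yield; names are illustrative):

#include <atomic>
#include <cstdint>
#include <thread>

// Minimal analogue of __kmp_wait_yield_4: spin until pred(value, checker)
// holds, yielding between polls, and return the last value observed.
std::uint32_t wait_yield_4(const std::atomic<std::uint32_t> &spinner,
                           std::uint32_t checker,
                           bool (*pred)(std::uint32_t, std::uint32_t)) {
  std::uint32_t r;
  while (!pred(r = spinner.load(std::memory_order_acquire), checker))
    std::this_thread::yield(); // stand-in for KMP_YIELD throttling
  return r;
}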