Changeset View
Changeset View
Standalone View
Standalone View
openmp/runtime/src/kmp_dispatch.cpp
Show First 20 Lines • Show All 1,283 Lines • ▼ Show 20 Lines | #ifdef KMP_DEBUG | ||||
} | } | ||||
#endif | #endif | ||||
// activate non-empty buffer and let others steal from us | // activate non-empty buffer and let others steal from us | ||||
if (pr->u.p.count < (UT)pr->u.p.ub) | if (pr->u.p.count < (UT)pr->u.p.ub) | ||||
KMP_ATOMIC_ST_REL(&pr->steal_flag, READY); | KMP_ATOMIC_ST_REL(&pr->steal_flag, READY); | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
if (KMP_ATOMIC_LD_RLX(&v->steal_flag) != READY || | if (KMP_ATOMIC_LD_ACQ(&v->steal_flag) != READY || | ||||
v->u.p.count >= (UT)v->u.p.ub) { | v->u.p.count >= (UT)v->u.p.ub) { | ||||
AndreyChurbanov: This change fights with the consequence of the problem, not with the root cause. IMHO, the… | |||||
pr->u.p.parm4 = (victimId + 1) % nproc; // shift start victim tid | pr->u.p.parm4 = (victimId + 1) % nproc; // shift start victim tid | ||||
continue; // no chunks to steal, try next victim | continue; // no chunks to steal, try next victim | ||||
} | } | ||||
lckv = v->u.p.steal_lock; | lckv = v->u.p.steal_lock; | ||||
KMP_ASSERT(lckv != NULL); | KMP_ASSERT(lckv != NULL); | ||||
__kmp_acquire_lock(lckv, gtid); | __kmp_acquire_lock(lckv, gtid); | ||||
limit = v->u.p.ub; // keep initial ub | limit = v->u.p.ub; // keep initial ub | ||||
if (v->u.p.count >= limit) { | if (v->u.p.count >= limit) { | ||||
▲ Show 20 Lines • Show All 1,681 Lines • Show Last 20 Lines |
This change fights with the consequence of the problem, not with the root cause. IMHO, the better fix would be to change

    KMP_ATOMIC_LD_RLX(&v->steal_flag)

to

    KMP_ATOMIC_LD_ACQ(&v->steal_flag)

Then the acquire load would synchronize with the release store performed at buffer initialization, which should prevent the steal_lock pointer from ever being NULL here.