#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
#include "tsan_annotations.h"

#include <sys/syscall.h>

void __kmp_validate_locks(void) {
  x = ~((kmp_uint32)0) - 2;
  for (i = 0; i < 8; ++i, ++x, ++y) {
    kmp_uint32 z = (x - y);

  KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0);
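
// Test-and-set (TAS) locks: the lock word lk.poll is KMP_LOCK_FREE(tas) when
// the lock is free and KMP_LOCK_BUSY(gtid + 1, tas) when held, so the owner
// gtid can be recovered by stripping the tag and subtracting one.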
static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) {
  return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1;

static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) {
  return lck->lk.depth_locked != -1;

__forceinline static int
__kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
#ifdef USE_LOCK_PROFILE
  kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll);
  if ((curr != 0) && (curr != gtid + 1))
    __kmp_printf("LOCK CONTENTION: %p\n", lck);

  kmp_int32 tas_free = KMP_LOCK_FREE(tas);
  kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
  if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
      __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    KMP_FSYNC_ACQUIRED(lck);
    return KMP_LOCK_ACQUIRED_FIRST;

  KMP_FSYNC_PREPARE(lck);
  KMP_INIT_YIELD(spins);
  kmp_backoff_t backoff = __kmp_spin_backoff_params;
    __kmp_spin_backoff(&backoff);
    KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
  } while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
           !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy));
  KMP_FSYNC_ACQUIRED(lck);
  return KMP_LOCK_ACQUIRED_FIRST;

int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
  ANNOTATE_TAS_ACQUIRED(lck);
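
// Note: the acquire path above is a test-and-test-and-set loop: a relaxed
// load filters out obviously-held locks before the acquire-ordered
// compare-and-store is attempted, and __kmp_spin_backoff() adds exponential
// backoff between attempts to reduce coherence traffic under contention.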
static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  return __kmp_acquire_tas_lock(lck, gtid);

int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  kmp_int32 tas_free = KMP_LOCK_FREE(tas);
  kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
  if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
      __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    KMP_FSYNC_ACQUIRED(lck);

static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                           kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  return __kmp_test_tas_lock(lck, gtid);
int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_TAS_RELEASED(lck);
  KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));
  return KMP_LOCK_RELEASED;

static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if (__kmp_get_tas_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) &&
      (__kmp_get_tas_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  return __kmp_release_tas_lock(lck, gtid);

void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
  lck->lk.poll = KMP_LOCK_FREE(tas);

void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }

static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if (__kmp_get_tas_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  __kmp_destroy_tas_lock(lck);
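
// Nested TAS locks: depth_locked counts recursive acquisitions by the owning
// thread; a value of -1 marks a lock that was set up as a simple
// (non-nestable) lock, which is what __kmp_is_tas_lock_nestable tests.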
int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  if (__kmp_get_tas_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  __kmp_acquire_tas_lock_timed_template(lck, gtid);
  ANNOTATE_TAS_ACQUIRED(lck);
  lck->lk.depth_locked = 1;
  return KMP_LOCK_ACQUIRED_FIRST;

static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  return __kmp_acquire_nested_tas_lock(lck, gtid);

int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  if (__kmp_get_tas_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_tas_lock(lck, gtid)) {
    retval = lck->lk.depth_locked = 1;

static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                  kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  return __kmp_test_nested_tas_lock(lck, gtid);

int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  if (--(lck->lk.depth_locked) == 0) {
    __kmp_release_tas_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  return KMP_LOCK_STILL_HELD;

static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  if (__kmp_get_tas_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  if (__kmp_get_tas_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  return __kmp_release_nested_tas_lock(lck, gtid);

void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
  __kmp_init_tas_lock(lck);
  lck->lk.depth_locked = 0;

void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
  __kmp_destroy_tas_lock(lck);
  lck->lk.depth_locked = 0;

static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  if (__kmp_get_tas_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  __kmp_destroy_nested_tas_lock(lck);
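
// Futex locks (Linux only): the low bit of lk.poll flags that at least one
// waiter is blocked in the kernel, so the owner is encoded as
// (gtid + 1) << 1 and a contended release issues a FUTEX_WAKE syscall.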
static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
  return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;

static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
  return lck->lk.depth_locked != -1;

__forceinline static int
__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  kmp_int32 gtid_code = (gtid + 1) << 1;

#ifdef USE_LOCK_PROFILE
  kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
  if ((curr != 0) && (curr != gtid_code))
    __kmp_printf("LOCK CONTENTION: %p\n", lck);

  KMP_FSYNC_PREPARE(lck);
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));

  while ((poll_val = KMP_COMPARE_AND_STORE_RET32(
              &(lck->lk.poll), KMP_LOCK_FREE(futex),
              KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {

    kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
    KA_TRACE(1000,
             ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
              lck, gtid, poll_val, cond));

      if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
                                       poll_val | KMP_LOCK_BUSY(1, futex))) {
        KA_TRACE(1000,
                 ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
                  lck, lck->lk.poll, gtid));
      poll_val |= KMP_LOCK_BUSY(1, futex);
      KA_TRACE(1000,
               ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", lck,
                lck->lk.poll, gtid));

    KA_TRACE(1000,
             ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
              lck, gtid, poll_val));

    if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
      KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
                      "failed (rc=%ld errno=%d)\n",
                      lck, gtid, poll_val, rc, errno));

    KA_TRACE(1000,
             ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
              lck, gtid, poll_val));

  KMP_FSYNC_ACQUIRED(lck);
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
                  lck->lk.poll, gtid));
  return KMP_LOCK_ACQUIRED_FIRST;
int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
  ANNOTATE_FUTEX_ACQUIRED(lck);

static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  return __kmp_acquire_futex_lock(lck, gtid);

int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
                                  KMP_LOCK_BUSY((gtid + 1) << 1, futex))) {
    KMP_FSYNC_ACQUIRED(lck);

static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                             kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  return __kmp_test_futex_lock(lck, gtid);

int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));

  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_FUTEX_RELEASED(lck);

  kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));

  KA_TRACE(1000,
           ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
            lck, gtid, poll_val));

  if (KMP_LOCK_STRIP(poll_val) & 1) {
    KA_TRACE(1000,
             ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
    syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),

  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
                  lck->lk.poll, gtid));

  return KMP_LOCK_RELEASED;
static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if (__kmp_get_futex_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
      (__kmp_get_futex_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  return __kmp_release_futex_lock(lck, gtid);

void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
  TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));

void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }

static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if (__kmp_get_futex_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  __kmp_destroy_futex_lock(lck);
int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  __kmp_acquire_futex_lock_timed_template(lck, gtid);
  ANNOTATE_FUTEX_ACQUIRED(lck);
  lck->lk.depth_locked = 1;
  return KMP_LOCK_ACQUIRED_FIRST;

static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  return __kmp_acquire_nested_futex_lock(lck, gtid);

int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_futex_lock(lck, gtid)) {
    retval = lck->lk.depth_locked = 1;

static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                    kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  return __kmp_test_nested_futex_lock(lck, gtid);

int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  if (--(lck->lk.depth_locked) == 0) {
    __kmp_release_futex_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  return KMP_LOCK_STILL_HELD;

static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  if (__kmp_get_futex_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  if (__kmp_get_futex_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  return __kmp_release_nested_futex_lock(lck, gtid);

void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_init_futex_lock(lck);
  lck->lk.depth_locked = 0;

void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_destroy_futex_lock(lck);
  lck->lk.depth_locked = 0;

static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  if (__kmp_get_futex_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  __kmp_destroy_nested_futex_lock(lck);
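
// Ticket locks: next_ticket is incremented by each arriving thread and
// now_serving is advanced on release, giving FIFO ordering; the counters are
// C++11 std::atomic with explicit memory orders (acquire on the wait,
// release on the handoff).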
static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) {
  return std::atomic_load_explicit(&lck->lk.owner_id,
                                   std::memory_order_relaxed) - 1;

static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) {
  return std::atomic_load_explicit(&lck->lk.depth_locked,
                                   std::memory_order_relaxed) != -1;

static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) {
  return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
                                   std::memory_order_acquire) == my_ticket;

__forceinline static int
__kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid) {
  kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
      &lck->lk.next_ticket, 1U, std::memory_order_relaxed);

#ifdef USE_LOCK_PROFILE
  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) != my_ticket)
    __kmp_printf("LOCK CONTENTION: %p\n", lck);

  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_acquire) == my_ticket) {
    return KMP_LOCK_ACQUIRED_FIRST;
  KMP_WAIT_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
  return KMP_LOCK_ACQUIRED_FIRST;

int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
  ANNOTATE_TICKET_ACQUIRED(lck);
static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                 kmp_int32 gtid) {
  char const *const func = "omp_set_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);

  __kmp_acquire_ticket_lock(lck, gtid);

  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                             std::memory_order_relaxed);
  return KMP_LOCK_ACQUIRED_FIRST;

int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                   std::memory_order_relaxed);

  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) == my_ticket) {
    kmp_uint32 next_ticket = my_ticket + 1;
    if (std::atomic_compare_exchange_strong_explicit(
            &lck->lk.next_ticket, &my_ticket, next_ticket,
            std::memory_order_acquire, std::memory_order_acquire)) {
static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_test_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);

  int retval = __kmp_test_ticket_lock(lck, gtid);

    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);

int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                  std::memory_order_relaxed) -
                        std::atomic_load_explicit(&lck->lk.now_serving,
                                                  std::memory_order_relaxed);

  ANNOTATE_TICKET_RELEASED(lck);
  std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
                                 std::memory_order_release);

            (kmp_uint32)(__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
  return KMP_LOCK_RELEASED;

static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                 kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if (__kmp_get_ticket_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) &&
      (__kmp_get_ticket_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  return __kmp_release_ticket_lock(lck, gtid);
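
// Note: in __kmp_release_ticket_lock the distance between next_ticket and
// now_serving approximates how many threads are still queued; it is compared
// against the available processor count (the surviving fragment above) to
// decide whether the releasing thread should yield.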
void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
  lck->lk.location = NULL;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, -1,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.initialized, true,
                             std::memory_order_release);

void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
  std::atomic_store_explicit(&lck->lk.initialized, false,
                             std::memory_order_release);
  lck->lk.location = NULL;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, -1,
                             std::memory_order_relaxed);

static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  char const *const func = "omp_destroy_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  if (__kmp_get_ticket_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  __kmp_destroy_ticket_lock(lck);
808 int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
809 KMP_DEBUG_ASSERT(gtid >= 0);
811 if (__kmp_get_ticket_lock_owner(lck) == gtid) {
812 std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
813 std::memory_order_relaxed);
814 return KMP_LOCK_ACQUIRED_NEXT;
816 __kmp_acquire_ticket_lock_timed_template(lck, gtid);
817 ANNOTATE_TICKET_ACQUIRED(lck);
818 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
819 std::memory_order_relaxed);
820 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
821 std::memory_order_relaxed);
822 return KMP_LOCK_ACQUIRED_FIRST;
826 static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
char const *const func = "omp_set_nest_lock";
830 if (!std::atomic_load_explicit(&lck->lk.initialized,
831 std::memory_order_relaxed)) {
832 KMP_FATAL(LockIsUninitialized, func);
834 if (lck->lk.self != lck) {
835 KMP_FATAL(LockIsUninitialized, func);
837 if (!__kmp_is_ticket_lock_nestable(lck)) {
838 KMP_FATAL(LockSimpleUsedAsNestable, func);
840 return __kmp_acquire_nested_ticket_lock(lck, gtid);
843 int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
846 KMP_DEBUG_ASSERT(gtid >= 0);
848 if (__kmp_get_ticket_lock_owner(lck) == gtid) {
849 retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
850 std::memory_order_relaxed) +
} else if (!__kmp_test_ticket_lock(lck, gtid)) {
855 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
856 std::memory_order_relaxed);
857 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
858 std::memory_order_relaxed);
864 static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
char const *const func = "omp_test_nest_lock";
868 if (!std::atomic_load_explicit(&lck->lk.initialized,
869 std::memory_order_relaxed)) {
870 KMP_FATAL(LockIsUninitialized, func);
872 if (lck->lk.self != lck) {
873 KMP_FATAL(LockIsUninitialized, func);
875 if (!__kmp_is_ticket_lock_nestable(lck)) {
876 KMP_FATAL(LockSimpleUsedAsNestable, func);
878 return __kmp_test_nested_ticket_lock(lck, gtid);
881 int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
882 KMP_DEBUG_ASSERT(gtid >= 0);
884 if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
885 std::memory_order_relaxed) -
887 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
888 __kmp_release_ticket_lock(lck, gtid);
889 return KMP_LOCK_RELEASED;
891 return KMP_LOCK_STILL_HELD;
894 static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
char const *const func = "omp_unset_nest_lock";
898 if (!std::atomic_load_explicit(&lck->lk.initialized,
899 std::memory_order_relaxed)) {
900 KMP_FATAL(LockIsUninitialized, func);
902 if (lck->lk.self != lck) {
903 KMP_FATAL(LockIsUninitialized, func);
905 if (!__kmp_is_ticket_lock_nestable(lck)) {
906 KMP_FATAL(LockSimpleUsedAsNestable, func);
908 if (__kmp_get_ticket_lock_owner(lck) == -1) {
909 KMP_FATAL(LockUnsettingFree, func);
911 if (__kmp_get_ticket_lock_owner(lck) != gtid) {
912 KMP_FATAL(LockUnsettingSetByAnother, func);
914 return __kmp_release_nested_ticket_lock(lck, gtid);
917 void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
918 __kmp_init_ticket_lock(lck);
919 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
920 std::memory_order_relaxed);
924 void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
925 __kmp_destroy_ticket_lock(lck);
926 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
927 std::memory_order_relaxed);
931 __kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
char const *const func = "omp_destroy_nest_lock";
934 if (!std::atomic_load_explicit(&lck->lk.initialized,
935 std::memory_order_relaxed)) {
936 KMP_FATAL(LockIsUninitialized, func);
938 if (lck->lk.self != lck) {
939 KMP_FATAL(LockIsUninitialized, func);
941 if (!__kmp_is_ticket_lock_nestable(lck)) {
942 KMP_FATAL(LockSimpleUsedAsNestable, func);
944 if (__kmp_get_ticket_lock_owner(lck) != -1) {
945 KMP_FATAL(LockStillOwned, func);
947 __kmp_destroy_nested_ticket_lock(lck);
952 static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
953 return lck->lk.location;
956 static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
958 lck->lk.location = loc;
961 static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) {
962 return lck->lk.flags;
965 static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
966 kmp_lock_flags_t flags) {
967 lck->lk.flags = flags;
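
// Queuing locks: waiting threads form a linked list through
// th.th_next_waiting, with lk.head_id / lk.tail_id holding 1-based gtids
// (0 = empty queue, head_id == -1 = lock held with no waiters). Each waiter
// spins only on its own th_spin_here flag, and the releaser hands the lock
// directly to the queue head.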
#ifdef DEBUG_QUEUING_LOCKS

#define TRACE_BUF_ELE 1024
static char traces[TRACE_BUF_ELE][128] = {0};
#define TRACE_LOCK(X, Y)                                                       \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
#define TRACE_LOCK_T(X, Y, Z)                                                  \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
#define TRACE_LOCK_HT(X, Y, Z, Q)                                              \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y,   \

static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
                                    kmp_queuing_lock_t *lck, kmp_int32 head_id,
                                    kmp_int32 tail_id) {
  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");

  i = tc % TRACE_BUF_ELE;
  __kmp_printf_no_lock("%s\n", traces[i]);
  i = (i + 1) % TRACE_BUF_ELE;
  while (i != (tc % TRACE_BUF_ELE)) {
    __kmp_printf_no_lock("%s", traces[i]);
    i = (i + 1) % TRACE_BUF_ELE;
  __kmp_printf_no_lock("\n");

  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
                       "next_wait:%d, head_id:%d, tail_id:%d\n",
                       gtid + 1, this_thr->th.th_spin_here,
                       this_thr->th.th_next_waiting, head_id, tail_id);

  __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);

  if (lck->lk.head_id >= 1) {
    t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
      __kmp_printf_no_lock("-> %d ", t);
      t = __kmp_threads[t - 1]->th.th_next_waiting;
  __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
  __kmp_printf_no_lock("\n\n");

static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) {
  return TCR_4(lck->lk.owner_id) - 1;

static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) {
  return lck->lk.depth_locked != -1;
template <bool takeTime>
__forceinline static int
__kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid) {
  kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
  volatile kmp_uint32 *spin_here_p;
  kmp_int32 need_mf = 1;

  ompt_state_t prev_state = ompt_state_undefined;

  KA_TRACE(1000,
           ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));

  KMP_FSYNC_PREPARE(lck);
  KMP_DEBUG_ASSERT(this_thr != NULL);
  spin_here_p = &this_thr->th.th_spin_here;

#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "acq ent");
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  KMP_DEBUG_ASSERT(!*spin_here_p);
  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);

  *spin_here_p = TRUE;

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
      enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
                                             KMP_PACK_64(gtid + 1, gtid + 1));
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");

      KMP_DEBUG_ASSERT(tail != gtid + 1);
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);

      enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");

      kmp_int32 grabbed_lock;
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);

      grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1);
        *spin_here_p = FALSE;

        KA_TRACE(1000,
                 ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);

        if (ompt_enabled.enabled && prev_state != ompt_state_undefined) {
          this_thr->th.ompt_thread_info.state = prev_state;
          this_thr->th.ompt_thread_info.wait_id = 0;

        KMP_FSYNC_ACQUIRED(lck);
        return KMP_LOCK_ACQUIRED_FIRST;

      if (ompt_enabled.enabled && prev_state == ompt_state_undefined) {
        prev_state = this_thr->th.ompt_thread_info.state;
        this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
        this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;

        kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1);
        KMP_ASSERT(tail_thr != NULL);
        tail_thr->th.th_next_waiting = gtid + 1;

      KA_TRACE(1000,
               ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
      KMP_WAIT(spin_here_p, FALSE, KMP_EQ, lck);

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq spin");

      if (this_thr->th.th_next_waiting != 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
      KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
      KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
                      "waiting on queue\n",
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq exit 2");

        this_thr->th.ompt_thread_info.state = prev_state;
        this_thr->th.ompt_thread_info.wait_id = 0;

      return KMP_LOCK_ACQUIRED_FIRST;

    KMP_YIELD_OVERSUB();

#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "acq retry");

  KMP_ASSERT2(0, "should not get here");
  return KMP_LOCK_ACQUIRED_FIRST;
1284 int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1285 KMP_DEBUG_ASSERT(gtid >= 0);
1287 int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1288 ANNOTATE_QUEUING_ACQUIRED(lck);
1292 static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
char const *const func = "omp_set_lock";
1295 if (lck->lk.initialized != lck) {
1296 KMP_FATAL(LockIsUninitialized, func);
1298 if (__kmp_is_queuing_lock_nestable(lck)) {
1299 KMP_FATAL(LockNestableUsedAsSimple, func);
1301 if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1302 KMP_FATAL(LockIsAlreadyOwned, func);
1305 __kmp_acquire_queuing_lock(lck, gtid);
1307 lck->lk.owner_id = gtid + 1;
1308 return KMP_LOCK_ACQUIRED_FIRST;
1311 int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1312 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1315 kmp_info_t *this_thr;
1318 KA_TRACE(1000, (
"__kmp_test_queuing_lock: T#%d entering\n", gtid));
1319 KMP_DEBUG_ASSERT(gtid >= 0);
1321 this_thr = __kmp_thread_from_gtid(gtid);
1322 KMP_DEBUG_ASSERT(this_thr != NULL);
1323 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
1330 if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) {
1332 (
"__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
1333 KMP_FSYNC_ACQUIRED(lck);
1334 ANNOTATE_QUEUING_ACQUIRED(lck);
1340 (
"__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
1344 static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
char const *const func = "omp_test_lock";
1347 if (lck->lk.initialized != lck) {
1348 KMP_FATAL(LockIsUninitialized, func);
1350 if (__kmp_is_queuing_lock_nestable(lck)) {
1351 KMP_FATAL(LockNestableUsedAsSimple, func);
1354 int retval = __kmp_test_queuing_lock(lck, gtid);
1357 lck->lk.owner_id = gtid + 1;
int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  kmp_info_t *this_thr;
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;

  KA_TRACE(1000,
           ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
  KMP_DEBUG_ASSERT(gtid >= 0);
  this_thr = __kmp_thread_from_gtid(gtid);
  KMP_DEBUG_ASSERT(this_thr != NULL);
#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "rel ent");

  if (this_thr->th.th_spin_here)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);

  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_QUEUING_RELEASED(lck);

#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
      __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
    KMP_DEBUG_ASSERT(head !=

    if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) {
      KA_TRACE(1000,
               ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);

      return KMP_LOCK_RELEASED;

#ifdef DEBUG_QUEUING_LOCKS
      __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
    KMP_DEBUG_ASSERT(head > 0);

      dequeued = KMP_COMPARE_AND_STORE_REL64(
          RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head),
          KMP_PACK_64(-1, 0));
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");

      volatile kmp_int32 *waiting_id_p;
      kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
      KMP_DEBUG_ASSERT(head_thr != NULL);
      waiting_id_p = &head_thr->th.th_next_waiting;

#ifdef DEBUG_QUEUING_LOCKS
      if (head <= 0 || tail <= 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
      KMP_DEBUG_ASSERT(head > 0 && tail > 0);

      KMP_WAIT((volatile kmp_uint32 *)waiting_id_p, 0, KMP_NEQ, NULL);
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");

      kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
      KMP_DEBUG_ASSERT(head_thr != NULL);

#ifdef DEBUG_QUEUING_LOCKS
      if (head <= 0 || tail <= 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
      KMP_DEBUG_ASSERT(head > 0 && tail > 0);

      head_thr->th.th_next_waiting = 0;
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);

      head_thr->th.th_spin_here = FALSE;

      KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "rel exit 2");

      return KMP_LOCK_RELEASED;

#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "rel retry");

  KMP_ASSERT2(0, "should not get here");
  return KMP_LOCK_RELEASED;
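
// Note: the release path above distinguishes three cases: an empty queue
// (head_id CAS from -1 to 0), a single waiter (the packed 64-bit CAS that
// clears head and tail together), and multiple waiters (advance the head,
// then clear the woken thread's th_next_waiting and th_spin_here so it drops
// out of its spin loop).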
1505 static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
char const *const func = "omp_unset_lock";
1509 if (lck->lk.initialized != lck) {
1510 KMP_FATAL(LockIsUninitialized, func);
1512 if (__kmp_is_queuing_lock_nestable(lck)) {
1513 KMP_FATAL(LockNestableUsedAsSimple, func);
1515 if (__kmp_get_queuing_lock_owner(lck) == -1) {
1516 KMP_FATAL(LockUnsettingFree, func);
1518 if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1519 KMP_FATAL(LockUnsettingSetByAnother, func);
1521 lck->lk.owner_id = 0;
1522 return __kmp_release_queuing_lock(lck, gtid);
1525 void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
1526 lck->lk.location = NULL;
1527 lck->lk.head_id = 0;
1528 lck->lk.tail_id = 0;
1529 lck->lk.next_ticket = 0;
1530 lck->lk.now_serving = 0;
1531 lck->lk.owner_id = 0;
1532 lck->lk.depth_locked = -1;
1533 lck->lk.initialized = lck;
1535 KA_TRACE(1000, (
"__kmp_init_queuing_lock: lock %p initialized\n", lck));
1538 void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
1539 lck->lk.initialized = NULL;
1540 lck->lk.location = NULL;
1541 lck->lk.head_id = 0;
1542 lck->lk.tail_id = 0;
1543 lck->lk.next_ticket = 0;
1544 lck->lk.now_serving = 0;
1545 lck->lk.owner_id = 0;
1546 lck->lk.depth_locked = -1;
1549 static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
char const *const func = "omp_destroy_lock";
1551 if (lck->lk.initialized != lck) {
1552 KMP_FATAL(LockIsUninitialized, func);
1554 if (__kmp_is_queuing_lock_nestable(lck)) {
1555 KMP_FATAL(LockNestableUsedAsSimple, func);
1557 if (__kmp_get_queuing_lock_owner(lck) != -1) {
1558 KMP_FATAL(LockStillOwned, func);
1560 __kmp_destroy_queuing_lock(lck);
1565 int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1566 KMP_DEBUG_ASSERT(gtid >= 0);
1568 if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1569 lck->lk.depth_locked += 1;
1570 return KMP_LOCK_ACQUIRED_NEXT;
1572 __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1573 ANNOTATE_QUEUING_ACQUIRED(lck);
1575 lck->lk.depth_locked = 1;
1577 lck->lk.owner_id = gtid + 1;
1578 return KMP_LOCK_ACQUIRED_FIRST;
1583 __kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
char const *const func = "omp_set_nest_lock";
1586 if (lck->lk.initialized != lck) {
1587 KMP_FATAL(LockIsUninitialized, func);
1589 if (!__kmp_is_queuing_lock_nestable(lck)) {
1590 KMP_FATAL(LockSimpleUsedAsNestable, func);
1592 return __kmp_acquire_nested_queuing_lock(lck, gtid);
1595 int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1598 KMP_DEBUG_ASSERT(gtid >= 0);
1600 if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1601 retval = ++lck->lk.depth_locked;
} else if (!__kmp_test_queuing_lock(lck, gtid)) {
1606 retval = lck->lk.depth_locked = 1;
1608 lck->lk.owner_id = gtid + 1;
1613 static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
char const *const func = "omp_test_nest_lock";
1616 if (lck->lk.initialized != lck) {
1617 KMP_FATAL(LockIsUninitialized, func);
1619 if (!__kmp_is_queuing_lock_nestable(lck)) {
1620 KMP_FATAL(LockSimpleUsedAsNestable, func);
1622 return __kmp_test_nested_queuing_lock(lck, gtid);
1625 int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1626 KMP_DEBUG_ASSERT(gtid >= 0);
1629 if (--(lck->lk.depth_locked) == 0) {
1631 lck->lk.owner_id = 0;
1632 __kmp_release_queuing_lock(lck, gtid);
1633 return KMP_LOCK_RELEASED;
1635 return KMP_LOCK_STILL_HELD;
1639 __kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
char const *const func = "omp_unset_nest_lock";
1643 if (lck->lk.initialized != lck) {
1644 KMP_FATAL(LockIsUninitialized, func);
1646 if (!__kmp_is_queuing_lock_nestable(lck)) {
1647 KMP_FATAL(LockSimpleUsedAsNestable, func);
1649 if (__kmp_get_queuing_lock_owner(lck) == -1) {
1650 KMP_FATAL(LockUnsettingFree, func);
1652 if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1653 KMP_FATAL(LockUnsettingSetByAnother, func);
1655 return __kmp_release_nested_queuing_lock(lck, gtid);
1658 void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1659 __kmp_init_queuing_lock(lck);
1660 lck->lk.depth_locked = 0;
1663 void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1664 __kmp_destroy_queuing_lock(lck);
1665 lck->lk.depth_locked = 0;
1669 __kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
char const *const func = "omp_destroy_nest_lock";
1671 if (lck->lk.initialized != lck) {
1672 KMP_FATAL(LockIsUninitialized, func);
1674 if (!__kmp_is_queuing_lock_nestable(lck)) {
1675 KMP_FATAL(LockSimpleUsedAsNestable, func);
1677 if (__kmp_get_queuing_lock_owner(lck) != -1) {
1678 KMP_FATAL(LockStillOwned, func);
1680 __kmp_destroy_nested_queuing_lock(lck);
1685 static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
1686 return lck->lk.location;
1689 static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
1691 lck->lk.location = loc;
1694 static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) {
1695 return lck->lk.flags;
1698 static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
1699 kmp_lock_flags_t flags) {
1700 lck->lk.flags = flags;
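
// Adaptive locks: a queuing lock paired with hardware transactional memory
// (Intel RTM). Threads first try to run the critical section speculatively
// inside an _xbegin()/_xend() transaction and fall back to really acquiring
// the underlying queuing lock only when speculation keeps failing;
// lk.adaptive.badness throttles how often speculation is retried.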
1703 #if KMP_USE_ADAPTIVE_LOCKS
1707 #if KMP_HAVE_RTM_INTRINSICS
1708 #include <immintrin.h>
1709 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
1714 #define _XBEGIN_STARTED (~0u)
1715 #define _XABORT_EXPLICIT (1 << 0)
1716 #define _XABORT_RETRY (1 << 1)
1717 #define _XABORT_CONFLICT (1 << 2)
1718 #define _XABORT_CAPACITY (1 << 3)
1719 #define _XABORT_DEBUG (1 << 4)
1720 #define _XABORT_NESTED (1 << 5)
1721 #define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))
1724 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
1726 #define STRINGIZE_INTERNAL(arg) #arg
1727 #define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)
static __inline int _xbegin() {
  __asm__ volatile("1: .byte 0xC7; .byte 0xF8;\n"
                   "1: movl %%eax,%0\n"
                   : "+r"(res)::"memory", "%eax");

static __inline void _xend() {
  __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");

#define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG

#define _xabort(ARG)                                                           \
  __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
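
// The inline-assembly definitions above emit the raw XBEGIN/XEND/XABORT
// instruction encodings so the adaptive lock can be built with toolchains
// that lack the RTM intrinsics; when KMP_HAVE_RTM_INTRINSICS is set,
// <immintrin.h> supplies _xbegin()/_xend()/_xabort() instead.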
1807 #if KMP_DEBUG_ADAPTIVE_LOCKS
1812 static kmp_adaptive_lock_statistics_t destroyedStats;
1815 static kmp_adaptive_lock_info_t liveLocks;
1818 static kmp_bootstrap_lock_t chain_lock =
1819 KMP_BOOTSTRAP_LOCK_INITIALIZER(chain_lock);
1822 void __kmp_init_speculative_stats() {
1823 kmp_adaptive_lock_info_t *lck = &liveLocks;
1825 memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0,
1826 sizeof(lck->stats));
1827 lck->stats.next = lck;
1828 lck->stats.prev = lck;
1830 KMP_ASSERT(lck->stats.next->stats.prev == lck);
1831 KMP_ASSERT(lck->stats.prev->stats.next == lck);
1833 __kmp_init_bootstrap_lock(&chain_lock);
1837 static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
1838 __kmp_acquire_bootstrap_lock(&chain_lock);
1840 lck->stats.next = liveLocks.stats.next;
1841 lck->stats.prev = &liveLocks;
1843 liveLocks.stats.next = lck;
1844 lck->stats.next->stats.prev = lck;
1846 KMP_ASSERT(lck->stats.next->stats.prev == lck);
1847 KMP_ASSERT(lck->stats.prev->stats.next == lck);
1849 __kmp_release_bootstrap_lock(&chain_lock);
1852 static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
1853 KMP_ASSERT(lck->stats.next->stats.prev == lck);
1854 KMP_ASSERT(lck->stats.prev->stats.next == lck);
1856 kmp_adaptive_lock_info_t *n = lck->stats.next;
1857 kmp_adaptive_lock_info_t *p = lck->stats.prev;
1863 static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1864 memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0,
1865 sizeof(lck->stats));
1866 __kmp_remember_lock(lck);
1869 static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
1870 kmp_adaptive_lock_info_t *lck) {
kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
1873 t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
1874 t->successfulSpeculations += s->successfulSpeculations;
1875 t->hardFailedSpeculations += s->hardFailedSpeculations;
1876 t->softFailedSpeculations += s->softFailedSpeculations;
1877 t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
1878 t->lemmingYields += s->lemmingYields;
1881 static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1882 __kmp_acquire_bootstrap_lock(&chain_lock);
1884 __kmp_add_stats(&destroyedStats, lck);
1885 __kmp_forget_lock(lck);
1887 __kmp_release_bootstrap_lock(&chain_lock);
1890 static float percent(kmp_uint32 count, kmp_uint32 total) {
1891 return (total == 0) ? 0.0 : (100.0 * count) / total;
1894 void __kmp_print_speculative_stats() {
1895 kmp_adaptive_lock_statistics_t total = destroyedStats;
1896 kmp_adaptive_lock_info_t *lck;
1898 for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
1899 __kmp_add_stats(&total, lck);
1901 kmp_adaptive_lock_statistics_t *t = &total;
1902 kmp_uint32 totalSections =
1903 t->nonSpeculativeAcquires + t->successfulSpeculations;
1904 kmp_uint32 totalSpeculations = t->successfulSpeculations +
1905 t->hardFailedSpeculations +
1906 t->softFailedSpeculations;
1907 if (totalSections <= 0)
  if (strcmp(__kmp_speculative_statsfile, "-") == 0) {
    size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
    char buffer[buffLen];
    KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
                 (kmp_int32)getpid());
    statsFile.open(buffer, "w");

  fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
  fprintf(statsFile, " Lock parameters: \n"
                     " max_soft_retries : %10d\n"
                     " max_badness : %10d\n",
          __kmp_adaptive_backoff_params.max_soft_retries,
          __kmp_adaptive_backoff_params.max_badness);
  fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
          t->nonSpeculativeAcquireAttempts);
  fprintf(statsFile, " Total critical sections : %10d\n",
  fprintf(statsFile, " Successful speculations : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSections));
  fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n",
          t->nonSpeculativeAcquires,
          percent(t->nonSpeculativeAcquires, totalSections));
  fprintf(statsFile, " Lemming yields : %10d\n\n",

  fprintf(statsFile, " Speculative acquire attempts : %10d\n",
  fprintf(statsFile, " Successes : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSpeculations));
  fprintf(statsFile, " Soft failures : %10d (%5.1f%%)\n",
          t->softFailedSpeculations,
          percent(t->softFailedSpeculations, totalSpeculations));
  fprintf(statsFile, " Hard failures : %10d (%5.1f%%)\n",
          t->hardFailedSpeculations,
          percent(t->hardFailedSpeculations, totalSpeculations));
1953 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
1955 #define KMP_INC_STAT(lck, stat)
1959 static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) {
1962 bool res = lck->lk.head_id == 0;
1966 #if KMP_COMPILER_ICC
1969 __sync_synchronize();
static __inline void
__kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
1979 lck->lk.adaptive.badness = 0;
1980 KMP_INC_STAT(lck, successfulSpeculations);
static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
1985 kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
1986 if (newBadness > lck->lk.adaptive.max_badness) {
1989 lck->lk.adaptive.badness = newBadness;
1994 KMP_ATTRIBUTE_TARGET_RTM
static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
1997 kmp_uint32 badness = lck->lk.adaptive.badness;
1998 kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
1999 int res = (attempts & badness) == 0;
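
// A minimal reading of the badness scheme: badness is a bit mask that grows
// (shifts in a 1) after each failed speculation and resets to 0 after a
// success, so (acquire_attempts & badness) == 0 lets speculation through only
// on a geometrically thinning subset of attempts while the lock keeps
// aborting.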
2005 KMP_ATTRIBUTE_TARGET_RTM
2006 static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
2008 int retries = lck->lk.adaptive.max_soft_retries;
2015 kmp_uint32 status = _xbegin();
2020 if (status == _XBEGIN_STARTED) {
2025 if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2029 KMP_ASSERT2(0,
"should not get here");
2034 if (status & SOFT_ABORT_MASK) {
2035 KMP_INC_STAT(lck, softFailedSpeculations);
2038 KMP_INC_STAT(lck, hardFailedSpeculations);
} while (retries--);
2047 __kmp_step_badness(lck);
2054 static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
2056 if (__kmp_should_speculate(lck, gtid) &&
2057 __kmp_test_adaptive_lock_only(lck, gtid))
2062 lck->lk.adaptive.acquire_attempts++;
2065 if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
2066 KMP_INC_STAT(lck, nonSpeculativeAcquires);
2073 static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
char const *const func = "omp_test_lock";
2076 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2077 KMP_FATAL(LockIsUninitialized, func);
2080 int retval = __kmp_test_adaptive_lock(lck, gtid);
2083 lck->lk.qlk.owner_id = gtid + 1;
2099 static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
2101 if (__kmp_should_speculate(lck, gtid)) {
2102 if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2103 if (__kmp_test_adaptive_lock_only(lck, gtid))
2112 while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2113 KMP_INC_STAT(lck, lemmingYields);
2117 if (__kmp_test_adaptive_lock_only(lck, gtid))
2124 lck->lk.adaptive.acquire_attempts++;
2126 __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
2128 KMP_INC_STAT(lck, nonSpeculativeAcquires);
2129 ANNOTATE_QUEUING_ACQUIRED(lck);
2132 static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
char const *const func = "omp_set_lock";
2135 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2136 KMP_FATAL(LockIsUninitialized, func);
2138 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
2139 KMP_FATAL(LockIsAlreadyOwned, func);
2142 __kmp_acquire_adaptive_lock(lck, gtid);
2144 lck->lk.qlk.owner_id = gtid + 1;
2147 KMP_ATTRIBUTE_TARGET_RTM
2148 static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
2150 if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
2155 __kmp_update_badness_after_success(lck);
2158 __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid);
2160 return KMP_LOCK_RELEASED;
2163 static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
char const *const func = "omp_unset_lock";
2167 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2168 KMP_FATAL(LockIsUninitialized, func);
2170 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) {
2171 KMP_FATAL(LockUnsettingFree, func);
2173 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) {
2174 KMP_FATAL(LockUnsettingSetByAnother, func);
2176 lck->lk.qlk.owner_id = 0;
2177 __kmp_release_adaptive_lock(lck, gtid);
2178 return KMP_LOCK_RELEASED;
2181 static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
2182 __kmp_init_queuing_lock(GET_QLK_PTR(lck));
2183 lck->lk.adaptive.badness = 0;
2184 lck->lk.adaptive.acquire_attempts = 0;
2185 lck->lk.adaptive.max_soft_retries =
2186 __kmp_adaptive_backoff_params.max_soft_retries;
2187 lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
2188 #if KMP_DEBUG_ADAPTIVE_LOCKS
2189 __kmp_zero_speculative_stats(&lck->lk.adaptive);
2191 KA_TRACE(1000, (
"__kmp_init_adaptive_lock: lock %p initialized\n", lck));
2194 static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
2195 #if KMP_DEBUG_ADAPTIVE_LOCKS
2196 __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
2198 __kmp_destroy_queuing_lock(GET_QLK_PTR(lck));
2202 static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
char const *const func = "omp_destroy_lock";
2204 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2205 KMP_FATAL(LockIsUninitialized, func);
2207 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) {
2208 KMP_FATAL(LockStillOwned, func);
2210 __kmp_destroy_adaptive_lock(lck);
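
// DRDPA ticket locks: a distributed variant of the ticket lock in which each
// ticket holder spins on its own slot of the lk.polls array
// (polls[ticket & mask]) rather than on a single shared now_serving counter;
// the acquire path can reallocate a larger polling array when the number of
// waiters outgrows it and frees the old array only after every in-flight
// ticket has moved past it (cleanup_ticket).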
2219 static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) {
2220 return lck->lk.owner_id - 1;
2223 static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) {
2224 return lck->lk.depth_locked != -1;
__forceinline static int
2228 __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2229 kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
2230 kmp_uint64 mask = lck->lk.mask;
2231 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2233 #ifdef USE_LOCK_PROFILE
2234 if (polls[ticket & mask] != ticket)
2235 __kmp_printf(
"LOCK CONTENTION: %p\n", lck);
2247 KMP_FSYNC_PREPARE(lck);
2248 KMP_INIT_YIELD(spins);
2249 while (polls[ticket & mask] < ticket) {
2250 KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
2258 mask = lck->lk.mask;
2259 polls = lck->lk.polls;
2263 KMP_FSYNC_ACQUIRED(lck);
2264 KA_TRACE(1000, (
"__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
2266 lck->lk.now_serving = ticket;
2273 if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
2274 __kmp_free(lck->lk.old_polls);
2275 lck->lk.old_polls = NULL;
2276 lck->lk.cleanup_ticket = 0;
2282 if (lck->lk.old_polls == NULL) {
bool reconfigure = false;
2284 std::atomic<kmp_uint64> *old_polls = polls;
2285 kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
2287 if (TCR_4(__kmp_nth) >
2288 (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
2291 if (num_polls > 1) {
2293 num_polls = TCR_4(lck->lk.num_polls);
2296 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2304 kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
2305 if (num_waiting > num_polls) {
2306 kmp_uint32 old_num_polls = num_polls;
2309 mask = (mask << 1) | 1;
} while (num_polls <= num_waiting);
2317 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2320 for (i = 0; i < old_num_polls; i++) {
2321 polls[i].store(old_polls[i]);
2336 KA_TRACE(1000, (
"__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
2337 "lock %p to %d polls\n",
2338 ticket, lck, num_polls));
2340 lck->lk.old_polls = old_polls;
2341 lck->lk.polls = polls;
2345 lck->lk.num_polls = num_polls;
2346 lck->lk.mask = mask;
2354 lck->lk.cleanup_ticket = lck->lk.next_ticket;
2357 return KMP_LOCK_ACQUIRED_FIRST;
2360 int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2361 int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2362 ANNOTATE_DRDPA_ACQUIRED(lck);
2366 static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
char const *const func = "omp_set_lock";
2369 if (lck->lk.initialized != lck) {
2370 KMP_FATAL(LockIsUninitialized, func);
2372 if (__kmp_is_drdpa_lock_nestable(lck)) {
2373 KMP_FATAL(LockNestableUsedAsSimple, func);
2375 if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) {
2376 KMP_FATAL(LockIsAlreadyOwned, func);
2379 __kmp_acquire_drdpa_lock(lck, gtid);
2381 lck->lk.owner_id = gtid + 1;
2382 return KMP_LOCK_ACQUIRED_FIRST;
2385 int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2388 kmp_uint64 ticket = lck->lk.next_ticket;
2389 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2390 kmp_uint64 mask = lck->lk.mask;
2391 if (polls[ticket & mask] == ticket) {
2392 kmp_uint64 next_ticket = ticket + 1;
2393 if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
2395 KMP_FSYNC_ACQUIRED(lck);
2396 KA_TRACE(1000, (
"__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
2398 lck->lk.now_serving = ticket;
2412 static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
char const *const func = "omp_test_lock";
2415 if (lck->lk.initialized != lck) {
2416 KMP_FATAL(LockIsUninitialized, func);
2418 if (__kmp_is_drdpa_lock_nestable(lck)) {
2419 KMP_FATAL(LockNestableUsedAsSimple, func);
2422 int retval = __kmp_test_drdpa_lock(lck, gtid);
2425 lck->lk.owner_id = gtid + 1;
2430 int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2433 kmp_uint64 ticket = lck->lk.now_serving + 1;
2434 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2435 kmp_uint64 mask = lck->lk.mask;
2436 KA_TRACE(1000, (
"__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
2438 KMP_FSYNC_RELEASING(lck);
2439 ANNOTATE_DRDPA_RELEASED(lck);
2440 polls[ticket & mask] = ticket;
2441 return KMP_LOCK_RELEASED;
2444 static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
char const *const func = "omp_unset_lock";
2448 if (lck->lk.initialized != lck) {
2449 KMP_FATAL(LockIsUninitialized, func);
2451 if (__kmp_is_drdpa_lock_nestable(lck)) {
2452 KMP_FATAL(LockNestableUsedAsSimple, func);
2454 if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2455 KMP_FATAL(LockUnsettingFree, func);
2457 if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) &&
2458 (__kmp_get_drdpa_lock_owner(lck) != gtid)) {
2459 KMP_FATAL(LockUnsettingSetByAnother, func);
2461 lck->lk.owner_id = 0;
2462 return __kmp_release_drdpa_lock(lck, gtid);
2465 void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
2466 lck->lk.location = NULL;
2468 lck->lk.num_polls = 1;
lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
    lck->lk.num_polls * sizeof(*(lck->lk.polls)));
2471 lck->lk.cleanup_ticket = 0;
2472 lck->lk.old_polls = NULL;
2473 lck->lk.next_ticket = 0;
2474 lck->lk.now_serving = 0;
2475 lck->lk.owner_id = 0;
2476 lck->lk.depth_locked = -1;
2477 lck->lk.initialized = lck;
2479 KA_TRACE(1000, (
"__kmp_init_drdpa_lock: lock %p initialized\n", lck));
2482 void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
2483 lck->lk.initialized = NULL;
2484 lck->lk.location = NULL;
2485 if (lck->lk.polls.load() != NULL) {
2486 __kmp_free(lck->lk.polls.load());
2487 lck->lk.polls = NULL;
2489 if (lck->lk.old_polls != NULL) {
2490 __kmp_free(lck->lk.old_polls);
2491 lck->lk.old_polls = NULL;
2494 lck->lk.num_polls = 0;
2495 lck->lk.cleanup_ticket = 0;
2496 lck->lk.next_ticket = 0;
2497 lck->lk.now_serving = 0;
2498 lck->lk.owner_id = 0;
2499 lck->lk.depth_locked = -1;
2502 static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2503 char const *const func = "omp_destroy_lock";
2504 if (lck->lk.initialized != lck) {
2505 KMP_FATAL(LockIsUninitialized, func);
2507 if (__kmp_is_drdpa_lock_nestable(lck)) {
2508 KMP_FATAL(LockNestableUsedAsSimple, func);
2510 if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2511 KMP_FATAL(LockStillOwned, func);
2513 __kmp_destroy_drdpa_lock(lck);
2518 int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2519 KMP_DEBUG_ASSERT(gtid >= 0);
2521 if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2522 lck->lk.depth_locked += 1;
2523 return KMP_LOCK_ACQUIRED_NEXT;
2525 __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2526 ANNOTATE_DRDPA_ACQUIRED(lck);
2528 lck->lk.depth_locked = 1;
2530 lck->lk.owner_id = gtid + 1;
2531 return KMP_LOCK_ACQUIRED_FIRST;
2535 static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2537 char const *const func = "omp_set_nest_lock";
2538 if (lck->lk.initialized != lck) {
2539 KMP_FATAL(LockIsUninitialized, func);
2541 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2542 KMP_FATAL(LockSimpleUsedAsNestable, func);
2544 __kmp_acquire_nested_drdpa_lock(lck, gtid);
2547 int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2550 KMP_DEBUG_ASSERT(gtid >= 0);
2552 if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2553 retval = ++lck->lk.depth_locked;
2554 } else if (!__kmp_test_drdpa_lock(lck, gtid)) {
2555 retval = 0;
2556 } else {
2558 retval = lck->lk.depth_locked = 1;
2560 lck->lk.owner_id = gtid + 1;
2565 static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2567 char const *const func = "omp_test_nest_lock";
2568 if (lck->lk.initialized != lck) {
2569 KMP_FATAL(LockIsUninitialized, func);
2571 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2572 KMP_FATAL(LockSimpleUsedAsNestable, func);
2574 return __kmp_test_nested_drdpa_lock(lck, gtid);
2577 int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2578 KMP_DEBUG_ASSERT(gtid >= 0);
2581 if (--(lck->lk.depth_locked) == 0) {
2583 lck->lk.owner_id = 0;
2584 __kmp_release_drdpa_lock(lck, gtid);
2585 return KMP_LOCK_RELEASED;
2587 return KMP_LOCK_STILL_HELD;
2590 static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2592 char const *const func = "omp_unset_nest_lock";
2594 if (lck->lk.initialized != lck) {
2595 KMP_FATAL(LockIsUninitialized, func);
2597 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2598 KMP_FATAL(LockSimpleUsedAsNestable, func);
2600 if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2601 KMP_FATAL(LockUnsettingFree, func);
2603 if (__kmp_get_drdpa_lock_owner(lck) != gtid) {
2604 KMP_FATAL(LockUnsettingSetByAnother, func);
2606 return __kmp_release_nested_drdpa_lock(lck, gtid);
2609 void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2610 __kmp_init_drdpa_lock(lck);
2611 lck->lk.depth_locked = 0;
2614 void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2615 __kmp_destroy_drdpa_lock(lck);
2616 lck->lk.depth_locked = 0;
2619 static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2620 char const *const func = "omp_destroy_nest_lock";
2621 if (lck->lk.initialized != lck) {
2622 KMP_FATAL(LockIsUninitialized, func);
2624 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2625 KMP_FATAL(LockSimpleUsedAsNestable, func);
2627 if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2628 KMP_FATAL(LockStillOwned, func);
2630 __kmp_destroy_nested_drdpa_lock(lck);
2635 static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
2636 return lck->lk.location;
2639 static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
2641 lck->lk.location = loc;
2644 static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) {
2645 return lck->lk.flags;
2648 static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
2649 kmp_lock_flags_t flags) {
2650 lck->lk.flags = flags;
2654 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2655 #define __kmp_tsc() __kmp_hardware_timestamp()
2657 kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100};
2660 extern kmp_uint64 __kmp_now_nsec();
2661 kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
2662 #define __kmp_tsc() __kmp_now_nsec()
2672 static inline bool before(kmp_uint64 a, kmp_uint64 b) {
2673 return ((kmp_int64)b - (kmp_int64)a) > 0;
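// Truncated exponential backoff: each call busy-waits for boff->step intervals
// of boff->min_tick time units, then grows step as (step << 1 | 1) masked with
// max_backoff - 1, which assumes max_backoff is a power of two.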
2677 void __kmp_spin_backoff(kmp_backoff_t *boff) {
2680 for (i = boff->step; i > 0; i--) {
2681 kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
2682 do {
2683 KMP_CPU_PAUSE();
2684 } while (before(__kmp_tsc(), goal));
2686 boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
2689 #if KMP_USE_DYNAMIC_LOCK
2693 static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
2694 kmp_dyna_lockseq_t seq) {
2695 TCW_4(*lck, KMP_GET_D_TAG(seq));
2698 KA_TRACE(20, ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
2704 #define HLE_ACQUIRE ".byte 0xf2;"
2705 #define HLE_RELEASE ".byte 0xf3;"
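// HLE_ACQUIRE / HLE_RELEASE are the legacy-encoded XACQUIRE (0xF2) and
// XRELEASE (0xF3) prefixes for hardware lock elision. swap4 below issues an
// elided xchg; the acquire loop then spins on plain reads before retrying the
// prefixed exchange.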
2707 static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
2708 __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
2709 return v;
2712 static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
2714 static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) {
2718 static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2720 if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
2723 while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
2724 for (int i = delay; i != 0; --i)
2725 KMP_CPU_PAUSE();
2726 delay = ((delay << 1) | 1) & 7;
2727 }
2728 } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
2732 static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2734 __kmp_acquire_hle_lock(lck, gtid);
2737 static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2738 __asm__ volatile(HLE_RELEASE "movl %1,%0"
2739 : "=m"(*lck)
2740 : "r"(KMP_LOCK_FREE(hle))
2741 : "memory");
2742 return KMP_LOCK_RELEASED;
2745 static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2747 return __kmp_release_hle_lock(lck, gtid);
2750 static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2751 return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
2754 static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2756 return __kmp_test_hle_lock(lck, gtid);
2759 static void __kmp_init_rtm_queuing_lock(kmp_queuing_lock_t *lck) {
2760 __kmp_init_queuing_lock(lck);
2763 static void __kmp_destroy_rtm_queuing_lock(kmp_queuing_lock_t *lck) {
2764 __kmp_destroy_queuing_lock(lck);
2768 __kmp_destroy_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
2769 __kmp_destroy_queuing_lock_with_checks(lck);
2772 KMP_ATTRIBUTE_TARGET_RTM
2773 static void __kmp_acquire_rtm_queuing_lock(kmp_queuing_lock_t *lck,
2775 unsigned retries = 3, status;
2778 if (status == _XBEGIN_STARTED) {
2779 if (__kmp_is_unlocked_queuing_lock(lck))
2783 if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
2785 while (!__kmp_is_unlocked_queuing_lock(lck)) {
2788 } else if (!(status & _XABORT_RETRY))
2789 break;
2790 } while (retries--);
2793 __kmp_acquire_queuing_lock(lck, gtid);
2796 static void __kmp_acquire_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
2798 __kmp_acquire_rtm_queuing_lock(lck, gtid);
2801 KMP_ATTRIBUTE_TARGET_RTM
2802 static int __kmp_release_rtm_queuing_lock(kmp_queuing_lock_t *lck,
2804 if (__kmp_is_unlocked_queuing_lock(lck)) {
2809 __kmp_release_queuing_lock(lck, gtid);
2811 return KMP_LOCK_RELEASED;
2814 static int __kmp_release_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
2816 return __kmp_release_rtm_queuing_lock(lck, gtid);
2819 KMP_ATTRIBUTE_TARGET_RTM
2820 static int __kmp_test_rtm_queuing_lock(kmp_queuing_lock_t *lck,
2822 unsigned retries = 3, status;
2825 if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
2828 if (!(status & _XABORT_RETRY))
2829 break;
2830 } while (retries--);
2832 return __kmp_test_queuing_lock(lck, gtid);
2835 static int __kmp_test_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
2837 return __kmp_test_rtm_queuing_lock(lck, gtid);
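// The rtm_spin lock reuses the TAS lock layout: acquire first attempts an RTM
// transaction that only checks the poll word, and falls back to a real
// test-and-set with spin backoff after repeated aborts.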
2841 typedef kmp_tas_lock_t kmp_rtm_spin_lock_t;
2843 static void __kmp_destroy_rtm_spin_lock(kmp_rtm_spin_lock_t *lck) {
2844 KMP_ATOMIC_ST_REL(&lck->lk.poll, 0);
2847 static void __kmp_destroy_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck) {
2848 __kmp_destroy_rtm_spin_lock(lck);
2851 KMP_ATTRIBUTE_TARGET_RTM
2852 static int __kmp_acquire_rtm_spin_lock(kmp_rtm_spin_lock_t *lck,
2854 unsigned retries = 3, status;
2855 kmp_int32 lock_free = KMP_LOCK_FREE(rtm_spin);
2856 kmp_int32 lock_busy = KMP_LOCK_BUSY(1, rtm_spin);
2859 if (status == _XBEGIN_STARTED) {
2860 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free)
2861 return KMP_LOCK_ACQUIRED_FIRST;
2864 if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
2866 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free) {
2869 } else if (!(status & _XABORT_RETRY))
2870 break;
2871 } while (retries--);
2874 KMP_FSYNC_PREPARE(lck);
2875 kmp_backoff_t backoff = __kmp_spin_backoff_params;
2876 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free ||
2877 !__kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) {
2878 __kmp_spin_backoff(&backoff);
2880 KMP_FSYNC_ACQUIRED(lck);
2881 return KMP_LOCK_ACQUIRED_FIRST;
2884 static int __kmp_acquire_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
2886 return __kmp_acquire_rtm_spin_lock(lck, gtid);
2889 KMP_ATTRIBUTE_TARGET_RTM
2890 static int __kmp_release_rtm_spin_lock(kmp_rtm_spin_lock_t *lck,
2892 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == KMP_LOCK_FREE(rtm_spin)) {
2897 KMP_FSYNC_RELEASING(lck);
2898 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(rtm_spin));
2900 return KMP_LOCK_RELEASED;
2903 static int __kmp_release_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
2905 return __kmp_release_rtm_spin_lock(lck, gtid);
2908 KMP_ATTRIBUTE_TARGET_RTM
2909 static int __kmp_test_rtm_spin_lock(kmp_rtm_spin_lock_t *lck, kmp_int32 gtid) {
2910 unsigned retries = 3, status;
2911 kmp_int32 lock_free = KMP_LOCK_FREE(rtm_spin);
2912 kmp_int32 lock_busy = KMP_LOCK_BUSY(1, rtm_spin);
2915 if (status == _XBEGIN_STARTED &&
2916 KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free) {
2919 if (!(status & _XABORT_RETRY))
2920 break;
2921 } while (retries--);
2923 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free &&
2924 __kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) {
2925 KMP_FSYNC_ACQUIRED(lck);
2931 static int __kmp_test_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck,
2933 return __kmp_test_rtm_spin_lock(lck, gtid);
2939 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
2940 kmp_dyna_lockseq_t tag);
2941 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
2942 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2943 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2944 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2945 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2947 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2949 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2953 #define KMP_FOREACH_LOCK_KIND(m, a) m(ticket, a) m(queuing, a) m(drdpa, a)
2955 #define expand1(lk, op) \
2956 static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) { \
2957 __kmp_##op##_##lk##_##lock(&lock->lk); \
2959 #define expand2(lk, op) \
2960 static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock, \
2962 return __kmp_##op##_##lk##_##lock(&lock->lk, gtid); \
2964 #define expand3(lk, op) \
2965 static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock, \
2966 kmp_lock_flags_t flags) { \
2967 __kmp_set_##lk##_lock_flags(&lock->lk, flags); \
2969 #define expand4(lk, op) \
2970 static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock, \
2971 const ident_t *loc) { \
2972 __kmp_set_##lk##_lock_location(&lock->lk, loc); \
2975 KMP_FOREACH_LOCK_KIND(expand1, init)
2976 KMP_FOREACH_LOCK_KIND(expand1, init_nested)
2977 KMP_FOREACH_LOCK_KIND(expand1, destroy)
2978 KMP_FOREACH_LOCK_KIND(expand1, destroy_nested)
2979 KMP_FOREACH_LOCK_KIND(expand2, acquire)
2980 KMP_FOREACH_LOCK_KIND(expand2, acquire_nested)
2981 KMP_FOREACH_LOCK_KIND(expand2, release)
2982 KMP_FOREACH_LOCK_KIND(expand2, release_nested)
2983 KMP_FOREACH_LOCK_KIND(expand2, test)
2984 KMP_FOREACH_LOCK_KIND(expand2, test_nested)
2985 KMP_FOREACH_LOCK_KIND(expand3, )
2986 KMP_FOREACH_LOCK_KIND(expand4, )
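// Jump tables for the direct locks, indexed by the tag stored in the lock word
// itself (see KMP_GET_D_TAG in __kmp_init_direct_lock). Slot 0 dispatches to
// the indirect-lock path; the interleaved 0 placeholders appear to pad the
// tables so each direct lock kind lands on the slot matching its odd tag.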
2997 #define expand(l, op) 0, __kmp_init_direct_lock,
2998 void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
2999 __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
3003 #define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
3004 static void (*direct_destroy[])(kmp_dyna_lock_t *) = {
3005 __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
3007 #define expand(l, op) \
3008 0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks,
3009 static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = {
3010 __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
3014 #define expand(l, op) \
3015 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
3016 static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
3017 __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
3019 #define expand(l, op) \
3020 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
3021 static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
3022 __kmp_set_indirect_lock_with_checks, 0,
3023 KMP_FOREACH_D_LOCK(expand, acquire)};
3027 #define expand(l, op) \
3028 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
3029 static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
3030 __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
3031 static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
3032 __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
3034 #define expand(l, op) \
3035 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
3036 static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
3037 __kmp_unset_indirect_lock_with_checks, 0,
3038 KMP_FOREACH_D_LOCK(expand, release)};
3039 static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
3040 __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
3044 void (**__kmp_direct_destroy)(kmp_dyna_lock_t *) = 0;
3045 int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32) = 0;
3046 int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32) = 0;
3047 int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32) = 0;
3050 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
3051 void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
3052 KMP_FOREACH_I_LOCK(expand, init)};
3055 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
3056 static void (*indirect_destroy[])(kmp_user_lock_p) = {
3057 KMP_FOREACH_I_LOCK(expand, destroy)};
3059 #define expand(l, op) \
3060 (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks,
3061 static void (*indirect_destroy_check[])(kmp_user_lock_p) = {
3062 KMP_FOREACH_I_LOCK(expand, destroy)};
3066 #define expand(l, op) \
3067 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
3068 static int (*indirect_set[])(kmp_user_lock_p,
3069 kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)};
3071 #define expand(l, op) \
3072 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
3073 static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
3074 KMP_FOREACH_I_LOCK(expand, acquire)};
3078 #define expand(l, op) \
3079 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
3080 static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = {
3081 KMP_FOREACH_I_LOCK(expand, release)};
3082 static int (*indirect_test[])(kmp_user_lock_p,
3083 kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)};
3085 #define expand(l, op) \
3086 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
3087 static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = {
3088 KMP_FOREACH_I_LOCK(expand, release)};
3089 static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
3090 KMP_FOREACH_I_LOCK(expand, test)};
3094 void (**__kmp_indirect_destroy)(kmp_user_lock_p) = 0;
3095 int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32) = 0;
3096 int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32) = 0;
3097 int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32) = 0;
3100 kmp_indirect_lock_table_t __kmp_i_lock_table;
3103 static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};
3106 void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3108 void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3109 kmp_lock_flags_t) = {0};
3110 const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
3111 kmp_user_lock_p) = {0};
3112 kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
3113 kmp_user_lock_p) = {0};
3116 static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
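// Indirect locks are heap-allocated lock objects reached through the user's
// lock word: either a table index (when omp_lock_t is smaller than a pointer)
// or the object pointer itself. Destroyed locks are parked on a per-type free
// pool (__kmp_indirect_lock_pool) and reused by __kmp_allocate_indirect_lock.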
3123 kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
3124 kmp_int32 gtid,
3125 kmp_indirect_locktag_t tag) {
3126 kmp_indirect_lock_t *lck;
3127 kmp_lock_index_t idx;
3129 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3131 if (__kmp_indirect_lock_pool[tag] != NULL) {
3133 lck = __kmp_indirect_lock_pool[tag];
3134 if (OMP_LOCK_T_SIZE < sizeof(void *))
3135 idx = lck->lock->pool.index;
3136 __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
3137 KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n", lck));
3140 idx = __kmp_i_lock_table.next;
3142 if (idx == __kmp_i_lock_table.size) {
3144 int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
3145 kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
3146 2 * row * sizeof(kmp_indirect_lock_t *));
3147 KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
3148 row * sizeof(kmp_indirect_lock_t *));
3149 kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
3150 __kmp_i_lock_table.table = new_table;
3151 __kmp_free(old_table);
3153 for (int i = row; i < 2 * row; ++i)
3154 *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate(
3155 KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3156 __kmp_i_lock_table.size = 2 * idx;
3158 __kmp_i_lock_table.next++;
3159 lck = KMP_GET_I_LOCK(idx);
3161 lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
3163 KA_TRACE(20, ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
3166 __kmp_release_lock(&__kmp_global_lock, gtid);
3170 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3171 *((kmp_lock_index_t *)user_lock) = idx << 1; // indirect lock word must be even
3173 } else {
3174 *((kmp_indirect_lock_t **)user_lock) = lck;
3181 static __forceinline kmp_indirect_lock_t *
3182 __kmp_lookup_indirect_lock(void **user_lock, const char *func) {
3183 if (__kmp_env_consistency_check) {
3184 kmp_indirect_lock_t *lck = NULL;
3185 if (user_lock == NULL) {
3186 KMP_FATAL(LockIsUninitialized, func);
3188 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3189 kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
3190 if (idx >= __kmp_i_lock_table.size) {
3191 KMP_FATAL(LockIsUninitialized, func);
3193 lck = KMP_GET_I_LOCK(idx);
3195 lck = *((kmp_indirect_lock_t **)user_lock);
3198 KMP_FATAL(LockIsUninitialized, func);
3202 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3203 return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
3205 return *((kmp_indirect_lock_t **)user_lock);
3210 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
3211 kmp_dyna_lockseq_t seq) {
3212 #if KMP_USE_ADAPTIVE_LOCKS
3213 if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
3214 KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
3215 seq = lockseq_queuing;
3219 if (seq == lockseq_rtm_queuing && !__kmp_cpuinfo.rtm) {
3220 seq = lockseq_queuing;
3223 kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
3224 kmp_indirect_lock_t *l =
3225 __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
3226 KMP_I_LOCK_FUNC(l, init)(l->lock);
3228 KA_TRACE(20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n", seq));
3232 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
3233 kmp_uint32 gtid = __kmp_entry_gtid();
3234 kmp_indirect_lock_t *l =
3235 __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
3236 KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3237 kmp_indirect_locktag_t tag = l->type;
3239 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3242 l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
3243 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3244 l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
3246 __kmp_indirect_lock_pool[tag] = l;
3248 __kmp_release_lock(&__kmp_global_lock, gtid);
3251 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3252 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3253 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3256 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3257 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3258 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3261 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3262 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3263 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3266 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3268 kmp_indirect_lock_t *l =
3269 __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
3270 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3273 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3275 kmp_indirect_lock_t *l =
3276 __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
3277 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3280 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3282 kmp_indirect_lock_t *l =
3283 __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
3284 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3287 kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;
3290 kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) {
3293 case lockseq_nested_tas:
3294 return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
3297 case lockseq_nested_futex:
3298 return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
3300 case lockseq_ticket:
3301 case lockseq_nested_ticket:
3302 return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
3303 case lockseq_queuing:
3304 case lockseq_nested_queuing:
3305 #if KMP_USE_ADAPTIVE_LOCKS
3306 case lockseq_adaptive:
3308 return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
3310 case lockseq_nested_drdpa:
3311 return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
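// __kmp_init_dynamic_user_locks installs either the checking or non-checking
// jump tables (based on __kmp_env_consistency_check), allocates the first
// chunk of the indirect lock table, and records per-type lock sizes and the
// location/flags accessor tables.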
3318 void __kmp_init_dynamic_user_locks() {
3320 if (__kmp_env_consistency_check) {
3321 __kmp_direct_set = direct_set_check;
3322 __kmp_direct_unset = direct_unset_check;
3323 __kmp_direct_test = direct_test_check;
3324 __kmp_direct_destroy = direct_destroy_check;
3325 __kmp_indirect_set = indirect_set_check;
3326 __kmp_indirect_unset = indirect_unset_check;
3327 __kmp_indirect_test = indirect_test_check;
3328 __kmp_indirect_destroy = indirect_destroy_check;
3330 __kmp_direct_set = direct_set;
3331 __kmp_direct_unset = direct_unset;
3332 __kmp_direct_test = direct_test;
3333 __kmp_direct_destroy = direct_destroy;
3334 __kmp_indirect_set = indirect_set;
3335 __kmp_indirect_unset = indirect_unset;
3336 __kmp_indirect_test = indirect_test;
3337 __kmp_indirect_destroy = indirect_destroy;
3342 if (__kmp_init_user_locks)
3346 __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
3347 __kmp_i_lock_table.table =
3348 (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
3349 *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
3350 KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3351 __kmp_i_lock_table.next = 0;
3354 __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
3355 __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
3356 #if KMP_USE_ADAPTIVE_LOCKS
3357 __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
3359 __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
3361 __kmp_indirect_lock_size[locktag_rtm_queuing] = sizeof(kmp_queuing_lock_t);
3363 __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
3365 __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
3367 __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
3368 __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
3369 __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
3372 #define fill_jumps(table, expand, sep) \
3374 table[locktag##sep##ticket] = expand(ticket); \
3375 table[locktag##sep##queuing] = expand(queuing); \
3376 table[locktag##sep##drdpa] = expand(drdpa); \
3379 #if KMP_USE_ADAPTIVE_LOCKS
3380 #define fill_table(table, expand) \
3382 fill_jumps(table, expand, _); \
3383 table[locktag_adaptive] = expand(queuing); \
3384 fill_jumps(table, expand, _nested_); \
3387 #define fill_table(table, expand) \
3389 fill_jumps(table, expand, _); \
3390 fill_jumps(table, expand, _nested_); \
3395 (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
3396 fill_table(__kmp_indirect_set_location, expand);
3399 (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
3400 fill_table(__kmp_indirect_set_flags, expand);
3403 (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
3404 fill_table(__kmp_indirect_get_location, expand);
3407 (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
3408 fill_table(__kmp_indirect_get_flags, expand);
3411 __kmp_init_user_locks = TRUE;
3415 void __kmp_cleanup_indirect_user_locks() {
3421 for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
3422 kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
3424 kmp_indirect_lock_t *ll = l;
3425 l = (kmp_indirect_lock_t *)l->lock->pool.next;
3426 KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n", ll));
3428 __kmp_free(ll->lock);
3431 __kmp_indirect_lock_pool[k] = NULL;
3434 for (i = 0; i < __kmp_i_lock_table.next; i++) {
3435 kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
3436 if (l->lock != NULL) {
3438 KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3441 KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n", l));
3443 __kmp_free(l->lock);
3447 for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
3448 __kmp_free(__kmp_i_lock_table.table[i]);
3449 __kmp_free(__kmp_i_lock_table.table);
3451 __kmp_init_user_locks = FALSE;
3454 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3455 int __kmp_num_locks_in_block = 1;
3459 static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
3460 __kmp_init_tas_lock(lck);
3463 static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
3464 __kmp_init_nested_tas_lock(lck);
3468 static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
3469 __kmp_init_futex_lock(lck);
3472 static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
3473 __kmp_init_nested_futex_lock(lck);
3477 static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
3478 return lck == lck->lk.self;
3481 static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
3482 __kmp_init_ticket_lock(lck);
3485 static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
3486 __kmp_init_nested_ticket_lock(lck);
3489 static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
3490 return lck == lck->lk.initialized;
3493 static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
3494 __kmp_init_queuing_lock(lck);
3498 __kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
3499 __kmp_init_nested_queuing_lock(lck);
3502 #if KMP_USE_ADAPTIVE_LOCKS
3503 static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
3504 __kmp_init_adaptive_lock(lck);
3508 static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
3509 return lck == lck->lk.initialized;
3512 static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
3513 __kmp_init_drdpa_lock(lck);
3516 static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
3517 __kmp_init_nested_drdpa_lock(lck);
3524 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3526 size_t __kmp_base_user_lock_size = 0;
3527 size_t __kmp_user_lock_size = 0;
3529 kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
3530 int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
3531 kmp_int32 gtid) = NULL;
3533 int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
3534 kmp_int32 gtid) = NULL;
3535 int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
3536 kmp_int32 gtid) = NULL;
3537 void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3538 void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
3539 void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3540 int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3541 kmp_int32 gtid) = NULL;
3543 int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3544 kmp_int32 gtid) = NULL;
3545 int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3546 kmp_int32 gtid) = NULL;
3547 void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3548 void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3550 int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
3551 const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
3552 void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
3554 kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
3555 void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
3556 kmp_lock_flags_t flags) = NULL;
3558 void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
3559 switch (user_lock_kind) {
3565 __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
3566 __kmp_user_lock_size = sizeof(kmp_tas_lock_t);
3568 __kmp_get_user_lock_owner_ =
3569 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);
3571 if (__kmp_env_consistency_check) {
3572 KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
3573 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
3575 KMP_BIND_USER_LOCK(tas);
3576 KMP_BIND_NESTED_USER_LOCK(tas);
3579 __kmp_destroy_user_lock_ =
3580 (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);
3582 __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3584 __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3586 __kmp_set_user_lock_location_ =
3587 (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3589 __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3591 __kmp_set_user_lock_flags_ =
3592 (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3598 __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
3599 __kmp_user_lock_size = sizeof(kmp_futex_lock_t);
3601 __kmp_get_user_lock_owner_ =
3602 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);
3604 if (__kmp_env_consistency_check) {
3605 KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
3606 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
3608 KMP_BIND_USER_LOCK(futex);
3609 KMP_BIND_NESTED_USER_LOCK(futex);
3612 __kmp_destroy_user_lock_ =
3613 (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);
3615 __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3617 __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3619 __kmp_set_user_lock_location_ =
3620 (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3622 __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3624 __kmp_set_user_lock_flags_ =
3625 (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3631 __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
3632 __kmp_user_lock_size = sizeof(kmp_ticket_lock_t);
3634 __kmp_get_user_lock_owner_ =
3635 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);
3637 if (__kmp_env_consistency_check) {
3638 KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
3639 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
3641 KMP_BIND_USER_LOCK(ticket);
3642 KMP_BIND_NESTED_USER_LOCK(ticket);
3645 __kmp_destroy_user_lock_ =
3646 (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);
3648 __kmp_is_user_lock_initialized_ =
3649 (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);
3651 __kmp_get_user_lock_location_ =
3652 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);
3654 __kmp_set_user_lock_location_ = (void (*)(
3655 kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);
3657 __kmp_get_user_lock_flags_ =
3658 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);
3660 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3661 &__kmp_set_ticket_lock_flags);
3665 __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
3666 __kmp_user_lock_size = sizeof(kmp_queuing_lock_t);
3668 __kmp_get_user_lock_owner_ =
3669 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3671 if (__kmp_env_consistency_check) {
3672 KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
3673 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
3675 KMP_BIND_USER_LOCK(queuing);
3676 KMP_BIND_NESTED_USER_LOCK(queuing);
3679 __kmp_destroy_user_lock_ =
3680 (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);
3682 __kmp_is_user_lock_initialized_ =
3683 (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3685 __kmp_get_user_lock_location_ =
3686 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3688 __kmp_set_user_lock_location_ = (void (*)(
3689 kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3691 __kmp_get_user_lock_flags_ =
3692 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3694 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3695 &__kmp_set_queuing_lock_flags);
3698 #if KMP_USE_ADAPTIVE_LOCKS
3700 __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
3701 __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);
3703 __kmp_get_user_lock_owner_ =
3704 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3706 if (__kmp_env_consistency_check) {
3707 KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
3709 KMP_BIND_USER_LOCK(adaptive);
3712 __kmp_destroy_user_lock_ =
3713 (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);
3715 __kmp_is_user_lock_initialized_ =
3716 (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3718 __kmp_get_user_lock_location_ =
3719 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3721 __kmp_set_user_lock_location_ = (void (*)(
3722 kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3724 __kmp_get_user_lock_flags_ =
3725 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3727 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3728 &__kmp_set_queuing_lock_flags);
3734 __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
3735 __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);
3737 __kmp_get_user_lock_owner_ =
3738 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);
3740 if (__kmp_env_consistency_check) {
3741 KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
3742 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
3744 KMP_BIND_USER_LOCK(drdpa);
3745 KMP_BIND_NESTED_USER_LOCK(drdpa);
3748 __kmp_destroy_user_lock_ =
3749 (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);
3751 __kmp_is_user_lock_initialized_ =
3752 (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);
3754 __kmp_get_user_lock_location_ =
3755 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);
3757 __kmp_set_user_lock_location_ = (void (*)(
3758 kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);
3760 __kmp_get_user_lock_flags_ =
3761 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);
3763 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3764 &__kmp_set_drdpa_lock_flags);
3772 kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
3773 kmp_user_lock_p __kmp_lock_pool = NULL;
3776 kmp_block_of_locks *__kmp_lock_blocks = NULL;
3777 int __kmp_num_locks_in_block = 1;
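// Legacy (non-dynamic) user locks are tracked in __kmp_user_lock_table. The
// table doubles when full, and slot 0 of each new table stores a pointer to
// the previous table so every generation can be freed during cleanup.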
3779 static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
3781 kmp_lock_index_t index;
3782 if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
3783 kmp_lock_index_t size;
3784 kmp_user_lock_p *table;
3786 if (__kmp_user_lock_table.allocated == 0) {
3789 size = __kmp_user_lock_table.allocated * 2;
3791 table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
3792 KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
3793 sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
3794 table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
3799 __kmp_user_lock_table.table = table;
3800 __kmp_user_lock_table.allocated = size;
3802 KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
3803 __kmp_user_lock_table.allocated);
3804 index = __kmp_user_lock_table.used;
3805 __kmp_user_lock_table.table[index] = lck;
3806 ++__kmp_user_lock_table.used;
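// When __kmp_num_locks_in_block > 1, locks are carved out of larger blocks to
// reduce allocator overhead; the kmp_block_of_locks descriptor lives just past
// the lock storage in the same allocation and links the blocks together.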
3810 static kmp_user_lock_p __kmp_lock_block_allocate() {
3812 static int last_index = 0;
3813 if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
3817 KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3818 size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
3819 char *buffer =
3820 (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
3822 kmp_block_of_locks *new_block =
3823 (kmp_block_of_locks *)(&buffer[space_for_locks]);
3824 new_block->next_block = __kmp_lock_blocks;
3825 new_block->locks = (void *)buffer;
3828 __kmp_lock_blocks = new_block;
3830 kmp_user_lock_p ret = (kmp_user_lock_p)(&(
3831 ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
3838 kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
3839 kmp_lock_flags_t flags) {
3840 kmp_user_lock_p lck;
3841 kmp_lock_index_t index;
3842 KMP_DEBUG_ASSERT(user_lock);
3844 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3846 if (__kmp_lock_pool == NULL) {
3851 ANNOTATE_IGNORE_WRITES_BEGIN();
3852 if (__kmp_num_locks_in_block <= 1) {
3853 lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
3855 lck = __kmp_lock_block_allocate();
3857 ANNOTATE_IGNORE_WRITES_END();
3861 index = __kmp_lock_table_insert(lck);
3864 lck = __kmp_lock_pool;
3865 index = __kmp_lock_pool->pool.index;
3866 __kmp_lock_pool = __kmp_lock_pool->pool.next;
3871 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3872 *((kmp_lock_index_t *)user_lock) = index;
3874 *((kmp_user_lock_p *)user_lock) = lck;
3878 __kmp_set_user_lock_flags(lck, flags);
3880 __kmp_release_lock(&__kmp_global_lock, gtid);
3886 void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
3887 kmp_user_lock_p lck) {
3888 KMP_DEBUG_ASSERT(user_lock != NULL);
3889 KMP_DEBUG_ASSERT(lck != NULL);
3891 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3893 lck->pool.next = __kmp_lock_pool;
3894 __kmp_lock_pool = lck;
3895 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3896 kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3897 KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
3898 lck->pool.index = index;
3901 __kmp_release_lock(&__kmp_global_lock, gtid);
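// __kmp_lookup_user_lock maps the user's lock word back to the runtime lock:
// a lock-table index when OMP_LOCK_T_SIZE < sizeof(void *), otherwise the
// pointer stored directly in the word, with optional consistency checks.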
3904 kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
3905 kmp_user_lock_p lck = NULL;
3907 if (__kmp_env_consistency_check) {
3908 if (user_lock == NULL) {
3909 KMP_FATAL(LockIsUninitialized, func);
3913 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3914 kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3915 if (__kmp_env_consistency_check) {
3916 if (!(0 < index && index < __kmp_user_lock_table.used)) {
3917 KMP_FATAL(LockIsUninitialized, func);
3920 KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
3921 KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3922 lck = __kmp_user_lock_table.table[index];
3924 lck = *((kmp_user_lock_p *)user_lock);
3927 if (__kmp_env_consistency_check) {
3929 KMP_FATAL(LockIsUninitialized, func);
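// __kmp_cleanup_user_locks resets the free list, walks the lock table
// backwards destroying any lock that is still initialized (warning, under
// consistency checking, about user locks never destroyed, except those backing
// critical sections), then frees each table generation and each lock block.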
3936 void __kmp_cleanup_user_locks(void) {
3939 __kmp_lock_pool = NULL;
3941 #define IS_CRITICAL(lck) \
3942 ((__kmp_get_user_lock_flags_ != NULL) && \
3943 ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))
3968 while (__kmp_user_lock_table.used > 1) {
3973 kmp_user_lock_p lck =
3974 __kmp_user_lock_table.table[--__kmp_user_lock_table.used];
3976 if ((__kmp_is_user_lock_initialized_ != NULL) &&
3977 (*__kmp_is_user_lock_initialized_)(lck)) {
3981 if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
3982 ((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
3984 kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, false);
3985 KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
3986 __kmp_str_loc_free(&str_loc);
3990 if (IS_CRITICAL(lck)) {
3993 KA_TRACE(20, ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
3994 lck, *(void **)lck));
3996 KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck, *(void **)lck));
4002 __kmp_destroy_user_lock(lck);
4006 if (__kmp_lock_blocks == NULL) {
4014 kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
4015 __kmp_user_lock_table.table = NULL;
4016 __kmp_user_lock_table.allocated = 0;
4018 while (table_ptr != NULL) {
4021 kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
4022 __kmp_free(table_ptr);
4027 kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
4028 __kmp_lock_blocks = NULL;
4030 while (block_ptr != NULL) {
4031 kmp_block_of_locks_t *next = block_ptr->next_block;
4032 __kmp_free(block_ptr->locks);
4037 TCW_4(__kmp_init_user_locks, FALSE);