#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_bitmap_t mask;

  public:
    Mask() {
      mask = hwloc_bitmap_alloc();
      this->zero();
    }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_process_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set process affinity operation when not capable");
      const hwloc_topology_support *support =
          hwloc_topology_get_support(__kmp_hwloc_topology);
      // Bind the whole process only if the topology supports it.
      if (support->cpubind->set_proc_cpubind) {
        int retval;
        retval = hwloc_set_cpubind(__kmp_hwloc_topology, mask,
                                   HWLOC_CPUBIND_PROCESS);
        if (retval >= 0)
          return 0;
        int error = errno;
        if (abort_on_error)
          __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
        return error;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On Windows, the long type is always 32 bits.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0) {
          return -1;
        }
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Is the system capable of setting/getting this thread's affinity?
    // Is topology discovery possible (the pu field), and did all of the
    // hwloc_* calls above succeed?
    if (topology_support &&
        topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enable affinity (see the KMP_AFFINITY_CAPABLE() macro).
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc did not work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
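
// Illustrative sketch (not part of the runtime): how the hwloc-backed
// affinity object above is typically exercised through the abstract
// KMPAffinity interface. Only members defined in this class are used; the
// surrounding setup (topology state, error policy) is assumed.
//
//   KMPAffinity *affinity = new KMPHwlocAffinity();
//   affinity->determine_capable("KMP_AFFINITY"); // probe hwloc support
//   if (KMP_AFFINITY_CAPABLE()) {
//     KMPAffinity::Mask *m = affinity->allocate_mask();
//     m->set(0); // request binding to logical CPU 0
//     m->set_system_affinity(/*abort_on_error=*/TRUE);
//     affinity->deallocate_mask(m);
//   }
//   delete affinity;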
#if KMP_OS_LINUX || KMP_OS_FREEBSD
// On older Linux distributions the affinity syscall numbers may be missing
// from <asm/unistd.h> (pulled in via <sys/syscall.h>). They are fixed per
// architecture and cannot change, so they are hard-coded here.
#if KMP_OS_LINUX
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_RISCV64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */
#elif KMP_OS_FREEBSD
#include <pthread.h>
#include <pthread_np.h>
#endif
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned long mask_t;
    typedef decltype(__kmp_affin_mask_size) mask_size_type;
    static const unsigned int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    static const mask_t ONE = 1;
    mask_size_type get_num_mask_types() const {
      return __kmp_affin_mask_size / sizeof(mask_t);
    }

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= (ONE << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & (ONE << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~(ONE << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = (mask_t)0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      mask_size_type e = get_num_mask_types();
      for (mask_size_type i = 0; i < e; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override {
      int e;
      __kmp_type_convert(get_num_mask_types() * BITS_PER_MASK_T, &e);
      return e;
    }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_getaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
      long retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_setaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0) {
        return 0;
      }
      int error = errno;
      if (abort_on_error) {
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      }
      return error;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
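
// Worked illustration (not part of the runtime) of the word/bit arithmetic the
// native Mask above relies on: logical CPU i lives in word i / BITS_PER_MASK_T
// at bit i % BITS_PER_MASK_T. Standalone sketch with a hypothetical two-word
// mask; the names mirror the class but nothing here is the runtime's own state.
//
//   #include <climits>
//   typedef unsigned long mask_t;
//   const unsigned BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
//   const mask_t ONE = 1;
//   mask_t mask[2] = {0, 0};
//   int i = 70; // with a 64-bit mask_t: word 1, bit 6
//   mask[i / BITS_PER_MASK_T] |= (ONE << (i % BITS_PER_MASK_T));        // set
//   bool set = mask[i / BITS_PER_MASK_T] & (ONE << (i % BITS_PER_MASK_T));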
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t;
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_process_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups <= 1) {
        if (!SetProcessAffinityMask(GetCurrentProcess(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct
        // and make the system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0)) {
          return -1;
        }
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0)
          return -1;
        group = i;
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
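
// Illustrative note (not part of the runtime): with Windows processor groups,
// the Mask above keeps one mask_t word per group, so a flat processor id maps
// to a (group, in-group bit) pair exactly as set()/clear() compute it. The
// numbers below assume a 64-bit ULONG_PTR and are for illustration only.
//
//   const int BITS_PER_MASK_T = 64;
//   int proc = 130;                     // flat processor id
//   int group = proc / BITS_PER_MASK_T; // -> processor group 2
//   int bit = proc % BITS_PER_MASK_T;   // -> bit 2 within that group's mask_t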
// Describes one hardware thread's position in the machine topology as a list
// of labels, one per topology level.
class Address {
public:
  static const unsigned maxDepth = 32;
  unsigned labels[maxDepth];
  unsigned childNums[maxDepth];
  unsigned depth;
  unsigned leader;
  Address(unsigned _depth) : depth(_depth), leader(FALSE) {}
  Address &operator=(const Address &b) {
    depth = b.depth;
    for (unsigned i = 0; i < depth; i++) {
      labels[i] = b.labels[i];
      childNums[i] = b.childNums[i];
    }
    leader = FALSE;
    return *this;
  }
  bool operator==(const Address &b) const {
    if (depth != b.depth)
      return false;
    for (unsigned i = 0; i < depth; i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool isClose(const Address &b, int level) const {
    if (depth != b.depth)
      return false;
    if ((unsigned)level >= depth)
      return true;
    for (unsigned i = 0; i < (depth - level); i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool operator!=(const Address &b) const { return !operator==(b); }
  void print() const {
    unsigned i;
    printf("Depth: %u --- ", depth);
    for (i = 0; i < depth; i++) {
      printf("%u ", labels[i]);
    }
  }
};
class AddrUnsPair {
public:
  Address first;
  unsigned second;
  AddrUnsPair(Address _first, unsigned _second)
      : first(_first), second(_second) {}
  AddrUnsPair &operator=(const AddrUnsPair &b) {
    first = b.first;
    second = b.second;
    return *this;
  }
  void print() const {
    printf("first = ");
    first.print();
    printf(" --- second = %u", second);
  }
  bool operator==(const AddrUnsPair &b) const {
    if (first != b.first)
      return false;
    if (second != b.second)
      return false;
    return true;
  }
  bool operator!=(const AddrUnsPair &b) const { return !operator==(b); }
};
static int __kmp_affinity_cmp_Address_labels(const void *a, const void *b) {
  const Address *aa = &(((const AddrUnsPair *)a)->first);
  const Address *bb = &(((const AddrUnsPair *)b)->first);
  unsigned depth = aa->depth;
  unsigned i;
  KMP_DEBUG_ASSERT(depth == bb->depth);
  for (i = 0; i < depth; i++) {
    if (aa->labels[i] < bb->labels[i])
      return -1;
    if (aa->labels[i] > bb->labels[i])
      return 1;
  }
  return 0;
}
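
// Illustrative sketch (not part of the runtime): the comparator above is meant
// to be passed to qsort() over an AddrUnsPair array, ordering addresses
// lexicographically by label, outermost topology level first. Hypothetical
// two-level example (package, core):
//
//   AddrUnsPair pairs[2] = {AddrUnsPair(Address(2), 0),
//                           AddrUnsPair(Address(2), 1)};
//   pairs[0].first.labels[0] = 1; pairs[0].first.labels[1] = 0; // pkg 1, core 0
//   pairs[1].first.labels[0] = 0; pairs[1].first.labels[1] = 3; // pkg 0, core 3
//   qsort(pairs, 2, sizeof(*pairs), __kmp_affinity_cmp_Address_labels);
//   // pairs[0] is now the package-0 entry: the labels differ first at index 0.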
// Machine-specific hierarchy info, computed once at init: a mapping of threads
// onto the machine hierarchy (or onto a best-guess tree when no topology
// information is available) used to build an efficient barrier.
class hierarchy_info {
public:
  // Good default values for number of leaves and branching factor, given no
  // affinity information: behaves nearly like a 4-ary tree.
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  // Number of levels the hierarchy can hold; levels are added (doubling the
  // thread capacity each time) when the machine is oversubscribed.
  kmp_uint32 maxLevels;
  // Depth of the machine hierarchy actually in use.
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // initialized / not_initialized / initializing
  volatile kmp_int8 resizing; // 0 = not resizing, 1 = resizing

  // numPerLevel[i] is the fan-out at level i (level 0 = leaves); skipPerLevel[i]
  // is the cumulative product, i.e. the number of leaves under one node at
  // level i. Both point into a single allocation of 2 * maxLevels entries.
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;
  // Derive the per-level fan-out from the child numbering recorded in the
  // sorted address table: for each level, the fan-out is one more than the
  // largest child number observed.
  void deriveLevels(AddrUnsPair *adr2os, int num_addrs) {
    int hier_depth = adr2os[0].first.depth;
    int level = 0;
    for (int i = hier_depth - 1; i >= 0; --i) {
      int max = -1;
      for (int j = 0; j < num_addrs; ++j) {
        int next = adr2os[j].first.childNums[i];
        if (next > max)
          max = next;
      }
      numPerLevel[level] = max + 1;
      ++level;
    }
  }

  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}

  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }
  void init(AddrUnsPair *adr2os, int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels; ++i) { // init to 1 item per level
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Sort table by physical ID
    if (adr2os) {
      qsort(adr2os, num_addrs, sizeof(*adr2os),
            __kmp_affinity_cmp_Address_labels);
      deriveLevels(adr2os, num_addrs);
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1) // a new level is created
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < minBranch)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
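
  // Worked example (illustrative only) of what init() computes when no
  // topology information is available (adr2os == NULL) and num_addrs == 16:
  //   numPerLevel[0] = maxLeaves = 4, numPerLevel[1] = 16 / 4 = 4, all other
  //   levels stay at 1                       -> {4, 4, 1, 1, 1, 1, 1}
  //   depth = 3 (leaves, their parents, and the root)
  //   skipPerLevel[i] = product of numPerLevel[0..i-1] -> {1, 4, 16}
  //   unused upper levels are then pre-filled by doubling, giving
  //   skipPerLevel = {1, 4, 16, 32, 64, 128, 256}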
  // Resize the hierarchy if nproc changes to something larger than before.
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    // Calculate new maxLevels
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    // First see if old maxLevels is enough to contain new size
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space, need to expand hierarchy
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;

      // Resize arrays
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);

      // Copy old elements from old arrays
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }
      // Init new elements in arrays to 1
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }
      // Free old arrays
      __kmp_free(old_numPerLevel);
    }

    // Fill in oversubscription levels of hierarchy
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }