#include "kmp_affinity.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

// These group kinds are copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need
  // to init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
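// Note: the fields filled in above feed the hierarchical barrier. As the code
// shows, base_leaf_kids is numPerLevel[0] - 1 (the number of children under a
// leaf-level parent), and skip_per_level lets a thread locate its parent at
// each barrier level without re-walking the hierarchy.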
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  }
  return KMP_I18N_STR(Unknown);
}
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  }
  return ((plural) ? "unknowns" : "unknown");
}
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  return "unknown";
}
////// kmp_hw_thread_t methods

int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
  KMP_DEBUG_ASSERT(__kmp_affinity_compact <= depth);
  for (i = 0; i < __kmp_affinity_compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - __kmp_affinity_compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
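// Illustration (an assumed 2-socket x 2-core x 2-thread machine): the first
// loop compares the deepest __kmp_affinity_compact levels first, the second
// loop then compares the remaining levels top-down. With
// __kmp_affinity_compact == 1 the thread level dominates the sort, so
// hardware threads sharing a core end up adjacent; with
// __kmp_affinity_compact == 0 the sort is purely top-down (socket, core,
// thread).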
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  printf("\n");
}
////// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., no object has more than one parent).
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find target layer.
  // If the new layer is equal to an existing layer then put it above that one.
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to the next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer. And put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
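// Worked example (hypothetical ids): on a core/thread topology where the
// per-hardware-thread ids run {0,0} {0,1} {1,0} {1,1} and the new layer's ids
// are {0,0,1,1}, the new layer changes exactly when the core id changes, so
// it is placed directly above the core layer and everything from the core
// layer down shifts one level deeper.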
#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);
}
#endif
// Remove layers that are a radix-1 expansion of the layer next to them: such
// a layer adds no information, so fold it into the neighboring layer based on
// the preference table below.
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associated with each layer
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads)
    // to be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the indexes for the second (deeper) layer are the same,
      // e.g., all are zero, then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove the radix-1 type by setting the equivalence, removing the id
      // from the hw threads, and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
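// Example: a machine detected as 1 socket x 1 L3 x 8 cores x 1 thread has two
// radix-1 pairs. The L3 layer folds into the socket layer (higher
// preference), recorded via set_equivalent_type(KMP_HW_L3, KMP_HW_SOCKET).
// The thread layer, although also radix 1, is kept: the socket/core/thread
// triple is explicitly never compacted by the check above.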
// Set the last level cache equivalent type
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tiles weren't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
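// After this pass, count[l] is the total number of objects at layer l, and
// ratio[l] is the maximum number of layer-l objects under one object of
// layer l-1. For example, 2 sockets x 4 cores/socket x 2 threads/core yields
// count = {2, 8, 16} and ratio = {2, 4, 2}.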
// Count the number of cores that have a specific attribute (e.g., a given
// core type or efficiency). If find_all is true, count across the whole
// machine; otherwise report the maximum count per object at above_level.
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}
// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
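// The topology is uniform exactly when every object has the same fan-out at
// each level: the product of the per-level ratios then equals the total
// hardware thread count. E.g., ratio = {2, 4, 2} gives 16, so a 16-thread
// machine is uniform; if one socket had 6 cores and the other 2, ratio would
// be {2, 6, 2} (product 24 != 16) and the topology is marked non-uniform.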
// Set the sub_ids for each hardware thread: at each level, the object's index
// relative to its enclosing parent object.
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
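// Example: ids (socket, core, thread) = {1,4,8} {1,4,9} {1,5,10} {1,5,11}
// yield sub_ids {0,0,0} {0,0,1} {0,1,0} {0,1,1}: sub_ids renumber objects
// from zero under each parent, which is what the num/offset pairs of
// KMP_HW_SUBSET are matched against in filter_hw_subset() below.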
// Set global affinity variables describing the topology
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // Assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
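// Memory layout of the single allocation above:
//   [ kmp_topology_t | kmp_hw_thread_t x nproc | int x KMP_HW_LAST x 3 ]
// with the trailing int array split into the types, ratio, and count arrays.
// Freeing the topology is therefore a single __kmp_free() in deallocate().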
void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
// Checks for unique ids across the topology's hardware threads.
// Assumes the hardware threads have already been sorted by ids.
bool kmp_topology_t::check_ids() const {
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/core
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS)
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }

#if KMP_AFFINITY_SUPPORTED
  // Set the number of affinity granularity levels
  if (__kmp_affinity_gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(__kmp_affinity_gran);
    // Check if the user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (__kmp_topology->get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn the user what granularity setting will be used instead
      KMP_WARNING(AffGranularityBad, "KMP_AFFINITY",
                  __kmp_hw_get_catalog_string(__kmp_affinity_gran),
                  __kmp_hw_get_catalog_string(gran_type));
      __kmp_affinity_gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists and the level of granularity
    // specified by the user is too coarse, then the granularity must be
    // adjusted "down" to processor group affinity, because threads can only
    // exist within one processor group.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = __kmp_topology->get_level(gran_type);
      int proc_group_depth = __kmp_topology->get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_WARNING(AffGranTooCoarseProcGroup, "KMP_AFFINITY",
                    __kmp_hw_get_catalog_string(__kmp_affinity_gran));
        __kmp_affinity_gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    __kmp_affinity_gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      __kmp_affinity_gran_levels++;
  }
#endif // KMP_AFFINITY_SUPPORTED
}
// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  _discover_uniformity();
}
// Represents running sub_ids for a single core attribute where
// attribute values have SIZE possibilities.
template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
  int last_level; // Last level in topology to consider for sub_ids
  int sub_id[SIZE]; // The sub ID for a given attribute value
  int prev_sub_id[KMP_HW_LAST];
  IndexFunc indexer;

public:
  kmp_sub_ids_t(int last_level) : last_level(last_level) {
    KMP_ASSERT(last_level < KMP_HW_LAST);
    for (size_t i = 0; i < SIZE; ++i)
      sub_id[i] = -1;
    for (size_t i = 0; i < KMP_HW_LAST; ++i)
      prev_sub_id[i] = -1;
  }
  void update(const kmp_hw_thread_t &hw_thread) {
    int idx = indexer(hw_thread);
    KMP_ASSERT(idx < (int)SIZE);
    for (int level = 0; level <= last_level; ++level) {
      if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
        if (level < last_level)
          sub_id[idx] = -1;
        sub_id[idx]++;
        break;
      }
    }
    for (int level = 0; level <= last_level; ++level)
      prev_sub_id[level] = hw_thread.sub_ids[level];
  }
  int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
    return sub_id[indexer(hw_thread)];
  }
};
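// Usage sketch: with an IndexFunc that maps a hardware thread to its
// core-type slot, update() is called once per hardware thread in topology
// order; get_sub_id() then returns how many cores with that same attribute
// have been seen under the current parent object. That running index is what
// a KMP_HW_SUBSET item with attributes (e.g., 4c:intel_atom@2) is matched
// against in filter_hw_subset() below.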
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
// Apply the KMP_HW_SUBSET envirable to the topology.
// Returns true if KMP_HW_SUBSET filtered any processors; otherwise, false.
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int topology_levels[hw_subset_depth];
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_WARNING(AffHWSubsetNotExistGeneric,
                  __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been
    // specified either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_WARNING(AffHWSubsetEqvLayers, __kmp_hw_get_catalog_string(type),
                  __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_WARNING(AffHWSubsetManyGeneric,
                  __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check that core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on a non-hybrid arch:
      // do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      // Check if using multiple core attributes on a non-hybrid arch:
      // ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_WARNING(AffHWSubsetIgnoringAttr, "efficiency");
          } else {
            KMP_WARNING(AffHWSubsetIgnoringAttr, "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_WARNING(AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Core types and core efficiencies cannot be used together
      if (using_core_types && using_core_effs) {
        KMP_WARNING(AffHWSubsetIncompat, "core_type", "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if (using_core_types || using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_WARNING(AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of a specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_WARNING(AffHWSubsetIncompat,
                        __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_WARNING(AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }

  struct core_type_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      switch (t.attrs.get_core_type()) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
      case KMP_HW_CORE_TYPE_ATOM:
        return 1;
      case KMP_HW_CORE_TYPE_CORE:
        return 2;
#endif
      case KMP_HW_CORE_TYPE_UNKNOWN:
        return 0;
      }
      KMP_ASSERT(0);
      return 0;
    }
  };
  struct core_eff_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      return t.attrs.get_core_eff();
    }
  };

  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> core_type_sub_ids(
      core_level);
  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS, core_eff_indexer> core_eff_sub_ids(
      core_level);

  // Determine which hardware threads should be filtered
  int num_filtered = 0;
  bool *filtered = (bool *)__kmp_allocate(sizeof(bool) * num_hw_threads);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Update type_sub_id
    if (using_core_types)
      core_type_sub_ids.update(hw_thread);
    if (using_core_effs)
      core_eff_sub_ids.update(hw_thread);

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds
        // to this hardware thread's core attribute. Use this num,offset plus
        // the running sub_id for the particular core attribute of this
        // hardware thread to determine if it should be filtered or not.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids.get_sub_id(hw_thread);
        else
          sub_id = core_eff_sub_ids.get_sub_id(hw_thread);
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (hw_thread.sub_ids[level] < offset ||
            (num != kmp_hw_subset_t::USE_ALL &&
             hw_thread.sub_ids[level] >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    filtered[i] = should_be_filtered;
    if (should_be_filtered)
      num_filtered++;
  }

  // One last check that we shouldn't allow filtering the entire machine
  if (num_filtered == num_hw_threads) {
    KMP_WARNING(AffHWSubsetAllFiltered);
    __kmp_free(filtered);
    return false;
  }

  // Apply the filter
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    if (!filtered[i]) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
#if KMP_AFFINITY_SUPPORTED
      KMP_CPU_CLR(hw_threads[i].os_id, __kmp_affin_fullMask);
#endif
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  _gather_enumeration_information();
  _discover_uniformity();
  _set_globals();
  _set_last_level_cache();
  __kmp_free(filtered);
  return true;
}
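// Example: KMP_HW_SUBSET=1s,4c,2t on a 2-socket x 8-core x 2-thread machine
// keeps socket 0, the first four cores of that socket, and both threads of
// each kept core (8 of 32 hardware threads survive). Attribute forms such as
// 4c:intel_atom or 2c:eff0, and offsets such as 4c@4, are validated against
// the detected core attributes above before any filtering happens.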
bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const {
  if (hw_level >= depth)
    return true;
  bool retval = true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return retval;
}
#if KMP_AFFINITY_SUPPORTED

// A saved affinity mask that restores the thread's original system affinity
// either on explicit restore() or when the object goes out of scope.
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  bool restored;

public:
  kmp_affinity_raii_t() : restored(false) {
    KMP_CPU_ALLOC(mask);
    KMP_ASSERT(mask != NULL);
    __kmp_get_system_affinity(mask, TRUE);
  }
  void restore() {
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE(mask);
    restored = true;
  }
  ~kmp_affinity_raii_t() {
    if (!restored) {
      __kmp_set_system_affinity(mask, TRUE);
      KMP_CPU_FREE(mask);
    }
  }
};
bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }

void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use hwloc if affinity isn't explicitly disabled and
  // the user requested the hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity_type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for an empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (true) {
    // Find next range: [start, previous] is an inclusive range of
    // contiguous bits in the mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with a new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
// Print the affinity mask to the string buffer object in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for an empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (true) {
    // Find next range: [start, previous] is an inclusive range of
    // contiguous bits in the mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with a new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
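// For example, a mask with bits {0,1,2,3,6,10,11} set prints as
// "0-3,6,10,11": two contiguous bits are printed "10,11" rather than as a
// dashed range, per the previous - start > 1 check above.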
// Return (possibly empty) affinity mask representing the offline CPUs.
// Caller must free the mask.
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  kmp_affin_mask_t *offline;
  KMP_CPU_ALLOC(offline);
  KMP_CPU_ZERO(offline);
// Find offline CPUs from /sys/devices/system/cpu/offline
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t offline_file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the offline CPUs
  // e.g., 1,2,4-7,9,11-15
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just a single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // A range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
    } else {
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into the offline mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, offline);
    }
  }
#endif
  return offline;
}
// Return the number of available procs
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }

  return avail_proc;
}

kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}

// Returns KMP_HW_* type derived from HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN; // Don't support instruction caches
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
// Returns the number of objects of type 'type' below 'obj' within the
// topology tree structure. E.g., if obj is a HWLOC_OBJ_PACKAGE object and
// type is HWLOC_OBJ_PU, then this returns the number of PUs under the package.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}
// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // Store sub_id + 1 so that 0 is differentiated from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t type;
  int hw_thread_index, sub_id;
  int depth;
  hwloc_obj_t pu, obj, root, prev;
  kmp_hw_t types[KMP_HW_LAST];
  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];

  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
  }

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from hwloc on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    // hwloc only guarantees the existence of the PU object, so check
    // PACKAGE and CORE explicitly
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // Assume one core per socket
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // Assume one thread per core
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Handle multiple types of cores if they exist on the system
  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);

  typedef struct kmp_hwloc_cpukinds_info_t {
    int efficiency;
    kmp_hw_core_type_t core_type;
    hwloc_bitmap_t mask;
  } kmp_hwloc_cpukinds_info_t;
  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;

  if (nr_cpu_kinds > 0) {
    unsigned nr_infos;
    struct hwloc_info_s *infos;
    cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
        sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
    for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
      cpukinds[idx].efficiency = -1;
      cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      cpukinds[idx].mask = hwloc_bitmap_alloc();
      if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
                                  &cpukinds[idx].efficiency, &nr_infos, &infos,
                                  0) == 0) {
        for (unsigned i = 0; i < nr_infos; ++i) {
          if (__kmp_str_match("CoreType", 8, infos[i].name)) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
            if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
              break;
            } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
              break;
            }
#endif
          }
        }
      }
    }
  }
#endif

  root = hwloc_get_root_obj(tp);

  // Figure out the depth and types in the topology
  depth = 0;
  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  KMP_ASSERT(pu);
  obj = pu;
  types[depth] = KMP_HW_THREAD;
  hwloc_types[depth] = obj->type;
  depth++;
  while (obj != root && obj != NULL) {
    obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      hwloc_obj_t memory;
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
          break;
      }
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        types[depth] = KMP_HW_NUMA;
        hwloc_types[depth] = memory->type;
        depth++;
      }
    }
#endif
    type = __kmp_hwloc_type_2_topology_type(obj);
    if (type != KMP_HW_UNKNOWN) {
      types[depth] = type;
      hwloc_types[depth] = obj->type;
      depth++;
    }
  }
  KMP_ASSERT(depth > 0);

  // Get the order for the types correct
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      // If there are multiple core types, set that attribute for the thread
#if HWLOC_API_VERSION >= 0x00020400
      if (cpukinds) {
        int cpukind_index = -1;
        for (int i = 0; i < nr_cpu_kinds; ++i) {
          if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
            cpukind_index = i;
            break;
          }
        }
        if (cpukind_index >= 0) {
          hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
          hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
        }
      }
#endif
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA nodes are handled differently since they are not within the
      // parent/child structure anymore. They are separate children of obj
      // (memory_first_child points to the first memory child).
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
          prev = memory;
        }
        prev = obj;
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Free the core types information
  if (cpukinds) {
    for (int idx = 0; idx < nr_cpu_kinds; ++idx)
      hwloc_bitmap_free(cpukinds[idx].mask);
    __kmp_free(cpukinds);
  }
#endif
  __kmp_topology->sort_ids();
  return true;
}
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread ids <-> processor ids.
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity_verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity_type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, and nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, and
  // nPackages. Make sure all these vars are set correctly.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity_verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}
#if KMP_GROUP_AFFINITY
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at
// level 1. This facilitates letting the threads float among all procs in a
// group, if granularity=group (the default when there are multiple groups).
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  }

  // If we aren't affinity capable, then fill in the global variables only
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    nPackages = __kmp_num_proc_groups;
    __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc;
    nCoresPerPkg = nPackages / __kmp_ncores;
    return true;
  }

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
  }
  return true;
}
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}
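// Example: __kmp_extract_bits<8, 15>(0x0000AB00) == 0xAB. The left shift
// discards bits above MSB, and the combined right shift drops bits below
// LSB, leaving the inclusive bit field [LSB, MSB] right-justified.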
static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
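// Example: __kmp_cpuid_mask_width(6) == 3, since 2^3 = 8 is the smallest
// power of two covering 6 items; a count of 1 yields a width of 0.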
class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; // ""
  unsigned maxThreadsPerPkg; // ""
  unsigned pkgId; // inferred from the above values
  unsigned coreId; // ""
  unsigned threadId; // ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
class kmp_cache_info_t {
public:
  struct info_t {
    unsigned level, mask;
  };
  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

private:
  static const int MAX_CACHE_LEVEL = 3;

  size_t depth;
  info_t table[MAX_CACHE_LEVEL];

  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
      depth++;
      level++;
    }
  }
};
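// The mask stored for each cache level is meant to be ANDed with an APIC id:
// two hardware threads share that cache exactly when their masked APIC ids
// are equal. E.g., if 8 threads share an L2, cache_mask_width is 3 and the
// mask clears the low 3 APIC id bits.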
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use an
// algorithm which cycles through the available os threads, setting the
// current thread's affinity mask to that thread, and then retrieves the Apic
// Id for each thread context using the cpuid instruction.
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // If we aren't affinity capable, infer the machine topology using only the
  // data available from cpuid on the current thread, and __kmp_xproc.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);

    // Get the maximum number of threads per package from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4); 1 must be added to the
    // encoded value. First check that cpuid(4) is supported.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // Without binding to each thread there is no reliable way to tell if HT
    // is enabled, so assume 1 thread per core.
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }

  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity_type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  unsigned i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4), if supported.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only this thread's info.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // This could only happen if the cpuid info on this chip were truly
      // inconsistent.
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
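    // At this point the APIC id has been decomposed, low bits to high bits,
    // into [threadId | coreId | pkgId]. Worked example: maxThreadsPerPkg = 8
    // gives widthCT = 3; maxCoresPerPkg = 4 gives widthC = 2, so widthT = 1.
    // An APIC id of 0b1011 then yields pkgId = 1, coreId = 1, threadId = 1.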
    nApics++;
  }

  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);
  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields: pkgIds may be sparsely assigned, and
  // coreIds/threadIds need not start at zero. Determine the counts now, and
  // perform a consistency check: the values returned by cpuid for any thread
  // bound to a given package had better agree on maxThreadsPerPkg and
  // maxCoresPerPkg.
  nPackages = 1;
  nCoresPerPkg = 1;
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  // The last package and core ids that we have seen.
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // Consistency check values.
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      nPackages++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars, too.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }

  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
  // Now that we've determined the number of packages, the number of cores
  // per package, and the number of threads per core, we can construct the
  // data structure to be returned.
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);

  kmp_hw_t types[3];
  int idx = 0;
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);

  for (i = 0; i < nApics; ++i) {
    int idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();

    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
// Hybrid cpu detection using CPUID.1A
// The thread should be pinned to a specific core beforehand
static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
                                  unsigned *native_model_id) {
  kmp_cpuid buf;
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
  switch (*type) {
  case KMP_HW_CORE_TYPE_ATOM:
    *efficiency = 0;
    break;
  case KMP_HW_CORE_TYPE_CORE:
    *efficiency = 1;
    break;
  default:
    *efficiency = 0;
  }
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
}
// Intel(R) architectures support a newer interface for specifying the x2APIC
// ids, based on CPUID leaf B or leaf 1F
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_TILE = 3,
  INTEL_LEVEL_TYPE_MODULE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};

struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};
static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET; // Package level
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}
// This function takes the topology leaf, an array to store the levels
// detected, and a bitmap of the known levels.
// Returns the number of levels in the topology.
static unsigned
__kmp_x2apicid_get_levels(int leaf,
                          cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
                          kmp_uint64 known_levels) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;

  // Known topology layers act as the highest unknown topology layers when
  // unknown topology layers exist. E.g., if the layers were
  // SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z> are unknown layers, then
  // SMT takes the characteristics of <X> and CORE takes those of <Y> & <Z>.
  // This eliminates unknown portions of the topology while still keeping the
  // correct structure.
  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
      return 0;

    if (known_levels & (1ull << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);

  // Set the masks to & with the apicid
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(levels_index > 0);
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
  }
  return levels_index;
}
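// Worked example: on a part reporting an SMT level with mask_width = 1 and a
// core level with mask_width = 5, the SMT mask is 0x1, the core mask is
// 0x1F ^ 0x1 = 0x1E, and the package level (INTEL_LEVEL_TYPE_INVALID) mask is
// (-1) << 5. ANDing an x2APIC id with each mask isolates that level's bits.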
static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
  unsigned levels_index;
  kmp_uint64 known_levels;
  int topology_leaf, highest_leaf, apic_id;
  int num_leaves;
  static int leaves[] = {0, 0};

  kmp_i18n_id_t leaf_message_id;

  KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);

  *msg_id = kmp_i18n_null;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
  }

  // Get the levels this runtime knows about
  known_levels = 0ull;
  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
    if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
      known_levels |= (1ull << i);
    }
  }

  // Get the highest cpuid leaf supported
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;

  // Determine which leaf/leaves to probe, based on the topology method
  topology_leaf = -1;
  num_leaves = 0;
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    num_leaves = 1;
    leaves[0] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    num_leaves = 1;
    leaves[0] = 31;
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
  } else {
    num_leaves = 2;
    leaves[0] = 31;
    leaves[1] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  }

  // Check to see if the cpuid leaves are supported.
  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
      continue;
    __kmp_x86_cpuid(leaf, 0, &buf);
    if (buf.ebx == 0)
      continue;
    topology_leaf = leaf;
    levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
    if (levels_index == 0)
      continue;
    break;
  }
  if (topology_leaf == -1 || levels_index == 0) {
    *msg_id = leaf_message_id;
    return false;
  }
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);

  // If we aren't affinity capable, infer the machine topology using only
  // the data available from cpuid on the current thread and __kmp_xproc.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    for (unsigned i = 0; i < levels_index; ++i) {
      if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
        __kmp_nThreadsPerCore = levels[i].nitems;
      } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
        nCoresPerPkg = levels[i].nitems;
      }
    }
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

  // Allocate the data structure to be returned.
  int depth = levels_index;
  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
    types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
  __kmp_topology =
      kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);

  // Insert equivalent cache types if they exist
  kmp_cache_info_t cache_info;
  for (size_t i = 0; i < cache_info.get_depth(); ++i) {
    const kmp_cache_info_t::info_t &info = cache_info[i];
    unsigned cache_mask = info.mask;
    unsigned cache_level = info.level;
    for (unsigned j = 0; j < levels_index; ++j) {
      unsigned hw_cache_mask = levels[j].cache_mask;
      kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
      if (hw_cache_mask == cache_mask && j < levels_index - 1) {
        kmp_hw_t type =
            __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
        __kmp_topology->set_equivalent_type(cache_type, type);
      }
    }
  }
  // Save the affinity mask for the current thread; it is restored when
  // this object goes out of scope.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  unsigned int proc;
  int hw_thread_index = 0;
  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
    unsigned my_levels_index;

    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(proc);

    // The x2APIC id is returned in edx for the topology leaf.
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    apic_id = buf.edx;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    my_levels_index =
        __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
    if (my_levels_index == 0 || my_levels_index != levels_index) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }
    hw_thread.clear();
    hw_thread.os_id = proc;
    // Put in topology information
    for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
      hw_thread.ids[idx] = apic_id & my_levels[j].mask;
      if (j > 0) {
        hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
      }
    }
    // Hybrid core type information
    if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
      kmp_hw_core_type_t type;
      unsigned native_model_id;
      int efficiency;
      __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
      hw_thread.attrs.set_core_type(type);
      hw_thread.attrs.set_core_eff(efficiency);
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    return false;
  }
  return true;
}
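// Worked example of the id extraction above (illustrative values): if the
// SMT sub-leaf reports mask 0x1 with mask_width 1 and the core sub-leaf
// reports mask 0x3f, then for apic_id 0x2d the thread id is 0x2d & 0x1 = 1
// and the core id is (0x2d & 0x3f) >> 1 = 0x16; each level shifts off the
// mask widths of the levels below it.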
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// Macros to extract portions of the /proc/cpuinfo entries
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

// qsort comparator: order records by their physical ids, most significant
// field (node/package) first, with the os id as the final tiebreak.
static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      return 0;
  }
}
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to number of cores; set the maximum
  // number of L2's to number of cores / 2 for Intel(R) Xeon Phi(TM)
  // coprocessors, or number of cores otherwise; set the maximum number of
  // NUMA nodes and L3's to number of packages.
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;

  // Set the number of threads per unit at each layer.
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&   \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}
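// For a hypothetical (illustrative) non-MIC machine with nPackages = 2,
// nCoresPerPkg = 8 and __kmp_nThreadsPerCore = 2 (so __kmp_ncores = 16),
// the tables above come out as:
//   max units:   THREAD = 32, L1 = 16, L2 = 16, L3 = 2,  NUMA = 2,  LOOP = 1
//   threads per: THREAD = 1,  L1 = 2,  L2 = 2,  L3 = 16, NUMA = 16, LOOP = 32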
// Return the index into the hierarchy for this tid and layer type.
// This function is used when setting up the dispatch hierarchy for a loop.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}

// Return the number of t1's per t2
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (nthreads / t2) / (nthreads / t1) = t1 / t2
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
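// Worked example (illustrative values): on the hypothetical 2-socket machine
// above with 32 hardware threads, a tid of 37 is first wrapped to
// 37 % 32 = 5; for LAYER_L1 (2 threads per unit, 16 units),
// __kmp_dispatch_get_index then yields (5 / 2) % 16 = 2, i.e. the third core.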
#endif // KMP_USE_HIER_SCHED

static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain
// the affinity map.
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // Scan of the file, and count the number of "processor" (osId) fields,
  // and find the highest value of <n> for a node_<n> field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    // FIXME - this will match "node_<n> <garbage>"
    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input first:
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
    }
  }

  // Check for empty file / no valid processor records, or too many.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so that we can scan the
  // file again, this time performing a full parse of the data.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }

  // Allocate the array of records to store the proc info in. The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  unsigned i;
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }

  unsigned num_avail = 0;
  *line = 0;
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the
    // end of the loop appear in an outer scoping level. This avoids
    // warnings about jumping past an initialization to a target in the
    // same block.
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF. If there is valid data in
        // threadInfo[num_avail], then fake a blank line so that the last
        // record gets parsed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid) {
          break;
        }
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer. Set a flag and don't emit an
        // error if the next read gets the rest of the line.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;

      char s1[] = "processor";
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
          // Handle the old AArch64 /proc/cpuinfo layout differently, it
          // contains all of the 'processor' entries listed in a single
          // 'Processor' section, therefore the normal looking for duplicates
          // in that section will always fail.
          num_avail++;
#else
          goto dup_field;
#endif
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
        continue;
#else
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
#endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // validate the input before using level:
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }

      // We didn't recognize the leading token on the line. There are lots of
      // leading tokens that we don't recognize - if the line isn't empty, go
      // on to the next line.
      if ((*buf != 0) && (*buf != '\n')) {
        // If the line is longer than the buffer, read characters
        // until we find a newline.
        if (long_line) {
          int ch;
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
            ;
        }
        continue;
      }

      // A newline has signalled the end of the processor record.
      // Check that there aren't too many procs specified.
      if ((int)num_avail == __kmp_xproc) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_TooManyEntries;
        return false;
      }

      // Check for missing fields. The osId field must be there, and we
      // currently require that the physical id field is specified, also.
      if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingProcField;
        return false;
      }
      if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
        return false;
      }

      // Skip this proc if it is not included in the machine model.
      if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                         __kmp_affin_fullMask)) {
        INIT_PROC_INFO(threadInfo[num_avail]);
        continue;
      }

      // We have a successful parse of this proc's info.
      // Increment the counter, and prepare for the next proc.
      num_avail++;
      KMP_ASSERT(num_avail <= num_records);
      INIT_PROC_INFO(threadInfo[num_avail]);
    }
    continue;

  no_val:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    return false;

  dup_field:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
    return false;
  }
  *line = 0;
#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  // The logic at the end of this routine won't work if there is only a
  // single thread, so insist on at least one valid record.
  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really
  // don't know the radix of any of the fields. pkgId's may be sparsely
  // assigned among the chips on a system. Although coreId's are usually
  // assigned [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0 .. threadsPerCore-1], we don't want to make any such assumptions.
  // We only have an upper bound on those figures, which we determine now.
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index;

restart_radix_check:
  threadIdCt = 0;

  // Initialize the counter arrays with data from threadInfo[0].
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
    }
  }
  for (index = 0; index <= maxIndex; index++) {
    counts[index] = 1;
    maxCt[index] = 1;
    totals[index] = 1;
    lastId[index] = threadInfo[0][index];
  }

  // Run through the rest of the records in the table comparing each one to
  // the previous record, and count the number of unique values in each
  // field.
  for (i = 1; i < num_avail; i++) {
    // Find the most significant index whose id differs from the id for the
    // previous record.
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified.
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Apparently the thread id field was specified for some records and
        // not others. Start the thread id counter off at the next higher
        // thread id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // Run through all indices which are less significant, and reset the
        // counts to 1. At all levels up to and including index, we need to
        // increment the totals and record the last id.
        unsigned index2;
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];

        if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine
          // minus 1 thread for every core that has 3 or more threads.
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

          // Restart the thread counter, as we are on a new core.
          threadIdCt = 0;

          // Auto-assign the thread id field if it wasn't specified.
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          }
          // Apparently the thread id field was specified for some records
          // and not others. Start the thread id counter off at the next
          // higher thread id.
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not
      // unique. Also check that we haven't already restarted the loop with
      // assign_thread_ids = true (to avoid an infinite loop).
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }

      // If the thread ids were not specified and we see entries that are
      // duplicates, start the loop over and assign the thread ids manually.
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // The default team size is the total #threads in the machine
  // minus 1 thread for every core that has 3 or more threads.
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }

  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, and
  // nPackages. Make sure all these vars are set correctly, and return now
  // if affinity is not enabled.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    return true;
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size.
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);

  // Count the number of levels which have more nodes at that level than at
  // the parent's level (with there being an implicit root node of the top
  // level). This is equivalent to saying that there is at least one node at
  // this level which has a sibling. These levels are in the map, and the
  // package level is always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;

  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);

  // Construct the data structure that is to be returned.
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);

  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();
    hw_thread.os_id = os;

    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }

  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
// Create and return a table of affinity masks, indexed by OS thread ID.
// This routine handles OR'ing together all the affinity masks of threads
// that are sufficiently close, if granularity > fine.
static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
                                            unsigned *numUnique) {
  // First form a table of affinity masks in order of OS thread id.
  int maxOsId;
  int i;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  int depth = __kmp_topology->get_depth();
  KMP_ASSERT(numAddrs);
  KMP_ASSERT(depth);

  maxOsId = 0;
  for (i = numAddrs - 1;; --i) {
    int osId = __kmp_topology->at(i).os_id;
    if (osId > maxOsId) {
      maxOsId = osId;
    }
    if (i == 0)
      break;
  }
  kmp_affin_mask_t *osId2Mask;
  KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
  KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
  if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
  }
  if (__kmp_affinity_gran_levels >= (int)depth) {
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
      KMP_WARNING(AffThreadsMayMigrate);
    }
  }

  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core will have identical topology ids, not
  // considering the last level, which must be the thread id. All threads on
  // a core will appear consecutively.
  int unique = 0;
  int j = 0; // index of 1st thread on core
  int leader = 0;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);
  KMP_CPU_SET(__kmp_topology->at(0).os_id, sum);
  for (i = 1; i < numAddrs; i++) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), then set the bit for this os thread in the
    // affinity mask for this group, and go on to the next thread.
    if (__kmp_topology->is_close(leader, i, __kmp_affinity_gran_levels)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry
    // in the osId2Mask table. Mark the first address as a leader.
    for (; j < i; j++) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask.
    leader = i;
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }

  // For every thread in the last group, copy the mask to the thread's
  // entry in the osId2Mask table.
  for (; j < i; j++) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  unique++;
  KMP_CPU_FREE_FROM_STACK(sum);

  *maxIndex = maxOsId;
  *numUnique = unique;
  return osId2Mask;
}
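// Example: with granularity=core (__kmp_affinity_gran_levels == 1) on a
// machine with 2 threads per core, consecutive hardware threads differing
// only in the last (thread) id are "close", so each mask produced above
// covers both sibling OS procs of a core, and numUnique is the core count.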
// Stuff for the affinity proclist parsers. It's easier to declare these
// vars as file-static than to try and pass them through the calling
// sequence of the recursive-descent OMP_PLACES parser.
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }

#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      if (__kmp_affinity_verbose ||                                            \
          (__kmp_affinity_warnings &&                                          \
           (__kmp_affinity_type != affinity_none))) {                          \
        KMP_WARNING(AffIgnoreInvalidProcID, _osId);                            \
      }                                                                        \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }
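// The proclist grammar accepted below is a comma-separated list of single
// OS proc ids, brace-enclosed sets that are OR'ed into a single mask, and
// ranges with an optional signed stride. An illustrative setting:
//   KMP_AFFINITY="explicit,proclist=[0,3,{5,6,7},8-15,16-31:2]"
// Here "{5,6,7}" forms one three-proc place, "8-15" one place per proc, and
// "16-31:2" one place for every second proc.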
// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid.
static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
                                            unsigned int *out_numMasks,
                                            const char *proclist,
                                            kmp_affin_mask_t *osId2Mask,
                                            int maxOsId) {
  int i;
  const char *scan = proclist;
  const char *next = proclist;

  // We use malloc() for the temporary mask vector, so that we can use
  // realloc() to extend it.
  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;

  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }

    if (*next == '{') {
      int num;
      setSize = 0;
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask.
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none))) {
          KMP_WARNING(AffIgnoreInvalidProcID, num);
        }
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }

      for (;;) {
        // Check for end of set.
        SKIP_WS(next);
        if (*next == '}') {
          next++; // skip '}'
          break;
        }

        // Skip optional comma.
        if (*next == ',') {
          next++;
        }
        SKIP_WS(next);

        // Read the next integer in the set.
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");

        // Add the mask for that osId to the sum mask.
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          if (__kmp_affinity_verbose ||
              (__kmp_affinity_warnings &&
               (__kmp_affinity_type != affinity_none))) {
            KMP_WARNING(AffIgnoreInvalidProcID, num);
          }
        } else {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
          setSize++;
        }
      }
      if (setSize > 0) {
        ADD_MASK(sumMask);
      }

      SKIP_WS(next);
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // Read the first integer.
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
    SKIP_WS(next);

    // If this isn't a range, then add a mask to the list and go on.
    if (*next != '-') {
      ADD_MASK_OSID(start, osId2Mask, maxOsId);

      // Skip optional comma.
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // This is a range. Skip over the '-' and read in the 2nd int.
    next++; // skip '-'
    SKIP_WS(next);
    scan = next;
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");

    // Check for a stride parameter.
    stride = 1;
    SKIP_WS(next);
    if (*next == ':') {
      // A stride is specified. Skip over the ':' and read the 3rd int.
      int sign = +1;
      next++; // skip ':'
      SKIP_WS(next);
      scan = next;
      if (*next == '-') {
        sign = -1;
        next++;
        SKIP_WS(next);
        scan = next;
      }
      KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                  "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
      stride *= sign;
    }

    // Do some range checks.
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

    // Add the mask for each OS proc # to the list.
    if (stride > 0) {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start <= end);
    } else {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start >= end);
    }

    // Skip optional comma.
    SKIP_WS(next);
    if (*next == ',') {
      next++;
    }
    scan = next;
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}
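// Subplace syntax handled below: within braces, each subplace is
// <lower-bound>[:<count>[:<stride>]]. Illustrative expansions:
//   {0:4}   -> procs 0,1,2,3
//   {0:4:2} -> procs 0,2,4,6
// All procs named by a subplace list are OR'ed into the one place (tempMask).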
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affin_mask_t *osId2Mask,
                                        int maxOsId, kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;

    // Valid follow sets are ',' ':' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none))) {
          KMP_WARNING(AffIgnoreInvalidProcID, start);
        }
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;

    // Valid follow sets are ',' ':' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          if (__kmp_affinity_verbose ||
              (__kmp_affinity_warnings &&
               (__kmp_affinity_type != affinity_none))) {
            KMP_WARNING(AffIgnoreInvalidProcID, start);
          }
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter.
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;

    // Valid follow sets are ',' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          if (__kmp_affinity_verbose ||
              (__kmp_affinity_warnings &&
               (__kmp_affinity_type != affinity_none))) {
            KMP_WARNING(AffIgnoreInvalidProcID, start);
          }
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
}
static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
                                int maxOsId, kmp_affin_mask_t *tempMask,
                                int *setSize) {
  const char *next;

  // Valid follow sets are '{' '!' and num.
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      if (__kmp_affinity_verbose ||
          (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
        KMP_WARNING(AffIgnoreInvalidProcID, num);
      }
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
                                      unsigned int *out_numMasks,
                                      const char *placelist,
                                      kmp_affin_mask_t *osId2Mask,
                                      int maxOsId) {
  int i, j, count, stride, sign;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial place to form the
  // current place; previousMask contains the previous place.
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);

    // Valid follow sets are ',' ':' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'

    // Read count parameter.
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;

    // Valid follow sets are ',' ':' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter.
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
                  "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }

    // Add places determined by initial_place : count : stride.
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from it.
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if ((__kmp_affinity_verbose ||
               (__kmp_affinity_warnings &&
                (__kmp_affinity_type != affinity_none))) &&
              i < count - 1) {
            KMP_WARNING(AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;

    // Valid follow sets are ',' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}
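// Example of the <place>:<count>:<stride> expansion above (illustrative):
//   OMP_PLACES="{0,1}:4:2"
// starts from the place {0,1} and shifts every OS proc in it by the stride
// on each iteration, producing the places {0,1},{2,3},{4,5},{6,7}.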
#undef ADD_MASK
#undef ADD_MASK_OSID

// This function figures out the deepest level at which there is at least one
// cluster/core with more than one processing unit bound to it.
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;

  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}

// This function counts the number of clusters/cores at the given level.
static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}

// This function finds to which cluster/core the given processing unit is
// bound.
static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}

// This function finds the maximal number of processing units bound to a
// cluster/core at the given level.
static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}
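// Example for the helpers above (illustrative): on a topology of depth 3
// (socket/core/thread, bottom_level = 2), any hardware thread with a nonzero
// thread id (ids[2] > 0) pushes core_level to 1, so "cores" sit one level
// above the threads; if every core runs a single PU, core_level remains 0
// and the level-0 (socket) groupings play the role of cores.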
static int *procarr = NULL;
static int __kmp_aff_depth = 0;
// Create a one-element mask array (set of places) which only contains the
// initial process's affinity mask.
static void __kmp_create_affinity_none_places() {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(__kmp_affinity_type == affinity_none);
  __kmp_affinity_num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
}
static void __kmp_aux_affinity_initialize(void) {
  if (__kmp_affinity_masks != NULL) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }

  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model. If respect is set, then it is the
  // initialization thread's affinity mask. Otherwise, it is all processors
  // that we know about on the machine.
  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    if (__kmp_affinity_respect_mask) {
      // Count the number of available processors.
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none))) {
          KMP_WARNING(ErrorInitializeAffinity);
        }
        __kmp_affinity_type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }

      if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
      }
    } else {
      if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
      }
      __kmp_avail_proc =
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
#if KMP_OS_WINDOWS
      // Set the process affinity mask since threads' affinity
      // masks must be a subset of the process mask in Windows* OS.
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }

  kmp_i18n_id_t msg_id = kmp_i18n_null;

  // For backward compatibility, setting KMP_CPUINFO_FILE implies
  // KMP_TOPOLOGY_METHOD=cpuinfo.
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  bool success = false;
  if (__kmp_affinity_top_method == affinity_top_method_all) {
// In the default code path, errors are not fatal - we just try using
// another method. We only emit a warning message if affinity is on, or the
// verbose flag is set, and the nowarnings flag was not set.
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && __kmp_affinity_verbose) {
          KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
        }
      } else if (__kmp_affinity_verbose) {
        KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
      }
    }
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_LINUX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX */

#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */

    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }

// If the user has specified that a particular topology discovery method is
// to be used, then we abort if that method fails. The exception is group
// affinity, which might have been implicitly set.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }

#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */

  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // Should not fail.
    KMP_ASSERT(success);
  }

  // Early exit if topology could not be created.
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE() &&
        (__kmp_affinity_verbose ||
         (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none)))) {
      KMP_WARNING(ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (__kmp_affinity_verbose) {
        __kmp_topology->print("KMP_AFFINITY");
      }
    }
    __kmp_affinity_type = affinity_none;
    __kmp_create_affinity_none_places();
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    KMP_AFFINITY_DISABLE();
    return;
  }

  // Canonicalize, print (if requested), apply KMP_HW_SUBSET, and initialize
  // the other data structures which depend on the topology.
  __kmp_topology->canonicalize();
  if (__kmp_affinity_verbose)
    __kmp_topology->print("KMP_AFFINITY");
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered && __kmp_affinity_verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());

  // If KMP_AFFINITY=none, then only create the single "none" place, which is
  // the process's initial affinity mask or the set of all hardware threads,
  // depending on respect/norespect.
  if (__kmp_affinity_type == affinity_none) {
    __kmp_create_affinity_none_places();
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    return;
  }
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by thread Id.
  unsigned maxIndex;
  unsigned numUnique;
  kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique);
  if (__kmp_affinity_gran_levels == 0) {
    KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
  }

  switch (__kmp_affinity_type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
    if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(
          &__kmp_affinity_masks, &__kmp_affinity_num_masks,
          __kmp_affinity_proclist, osId2Mask, maxIndex);
    } else {
      __kmp_affinity_process_placelist(
          &__kmp_affinity_masks, &__kmp_affinity_num_masks,
          __kmp_affinity_proclist, osId2Mask, maxIndex);
    }
    if (__kmp_affinity_num_masks == 0) {
      if (__kmp_affinity_verbose ||
          (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
        KMP_WARNING(AffNoValidProcID);
      }
      __kmp_affinity_type = affinity_none;
      __kmp_create_affinity_none_places();
      return;
    }
    break;

  // The other affinity types rely on sorting the hardware threads according
  // to some permutation of the machine topology tree. Set
  // __kmp_affinity_compact and __kmp_affinity_offset appropriately, then
  // jump to a common code fragment to do the sort and create the array of
  // affinity masks.
  case affinity_logical:
    __kmp_affinity_compact = 0;
    if (__kmp_affinity_offset) {
      __kmp_affinity_offset =
          __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      __kmp_affinity_compact = 1;
      if (__kmp_affinity_compact >= depth) {
        __kmp_affinity_compact = 0;
      }
    } else {
      __kmp_affinity_compact = 0;
    }
    if (__kmp_affinity_offset) {
      __kmp_affinity_offset =
          __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = 0;
    } else {
      __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
    }
    goto sortTopology;

  case affinity_compact:
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = depth - 1;
    }
    goto sortTopology;

  case affinity_balanced:
    if (depth <= 1) {
      if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
        KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
      }
      __kmp_affinity_type = affinity_none;
      __kmp_create_affinity_none_places();
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage.
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
          KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
        }
        __kmp_affinity_type = affinity_none;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (__kmp_affinity_dups) {
      __kmp_affinity_num_masks = __kmp_avail_proc;
    } else {
      __kmp_affinity_num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
      __kmp_affinity_num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);

    // Sort the topology table according to the current setting of
    // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
    __kmp_topology->sort_compact();
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!__kmp_affinity_dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        if (++j >= __kmp_affinity_num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
    }
    // Sort the topology back using ids.
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }

  KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
}
void __kmp_affinity_initialize(void) {
  // Much of the code above was written assuming that if a machine was not
  // affinity capable, then __kmp_affinity_type == affinity_none.
  // We now explicitly represent this as __kmp_affinity_type ==
  // affinity_disabled. There are too many checks for affinity_none in this
  // code. Instead of changing them all, check if affinity is disabled, slam
  // it with affinity_none, call the real initialization routine, then
  // restore __kmp_affinity_type to affinity_disabled.
  int disabled = (__kmp_affinity_type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(disabled);
  }
  if (disabled) {
    __kmp_affinity_type = affinity_none;
  }
  __kmp_aux_affinity_initialize();
  if (disabled) {
    __kmp_affinity_type = affinity_disabled;
  }
}
void __kmp_affinity_uninitialize(void) {
  if (__kmp_affinity_masks != NULL) {
    KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
    __kmp_affinity_masks = NULL;
  }
  if (__kmp_affin_fullMask != NULL) {
    KMP_CPU_FREE(__kmp_affin_fullMask);
    __kmp_affin_fullMask = NULL;
  }
  __kmp_affinity_num_masks = 0;
  __kmp_affinity_type = affinity_default;
  __kmp_affinity_num_places = 0;
  if (__kmp_affinity_proclist != NULL) {
    __kmp_free(__kmp_affinity_proclist);
    __kmp_affinity_proclist = NULL;
  }
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one
  // that has all of the OS proc ids set, or if __kmp_affinity_respect_mask
  // is set, then the full mask is the same as the mask of the
  // initialization thread.
  kmp_affin_mask_t *mask;
  int i;

  if (KMP_AFFINITY_NON_PROC_BIND) {
    if ((__kmp_affinity_type == affinity_none) ||
        (__kmp_affinity_type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
      KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
      i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
      mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
    }
  } else {
    if ((!isa_root) || KMP_HIDDEN_HELPER_THREAD(gtid) ||
        (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
      KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
      i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
      mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
    }
  }

  th->th.th_current_place = i;
  if (isa_root || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a non-OMP_PROC_BIND affinity method, set all threads'
    // place-partition-var to the entire place list.
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
                   gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);

  if (__kmp_affinity_verbose && !KMP_HIDDEN_HELPER_THREAD(gtid)
      // Avoid duplicated output for balanced affinity or full-mask bindings.
      && (__kmp_affinity_type == affinity_none ||
          (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }

  // The hidden helper thread binding is reported separately (verbose only).
  if (__kmp_affinity_verbose && KMP_HIDDEN_HELPER_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY (hidden helper thread)",
               (kmp_int32)getpid(), __kmp_gettid(), gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the
  // user didn't request affinity and this call fails, just continue
  // silently. See CQ171393.
  if (__kmp_affinity_type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
void __kmp_affinity_set_place(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure,
  // and set this thread's affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;

  if (__kmp_affinity_verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity_num_masks - 1;

  // Turn off 4.0 affinity due to kmp_set_affinity call.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
            buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS

  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
            buf);
      });
  return retval;

#else
  (void)retval;

  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;

#endif /* KMP_OS_WINDOWS */
}
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
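// Worked example of the uniform-topology arithmetic below (illustrative):
// nthreads = 6 on ncores = 4 gives chunk = 1, big_cores = 6 % 4 = 2 and
// big_nth = (1 + 1) * 2 = 4, so tids 0-3 are packed two per core onto the
// first two cores and tids 4 and 5 each get a core of their own.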
// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;

  // Do not perform balanced affinity for the hidden helper threads.
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity_gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in HT machine
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core
    int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to it - "big cores"
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);

    // For performance gain, consider the special case nthreads ==
    // __kmp_avail_proc.
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check if this core from procarr[] is in the mask.
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (tid == core) {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
                // For fine granularity it is enough to set the first
                // available osID for this core.
                if (fine_gran) {
                  break;
                }
              }
            }
            break;
          } else {
            core++;
          }
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core.
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors.
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with # procs from x to nth_per_core.
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }

      // Max number of processors
      int nproc = nth_per_core * ncores;
      // An array to keep number of threads per each context
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip the core with 0 processors.
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }

    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  }
}
#if KMP_OS_LINUX || KMP_OS_FREEBSD
// We don't need this entry for Windows because
// there is the GetProcessAffinityMask() api.
//
// The intended usage is indicated by these steps:
// 1) The user gets the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error check the return value
// 4) Use non-OpenMP parallelization
// 5) Reset the affinity to what was stored in step 1)
#ifdef __cplusplus
extern "C"
#endif
    int
    kmp_set_thread_affinity_mask_initial()
// the function returns 0 on success,
//   -1 if we cannot bind the thread
//   >0 (errno) if an error happened during binding
{
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-omp threads.
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}
#endif // KMP_OS_LINUX || KMP_OS_FREEBSD