LLVM OpenMP* Runtime Library
kmp_affinity.cpp
/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need to
  // init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(NULL, nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  thr_bar->base_leaf_kids = (kmp_uint8)machine_hierarchy.numPerLevel[0] - 1;
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
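// Worked example (illustrative comment, not part of the original source): if
// the hierarchy reports numPerLevel[0] == 4, i.e. four threads per leaf group,
// the code above publishes base_leaf_kids = 3 to the barrier state -- the
// number of leaf children a parent waits on besides itself in the
// hierarchical barrier.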

#if KMP_AFFINITY_SUPPORTED

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }

void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity_type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}

#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%d-%d", static_cast<int>(start),
                   static_cast<int>(previous));
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%d", static_cast<int>(start));
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%d", static_cast<int>(previous));
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
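// Worked example (illustrative, not from the original source): a mask with
// bits {0,1,2,5,7,8} set prints as "0-2,5,7,8" -- the run 0..2 collapses to a
// range because it spans three bits, while the two-bit run 7..8 is printed
// as "7,8" by the one-or-two-bit branch above.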

// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%d-%d", static_cast<int>(start),
                          static_cast<int>(previous));
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%d", static_cast<int>(start));
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%d", static_cast<int>(previous));
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}

void __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    for (proc = 0; proc < __kmp_xproc; proc++) {
      KMP_CPU_SET(proc, mask);
    }
  }
}

// When sorting by labels, __kmp_affinity_assign_child_nums() must first be
// called to renumber the labels from [0..n] and place them into the child_num
// vector of the address object. This is done in case the labels used for
// the children at one node of the hierarchy differ from those used for
// another node at the same level. Example: suppose the machine has 2 nodes
// with 2 packages each. The first node contains packages 601 and 602, and
// the second node contains packages 603 and 604. If we try to sort the table
// for "scatter" affinity, the table will still be sorted 601, 602, 603, 604
// because we are paying attention to the labels themselves, not the ordinal
// child numbers. By using the child numbers in the sort, the result is
// {0,0}=601, {0,1}=603, {1,0}=602, {1,1}=604.
static void __kmp_affinity_assign_child_nums(AddrUnsPair *address2os,
                                             int numAddrs) {
  KMP_DEBUG_ASSERT(numAddrs > 0);
  int depth = address2os->first.depth;
  unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
  unsigned *lastLabel = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
  int labCt;
  for (labCt = 0; labCt < depth; labCt++) {
    address2os[0].first.childNums[labCt] = counts[labCt] = 0;
    lastLabel[labCt] = address2os[0].first.labels[labCt];
  }
  int i;
  for (i = 1; i < numAddrs; i++) {
    for (labCt = 0; labCt < depth; labCt++) {
      if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
        int labCt2;
        for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
          counts[labCt2] = 0;
          lastLabel[labCt2] = address2os[i].first.labels[labCt2];
        }
        counts[labCt]++;
        lastLabel[labCt] = address2os[i].first.labels[labCt];
        break;
      }
    }
    for (labCt = 0; labCt < depth; labCt++) {
      address2os[i].first.childNums[labCt] = counts[labCt];
    }
    for (; labCt < (int)Address::maxDepth; labCt++) {
      address2os[i].first.childNums[labCt] = 0;
    }
  }
  __kmp_free(lastLabel);
  __kmp_free(counts);
}

// All of the __kmp_affinity_create_*_map() routines should set
// __kmp_affinity_masks to a vector of affinity mask objects of length
// __kmp_affinity_num_masks, if __kmp_affinity_type != affinity_none, and
// return the number of levels in the machine topology tree (zero if
// __kmp_affinity_type == affinity_none).
//
// All of the __kmp_affinity_create_*_map() routines should set
// *__kmp_affin_fullMask to the affinity mask for the initialization thread.
// They need to save and restore the mask, and it could be needed later, so
// saving it is just an optimization to avoid calling kmp_get_system_affinity()
// again.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
static int *__kmp_pu_os_idx = NULL;

// __kmp_affinity_uniform_topology() doesn't work when called from
// places which support arbitrarily many levels in the machine topology
// map, i.e. the non-default cases in __kmp_affinity_create_cpuinfo_map()
// and __kmp_affinity_create_x2apicid_map().
inline static bool __kmp_affinity_uniform_topology() {
  return __kmp_avail_proc == (__kmp_nThreadsPerCore * nCoresPerPkg * nPackages);
}
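// Illustrative example (not in the original source): on a machine modeled as
// 2 packages x 4 cores x 2 threads, the product is 16; if all 16 OS procs are
// available (__kmp_avail_proc == 16) the topology is uniform, whereas a
// restricted affinity mask exposing, say, 12 procs makes it non-uniform.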

// Print out the detailed machine topology map, i.e. the physical locations
// of each OS proc.
static void __kmp_affinity_print_topology(AddrUnsPair *address2os, int len,
                                          int depth, int pkgLevel,
                                          int coreLevel, int threadLevel) {
  int proc;

  KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
  for (proc = 0; proc < len; proc++) {
    int level;
    kmp_str_buf_t buf;
    __kmp_str_buf_init(&buf);
    for (level = 0; level < depth; level++) {
      if (level == threadLevel) {
        __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Thread));
      } else if (level == coreLevel) {
        __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Core));
      } else if (level == pkgLevel) {
        __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Package));
      } else if (level > pkgLevel) {
        __kmp_str_buf_print(&buf, "%s_%d ", KMP_I18N_STR(Node),
                            level - pkgLevel - 1);
      } else {
        __kmp_str_buf_print(&buf, "L%d ", level);
      }
      __kmp_str_buf_print(&buf, "%d ", address2os[proc].first.labels[level]);
    }
    KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", address2os[proc].second,
               buf.str);
    __kmp_str_buf_free(&buf);
  }
}
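// For reference (hedged example; the exact wording comes from the i18n
// message catalog): with pkgLevel=0, coreLevel=1, threadLevel=2, the verbose
// output printed via OSProcMapToPack resembles lines such as
//   OS proc 4 maps to Package 0 Core 2 Thread 0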

#if KMP_USE_HWLOC

static void __kmp_affinity_print_hwloc_tp(AddrUnsPair *addrP, int len,
                                          int depth, int *levels) {
  int proc;
  kmp_str_buf_t buf;
  __kmp_str_buf_init(&buf);
  KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
  for (proc = 0; proc < len; proc++) {
    __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Package),
                        addrP[proc].first.labels[0]);
    if (depth > 1) {
      int level = 1; // iterate over levels
      int label = 1; // iterate over labels
      if (__kmp_numa_detected)
        // node level follows package
        if (levels[level++] > 0)
          __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Node),
                              addrP[proc].first.labels[label++]);
      if (__kmp_tile_depth > 0)
        // tile level follows node if any, or package
        if (levels[level++] > 0)
          __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Tile),
                              addrP[proc].first.labels[label++]);
      if (levels[level++] > 0)
        // core level follows
        __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Core),
                            addrP[proc].first.labels[label++]);
      if (levels[level++] > 0)
        // thread level comes last
        __kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Thread),
                            addrP[proc].first.labels[label++]);
      KMP_DEBUG_ASSERT(label == depth);
    }
    KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", addrP[proc].second, buf.str);
    __kmp_str_buf_clear(&buf);
  }
  __kmp_str_buf_free(&buf);
}

static int nNodePerPkg, nTilePerPkg, nTilePerNode, nCorePerNode, nCorePerTile;

// This function removes the topology levels that are radix 1 and don't offer
// further information about the topology. The most common example is a
// machine with one thread context per core: the thread-context level offers
// no unique labels, so it is removed.
// return value: the new depth of address2os
static int __kmp_affinity_remove_radix_one_levels(AddrUnsPair *addrP, int nTh,
                                                  int depth, int *levels) {
  int level;
  int i;
  int radix1_detected;
  int new_depth = depth;
  for (level = depth - 1; level > 0; --level) {
    // Detect if this level is radix 1
    radix1_detected = 1;
    for (i = 1; i < nTh; ++i) {
      if (addrP[0].first.labels[level] != addrP[i].first.labels[level]) {
        // There are differing label values for this level so it stays
        radix1_detected = 0;
        break;
      }
    }
    if (!radix1_detected)
      continue;
    // Radix 1 was detected
    --new_depth;
    levels[level] = -1; // mark level as not present in address2os array
    if (level == new_depth) {
      // "turn off" deepest level, just decrement the depth that removes
      // the level from address2os array
      for (i = 0; i < nTh; ++i) {
        addrP[i].first.depth--;
      }
    } else {
      // For other levels, we move labels over and also reduce the depth
      int j;
      for (j = level; j < new_depth; ++j) {
        for (i = 0; i < nTh; ++i) {
          addrP[i].first.labels[j] = addrP[i].first.labels[j + 1];
          addrP[i].first.depth--;
        }
        levels[j + 1] -= 1;
      }
    }
  }
  return new_depth;
}
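// Worked example (illustrative, not part of the original source): on a
// machine with one hardware thread per core, every address carries the same
// thread label (0), so the deepest level is radix 1. A depth-3 map
// {pkg, core, thread} is squeezed to the depth-2 map {pkg, core}, levels[2]
// is set to -1, and the function returns 2.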

// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
// HWLOC_OBJ_PU, then this will return the number of PU's under the package
// object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL &&
       hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, obj->type, first) ==
           obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}
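// Usage note: __kmp_affinity_create_hwloc_map() below uses exactly this
// pattern to derive defaults, e.g.
//   nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(
//       hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0), HWLOC_OBJ_CORE);
// counting the cores beneath the first package object.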

static int __kmp_hwloc_count_children_by_depth(hwloc_topology_t t,
                                               hwloc_obj_t o,
                                               kmp_hwloc_depth_t depth,
                                               hwloc_obj_t *f) {
  if (o->depth == depth) {
    if (*f == NULL)
      *f = o; // output first descendant found
    return 1;
  }
  int sum = 0;
  for (unsigned i = 0; i < o->arity; i++)
    sum += __kmp_hwloc_count_children_by_depth(t, o->children[i], depth, f);
  return sum; // will be 0 if no one found (as PU arity is 0)
}

static int __kmp_hwloc_count_children_by_type(hwloc_topology_t t, hwloc_obj_t o,
                                              hwloc_obj_type_t type,
                                              hwloc_obj_t *f) {
  if (!hwloc_compare_types(o->type, type)) {
    if (*f == NULL)
      *f = o; // output first descendant found
    return 1;
  }
  int sum = 0;
  for (unsigned i = 0; i < o->arity; i++)
    sum += __kmp_hwloc_count_children_by_type(t, o->children[i], type, f);
  return sum; // will be 0 if no one found (as PU arity is 0)
}

static int __kmp_hwloc_process_obj_core_pu(AddrUnsPair *addrPair,
                                           int &nActiveThreads,
                                           int &num_active_cores,
                                           hwloc_obj_t obj, int depth,
                                           int *labels) {
  hwloc_obj_t core = NULL;
  hwloc_topology_t &tp = __kmp_hwloc_topology;
  int NC = __kmp_hwloc_count_children_by_type(tp, obj, HWLOC_OBJ_CORE, &core);
  for (int core_id = 0; core_id < NC; ++core_id, core = core->next_cousin) {
    hwloc_obj_t pu = NULL;
    KMP_DEBUG_ASSERT(core != NULL);
    int num_active_threads = 0;
    int NT = __kmp_hwloc_count_children_by_type(tp, core, HWLOC_OBJ_PU, &pu);
    // int NT = core->arity; pu = core->first_child; // faster?
    for (int pu_id = 0; pu_id < NT; ++pu_id, pu = pu->next_cousin) {
      KMP_DEBUG_ASSERT(pu != NULL);
      if (!KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask))
        continue; // skip inactive (inaccessible) unit
      Address addr(depth + 2);
      KA_TRACE(20, ("Hwloc inserting %d (%d) %d (%d) %d (%d) into address2os\n",
                    obj->os_index, obj->logical_index, core->os_index,
                    core->logical_index, pu->os_index, pu->logical_index));
      for (int i = 0; i < depth; ++i)
        addr.labels[i] = labels[i]; // package, etc.
      addr.labels[depth] = core_id; // core
      addr.labels[depth + 1] = pu_id; // pu
      addrPair[nActiveThreads] = AddrUnsPair(addr, pu->os_index);
      __kmp_pu_os_idx[nActiveThreads] = pu->os_index;
      nActiveThreads++;
      ++num_active_threads; // count active threads per core
    }
    if (num_active_threads) { // were there any active threads on the core?
      ++__kmp_ncores; // count total active cores
      ++num_active_cores; // count active cores per socket
      if (num_active_threads > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = num_active_threads; // calc maximum
    }
  }
  return 0;
}

// Check if a NUMA node is detected below the package,
// and if a tile object is detected, record its depth
static int __kmp_hwloc_check_numa() {
  hwloc_topology_t &tp = __kmp_hwloc_topology;
  hwloc_obj_t hT, hC, hL, hN, hS; // hwloc objects (pointers to)
  int depth, l2cache_depth, package_depth;

  // Get some PU
  hT = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PU, 0);
  if (hT == NULL) // something has gone wrong
    return 1;

  // check NUMA node below PACKAGE
  hN = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hT);
  hS = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hT);
  KMP_DEBUG_ASSERT(hS != NULL);
  if (hN != NULL && hN->depth > hS->depth) {
    __kmp_numa_detected = TRUE; // socket includes node(s)
    if (__kmp_affinity_gran == affinity_gran_node) {
      __kmp_affinity_gran = affinity_gran_numa;
    }
  }

  package_depth = hwloc_get_type_depth(tp, HWLOC_OBJ_PACKAGE);
  l2cache_depth = hwloc_get_cache_type_depth(tp, 2, HWLOC_OBJ_CACHE_UNIFIED);
  // check tile, get object by depth because of multiple caches possible
  depth = (l2cache_depth < package_depth) ? package_depth : l2cache_depth;
  hL = hwloc_get_ancestor_obj_by_depth(tp, depth, hT);
  hC = NULL; // not used, but reset it here just in case
  if (hL != NULL &&
      __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC) > 1)
    __kmp_tile_depth = depth; // tile consists of multiple cores
  return 0;
}

static int __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
                                           kmp_i18n_id_t *const msg_id) {
  hwloc_topology_t &tp = __kmp_hwloc_topology; // shortcut of a long name
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // Save the affinity mask for the current thread.
  kmp_affin_mask_t *oldMask;
  KMP_CPU_ALLOC(oldMask);
  __kmp_get_system_affinity(oldMask, TRUE);
  __kmp_hwloc_check_numa();

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);

    nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(
        hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0), HWLOC_OBJ_CORE);
    __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(
        hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0), HWLOC_OBJ_PU);
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      if (__kmp_affinity_uniform_topology()) {
        KMP_INFORM(Uniform, "KMP_AFFINITY");
      } else {
        KMP_INFORM(NonUniform, "KMP_AFFINITY");
      }
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }
    KMP_CPU_FREE(oldMask);
    return 0;
  }

  int depth = 3;
  int levels[5] = {0, 1, 2, 3, 4}; // package, [node,] [tile,] core, thread
  int labels[3] = {0}; // package [,node] [,tile] - head of labels array
  if (__kmp_numa_detected)
    ++depth;
  if (__kmp_tile_depth)
    ++depth;

  // Allocate the data structure to be returned.
  AddrUnsPair *retval =
      (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore,
  // nCoresPerPkg, & nPackages. Make sure all these vars are set
  // correctly, and return if affinity is not enabled.

  hwloc_obj_t socket, node, tile;
  int nActiveThreads = 0;
  int socket_id = 0;
  // re-calculate globals to count only accessible resources
  __kmp_ncores = nPackages = nCoresPerPkg = __kmp_nThreadsPerCore = 0;
  nNodePerPkg = nTilePerPkg = nTilePerNode = nCorePerNode = nCorePerTile = 0;
  for (socket = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0); socket != NULL;
       socket = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PACKAGE, socket),
      socket_id++) {
    labels[0] = socket_id;
    if (__kmp_numa_detected) {
      int NN;
      int n_active_nodes = 0;
      node = NULL;
      NN = __kmp_hwloc_count_children_by_type(tp, socket, HWLOC_OBJ_NUMANODE,
                                              &node);
      for (int node_id = 0; node_id < NN; ++node_id, node = node->next_cousin) {
        labels[1] = node_id;
        if (__kmp_tile_depth) {
          // NUMA + tiles
          int NT;
          int n_active_tiles = 0;
          tile = NULL;
          NT = __kmp_hwloc_count_children_by_depth(tp, node, __kmp_tile_depth,
                                                   &tile);
          for (int tl_id = 0; tl_id < NT; ++tl_id, tile = tile->next_cousin) {
            labels[2] = tl_id;
            int n_active_cores = 0;
            __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
                                            n_active_cores, tile, 3, labels);
            if (n_active_cores) { // were there any active cores on the tile?
              ++n_active_tiles; // count active tiles per node
              if (n_active_cores > nCorePerTile)
                nCorePerTile = n_active_cores; // calc maximum
            }
          }
          if (n_active_tiles) { // were there any active tiles on the node?
            ++n_active_nodes; // count active nodes per package
            if (n_active_tiles > nTilePerNode)
              nTilePerNode = n_active_tiles; // calc maximum
          }
        } else {
          // NUMA, no tiles
          int n_active_cores = 0;
          __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
                                          n_active_cores, node, 2, labels);
          if (n_active_cores) { // were there any active cores on the node?
            ++n_active_nodes; // count active nodes per package
            if (n_active_cores > nCorePerNode)
              nCorePerNode = n_active_cores; // calc maximum
          }
        }
      }
      if (n_active_nodes) { // were there any active nodes on the socket?
        ++nPackages; // count total active packages
        if (n_active_nodes > nNodePerPkg)
          nNodePerPkg = n_active_nodes; // calc maximum
      }
    } else {
      if (__kmp_tile_depth) {
        // no NUMA, tiles
        int NT;
        int n_active_tiles = 0;
        tile = NULL;
        NT = __kmp_hwloc_count_children_by_depth(tp, socket, __kmp_tile_depth,
                                                 &tile);
        for (int tl_id = 0; tl_id < NT; ++tl_id, tile = tile->next_cousin) {
          labels[1] = tl_id;
          int n_active_cores = 0;
          __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
                                          n_active_cores, tile, 2, labels);
          if (n_active_cores) { // were there any active cores on the tile?
            ++n_active_tiles; // count active tiles per package
            if (n_active_cores > nCorePerTile)
              nCorePerTile = n_active_cores; // calc maximum
          }
        }
        if (n_active_tiles) { // were there any active tiles on the socket?
          ++nPackages; // count total active packages
          if (n_active_tiles > nTilePerPkg)
            nTilePerPkg = n_active_tiles; // calc maximum
        }
      } else {
        // no NUMA, no tiles
        int n_active_cores = 0;
        __kmp_hwloc_process_obj_core_pu(retval, nActiveThreads, n_active_cores,
                                        socket, 1, labels);
        if (n_active_cores) { // were there any active cores on the socket?
          ++nPackages; // count total active packages
          if (n_active_cores > nCoresPerPkg)
            nCoresPerPkg = n_active_cores; // calc maximum
        }
      }
    }
  }

  // If there's only one thread context to bind to, return now.
  KMP_DEBUG_ASSERT(nActiveThreads == __kmp_avail_proc);
  KMP_ASSERT(nActiveThreads > 0);
  if (nActiveThreads == 1) {
    __kmp_ncores = nPackages = 1;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

      KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
      if (__kmp_affinity_respect_mask) {
        KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
      } else {
        KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
      }
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      KMP_INFORM(Uniform, "KMP_AFFINITY");
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }

    if (__kmp_affinity_type == affinity_none) {
      __kmp_free(retval);
      KMP_CPU_FREE(oldMask);
      return 0;
    }

    // Form an Address object which only includes the package level.
    Address addr(1);
    addr.labels[0] = retval[0].first.labels[0];
    retval[0].first = addr;

    if (__kmp_affinity_gran_levels < 0) {
      __kmp_affinity_gran_levels = 0;
    }

    if (__kmp_affinity_verbose) {
      __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
    }

    *address2os = retval;
    KMP_CPU_FREE(oldMask);
    return 1;
  }

  // Sort the table by physical Id.
  qsort(retval, nActiveThreads, sizeof(*retval),
        __kmp_affinity_cmp_Address_labels);

  // Check to see if the machine topology is uniform
  int nPUs = nPackages * __kmp_nThreadsPerCore;
  if (__kmp_numa_detected) {
    if (__kmp_tile_depth) { // NUMA + tiles
      nPUs *= (nNodePerPkg * nTilePerNode * nCorePerTile);
    } else { // NUMA, no tiles
      nPUs *= (nNodePerPkg * nCorePerNode);
    }
  } else {
    if (__kmp_tile_depth) { // no NUMA, tiles
      nPUs *= (nTilePerPkg * nCorePerTile);
    } else { // no NUMA, no tiles
      nPUs *= nCoresPerPkg;
    }
  }
  unsigned uniform = (nPUs == nActiveThreads);

  // Print the machine topology summary.
  if (__kmp_affinity_verbose) {
    char mask[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
    if (__kmp_affinity_respect_mask) {
      KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
    } else {
      KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
    }
    KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
    if (uniform) {
      KMP_INFORM(Uniform, "KMP_AFFINITY");
    } else {
      KMP_INFORM(NonUniform, "KMP_AFFINITY");
    }
    if (__kmp_numa_detected) {
      if (__kmp_tile_depth) { // NUMA + tiles
        KMP_INFORM(TopologyExtraNoTi, "KMP_AFFINITY", nPackages, nNodePerPkg,
                   nTilePerNode, nCorePerTile, __kmp_nThreadsPerCore,
                   __kmp_ncores);
      } else { // NUMA, no tiles
        KMP_INFORM(TopologyExtraNode, "KMP_AFFINITY", nPackages, nNodePerPkg,
                   nCorePerNode, __kmp_nThreadsPerCore, __kmp_ncores);
        nPUs *= (nNodePerPkg * nCorePerNode);
      }
    } else {
      if (__kmp_tile_depth) { // no NUMA, tiles
        KMP_INFORM(TopologyExtraTile, "KMP_AFFINITY", nPackages, nTilePerPkg,
                   nCorePerTile, __kmp_nThreadsPerCore, __kmp_ncores);
      } else { // no NUMA, no tiles
        kmp_str_buf_t buf;
        __kmp_str_buf_init(&buf);
        __kmp_str_buf_print(&buf, "%d", nPackages);
        KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
                   __kmp_nThreadsPerCore, __kmp_ncores);
        __kmp_str_buf_free(&buf);
      }
    }
  }

  if (__kmp_affinity_type == affinity_none) {
    __kmp_free(retval);
    KMP_CPU_FREE(oldMask);
    return 0;
  }

  int depth_full = depth; // number of levels before compressing
  // Find any levels with radix 1, and remove them from the map
  // (except for the package level).
  depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth,
                                                 levels);
  KMP_DEBUG_ASSERT(__kmp_affinity_gran != affinity_gran_default);
  if (__kmp_affinity_gran_levels < 0) {
    // Set the granularity level based on what levels are modeled
    // in the machine topology map.
    __kmp_affinity_gran_levels = 0; // lowest level (e.g. fine)
    if (__kmp_affinity_gran > affinity_gran_thread) {
      for (int i = 1; i <= depth_full; ++i) {
        if (__kmp_affinity_gran <= i) // only count deeper levels
          break;
        if (levels[depth_full - i] > 0)
          __kmp_affinity_gran_levels++;
      }
    }
    if (__kmp_affinity_gran > affinity_gran_package)
      __kmp_affinity_gran_levels++; // e.g. granularity = group
  }

  if (__kmp_affinity_verbose)
    __kmp_affinity_print_hwloc_tp(retval, nActiveThreads, depth, levels);

  KMP_CPU_FREE(oldMask);
  *address2os = retval;
  return depth;
}
#endif // KMP_USE_HWLOC

// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
static int __kmp_affinity_create_flat_map(AddrUnsPair **address2os,
                                          kmp_i18n_id_t *const msg_id) {
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // Even if __kmp_affinity_type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffFlatTopology, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      KMP_INFORM(Uniform, "KMP_AFFINITY");
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }
    return 0;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly, and return now if affinity is
  // not enabled.
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
  if (__kmp_affinity_verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              __kmp_affin_fullMask);

    KMP_INFORM(AffCapableUseFlat, "KMP_AFFINITY");
    if (__kmp_affinity_respect_mask) {
      KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
    } else {
      KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
    }
    KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
    KMP_INFORM(Uniform, "KMP_AFFINITY");
    KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
               __kmp_nThreadsPerCore, __kmp_ncores);
  }
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
  if (__kmp_affinity_type == affinity_none) {
    int avail_ct = 0;
    int i;
    KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
      if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask))
        continue;
      __kmp_pu_os_idx[avail_ct++] = i; // suppose indices are flat
    }
    return 0;
  }

  // Construct the data structure to be returned.
  *address2os =
      (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
    Address addr(1);
    addr.labels[0] = i;
    (*address2os)[avail_ct++] = AddrUnsPair(addr, i);
  }
  if (__kmp_affinity_verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }

  if (__kmp_affinity_gran_levels < 0) {
    // Only the package level is modeled in the machine topology map,
    // so the #levels of granularity is either 0 or 1.
    if (__kmp_affinity_gran > affinity_gran_package) {
      __kmp_affinity_gran_levels = 1;
    } else {
      __kmp_affinity_gran_levels = 0;
    }
  }
  return 1;
}
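// Illustrative note (not in the original source): the flat map treats each OS
// proc as its own "package", so with 8 available procs the returned table is
// simply ({0},0), ({1},1), ..., ({7},7) and the reported depth is 1.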

#if KMP_GROUP_AFFINITY

// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level 1.
// This facilitates letting the threads float among all procs in a group,
// if granularity=group (the default when there are multiple groups).
static int __kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
                                                kmp_i18n_id_t *const msg_id) {
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // If we aren't affinity capable, then return now.
  // The flat mapping will be used.
  if (!KMP_AFFINITY_CAPABLE()) {
    // FIXME set *msg_id
    return -1;
  }

  // Construct the data structure to be returned.
  *address2os =
      (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
    Address addr(2);
    addr.labels[0] = i / (CHAR_BIT * sizeof(DWORD_PTR));
    addr.labels[1] = i % (CHAR_BIT * sizeof(DWORD_PTR));
    (*address2os)[avail_ct++] = AddrUnsPair(addr, i);

    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffOSProcToGroup, "KMP_AFFINITY", i, addr.labels[0],
                 addr.labels[1]);
    }
  }

  if (__kmp_affinity_gran_levels < 0) {
    if (__kmp_affinity_gran == affinity_gran_group) {
      __kmp_affinity_gran_levels = 1;
    } else if ((__kmp_affinity_gran == affinity_gran_fine) ||
               (__kmp_affinity_gran == affinity_gran_thread)) {
      __kmp_affinity_gran_levels = 0;
    } else {
      const char *gran_str = NULL;
      if (__kmp_affinity_gran == affinity_gran_core) {
        gran_str = "core";
      } else if (__kmp_affinity_gran == affinity_gran_package) {
        gran_str = "package";
      } else if (__kmp_affinity_gran == affinity_gran_node) {
        gran_str = "node";
      } else {
        KMP_ASSERT(0);
      }

      // Warning: can't use affinity granularity \"gran\" with group topology
      // method, using "thread"
      __kmp_affinity_gran_levels = 0;
    }
  }
  return 2;
}
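// Worked example (illustrative, not part of the original source): on a 64-bit
// Windows build, CHAR_BIT * sizeof(DWORD_PTR) is 64, so OS proc 75 gets the
// two-level address {labels[0]=1, labels[1]=11}: processor group 1, proc 11
// within that group.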

#endif /* KMP_GROUP_AFFINITY */

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

static int __kmp_cpuid_mask_width(int count) {
  int r = 0;

  while ((1 << r) < count)
    ++r;
  return r;
}
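// Worked example (illustrative, not part of the original source):
// __kmp_cpuid_mask_width(6) returns 3, since 1 << 3 == 8 is the smallest
// power of two >= 6; i.e. 3 bits are needed to encode 6 distinct ids.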

class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; // ""
  unsigned maxThreadsPerPkg; // ""
  unsigned pkgId; // inferred from above values
  unsigned coreId; // ""
  unsigned threadId; // ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}

// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
// an algorithm which cycles through the available os threads, setting
// the current thread's affinity mask to that thread, and then retrieves
// the Apic Id for each thread context using the cpuid instruction.
static int __kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
                                            kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *address2os = NULL;
  *msg_id = kmp_i18n_null;

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return -1;
  }

  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable
  // of calling __kmp_get_system_affinity() and __kmp_set_system_affinity(),
  // then we need to do something else - use the defaults that we calculated
  // from issuing cpuid without binding to each proc.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    // On some OS/chip combinations where HT is supported by the chip but is
    // disabled, this value will be 2 on a single core chip. Usually, it will
    // be 2 if HT is enabled and 1 if HT is disabled.
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4). 1 must be added to the
    // encoded value.
    //
    // The author of cpu_count.cpp treated this as only an upper bound on the
    // number of cores, but I haven't seen any cases where it was greater than
    // the actual number of cores, so we will treat it as exact in this block
    // of code.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
    // or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // There is no way to reliably tell if HT is enabled without issuing the
    // cpuid instruction from every thread, and correlating the cpuid info, so
    // if the machine is not affinity capable, we assume that HT is off. We
    // have seen quite a few machines where maxThreadsPerPkg is 2, yet the
    // machine does not support HT.
    //
    // - Older OSes are usually found on machines with older chips, which do
    //   not support HT.
    // - The performance penalty for mistakenly identifying a machine as HT
    //   when it isn't (which results in blocktime being incorrectly set to 0)
    //   is greater than the penalty for mistakenly identifying a machine as
    //   being 1 thread/core when it is really HT enabled (which results in
    //   blocktime being incorrectly set to a positive value).
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    if (__kmp_affinity_verbose) {
      KMP_INFORM(AffNotCapableUseLocCpuid, "KMP_AFFINITY");
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      if (__kmp_affinity_uniform_topology()) {
        KMP_INFORM(Uniform, "KMP_AFFINITY");
      } else {
        KMP_INFORM(NonUniform, "KMP_AFFINITY");
      }
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }
    return 0;
  }

  // From here on, we can assume that it is safe to call
  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
  // __kmp_affinity_type = affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affin_mask_t *oldMask;
  KMP_CPU_ALLOC(oldMask);
  KMP_ASSERT(oldMask != NULL);
  __kmp_get_system_affinity(oldMask, TRUE);

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  //
  // The relevant information is:
  // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
  //   has a unique Apic Id, which is of the form pkg# : core# : thread#.
  // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
  //   of this field determines the width of the core# + thread# fields in the
  //   Apic Id. It is also an upper bound on the number of threads per
  //   package, but it has been verified that situations happen where it is
  //   not exact. In particular, on certain OS/chip combinations where Intel(R)
  //   Hyper-Threading Technology is supported by the chip but has been
  //   disabled, the value of this field will be 2 (for a single core chip).
  //   On other OS/chip combinations supporting Intel(R) Hyper-Threading
  //   Technology, the value of this field will be 1 when Intel(R)
  //   Hyper-Threading Technology is disabled and 2 when it is enabled.
  // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
  //   of this field (+1) determines the width of the core# field in the Apic
  //   Id. The comments in "cpucount.cpp" say that this value is an upper
  //   bound, but the IA-32 architecture manual says that it is exactly the
  //   number of cores per package, and I haven't seen any case where it
  //   wasn't.
  //
  // From this information, deduce the package Id, core Id, and thread Id,
  // and set the corresponding fields in the apicThreadInfo struct.
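  // Worked example (illustrative, not from the original source): suppose
  // maxThreadsPerPkg = 4 and maxCoresPerPkg = 2. Then widthCT =
  // __kmp_cpuid_mask_width(4) = 2, widthC = __kmp_cpuid_mask_width(2) = 1,
  // and widthT = 1. An apicId of 0b1011 decodes as pkgId = 0b1011 >> 2 = 2,
  // coreId = (0b1011 >> 1) & 0b1 = 1, and threadId = 0b1011 & 0b1 = 1.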
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_set_system_affinity(oldMask, TRUE);
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return -1;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
    // value.
    //
    // First, we need to check if cpuid(4) is supported on this chip. To see if
    // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
    // or greater.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained
    // locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // I've never seen this one happen, but I suppose it could, if the cpuid
      // instruction on a chip was really screwed up. Make sure to restore the
      // affinity mask before the tail call.
      __kmp_set_system_affinity(oldMask, TRUE);
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return -1;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }

  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  __kmp_set_system_affinity(oldMask, TRUE);

  // If there's only one thread context to bind to, form an Address object
  // with depth 1 and return immediately (or, if affinity is off, set
  // address2os to NULL and return).
  //
  // If it is configured to omit the package level when there is only a single
  // package, the logic at the end of this routine won't work if there is only
  // a single thread - it would try to form an Address object with depth 0.
  KMP_ASSERT(nApics > 0);
  if (nApics == 1) {
    __kmp_ncores = nPackages = 1;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

      KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
      if (__kmp_affinity_respect_mask) {
        KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
      } else {
        KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
      }
      KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
      KMP_INFORM(Uniform, "KMP_AFFINITY");
      KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
                 __kmp_nThreadsPerCore, __kmp_ncores);
    }

    if (__kmp_affinity_type == affinity_none) {
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      return 0;
    }

    *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
    Address addr(1);
    addr.labels[0] = threadInfo[0].pkgId;
    (*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);

    if (__kmp_affinity_gran_levels < 0) {
      __kmp_affinity_gran_levels = 0;
    }

    if (__kmp_affinity_verbose) {
      __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
    }

    __kmp_free(threadInfo);
    KMP_CPU_FREE(oldMask);
    return 1;
  }

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really don't
  // know the radix of any of the fields. pkgId's may be sparsely assigned
  // among the chips on a system. Although coreId's are usually assigned
  // [0 .. coresPerPkg-1] and threadId's are usually assigned
  // [0..threadsPerCore-1], we don't want to make any such assumptions.
  //
  // For that matter, we don't know what coresPerPkg and threadsPerCore (or
  // the total # packages) are at this point - we want to determine that now.
  // We only have an upper bound on the first two figures.
  //
  // We also perform a consistency check at this point: the values returned by
  // the cpuid instruction for any thread bound to a given package had better
  // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
  nPackages = 1;
  nCoresPerPkg = 1;
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  unsigned pkgCt = 1; // to determine radii
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consistency checks
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      pkgCt++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package, so go on to the next iteration without
      // doing any consistency checks. Reset the consistency check vars,
      // though.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return -1;
    }

    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      KMP_CPU_FREE(oldMask);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return -1;
    }
  }
  nPackages = pkgCt;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, &
  // nPackages. Make sure all these vars are set correctly, and return now if
  // affinity is not enabled.
  __kmp_ncores = nCores;
  if (__kmp_affinity_verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);

    KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
    if (__kmp_affinity_respect_mask) {
      KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
    } else {
      KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
    }
    KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
    if (__kmp_affinity_uniform_topology()) {
      KMP_INFORM(Uniform, "KMP_AFFINITY");
    } else {
      KMP_INFORM(NonUniform, "KMP_AFFINITY");
    }
    KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
               __kmp_nThreadsPerCore, __kmp_ncores);
  }
  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
  for (i = 0; i < nApics; ++i) {
    __kmp_pu_os_idx[i] = threadInfo[i].osId;
  }
  if (__kmp_affinity_type == affinity_none) {
    __kmp_free(threadInfo);
    KMP_CPU_FREE(oldMask);
    return 0;
  }

  // Now that we've determined the number of packages, the number of cores per
  // package, and the number of threads per core, we can construct the data
  // structure that is to be returned.
  int pkgLevel = 0;
  int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
  int threadLevel =
      (__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
  unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
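  // Illustrative example (not in the original source): on a single-socket,
  // 4-core machine with HT off, nCoresPerPkg = 4 and __kmp_nThreadsPerCore =
  // 1, so coreLevel = 1, threadLevel = -1, and depth = 2: each Address holds
  // just {pkgId, coreId}.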
1439
1440 KMP_ASSERT(depth > 0);
1441 *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);
1442
1443 for (i = 0; i < nApics; ++i) {
1444 Address addr(depth);
1445 unsigned os = threadInfo[i].osId;
1446 int d = 0;
1447
1448 if (pkgLevel >= 0) {
1449 addr.labels[d++] = threadInfo[i].pkgId;
1450 }
1451 if (coreLevel >= 0) {
1452 addr.labels[d++] = threadInfo[i].coreId;
1453 }
1454 if (threadLevel >= 0) {
1455 addr.labels[d++] = threadInfo[i].threadId;
1456 }
1457 (*address2os)[i] = AddrUnsPair(addr, os);
1458 }
1459
1460 if (__kmp_affinity_gran_levels < 0) {
1461 // Set the granularity level based on what levels are modeled in the machine
1462 // topology map.
1463 __kmp_affinity_gran_levels = 0;
1464 if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
1465 __kmp_affinity_gran_levels++;
1466 }
1467 if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1468 __kmp_affinity_gran_levels++;
1469 }
1470 if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
1471 __kmp_affinity_gran_levels++;
1472 }
1473 }
1474
1475 if (__kmp_affinity_verbose) {
1476 __kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
1477 coreLevel, threadLevel);
1478 }
1479
1480 __kmp_free(threadInfo);
1481 KMP_CPU_FREE(oldMask);
1482 return depth;
1483}
1484
1485// Intel(R) microarchitecture code name Nehalem, Dunnington and later
1486// architectures support a newer interface for specifying the x2APIC Ids,
1487// based on cpuid leaf 11.
1488static int __kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
1489 kmp_i18n_id_t *const msg_id) {
1490 kmp_cpuid buf;
1491 *address2os = NULL;
1492 *msg_id = kmp_i18n_null;
1493
1494 // Check to see if cpuid leaf 11 is supported.
1495 __kmp_x86_cpuid(0, 0, &buf);
1496 if (buf.eax < 11) {
1497 *msg_id = kmp_i18n_str_NoLeaf11Support;
1498 return -1;
1499 }
1500 __kmp_x86_cpuid(11, 0, &buf);
1501 if (buf.ebx == 0) {
1502 *msg_id = kmp_i18n_str_NoLeaf11Support;
1503 return -1;
1504 }
1505
1506 // Find the number of levels in the machine topology. While we're at it, get
1507 // the default values for __kmp_nThreadsPerCore & nCoresPerPkg. We will try to
1508 // get more accurate values later by explicitly counting them, but get
1509 // reasonable defaults now, in case we return early.
1510 int level;
1511 int threadLevel = -1;
1512 int coreLevel = -1;
1513 int pkgLevel = -1;
1514 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
1515
1516 for (level = 0;; level++) {
1517 if (level > 31) {
1518 // FIXME: Hack for DPD200163180
1519 //
1520 // If level is big then something went wrong -> exiting
1521 //
1522 // There could actually be 32 valid levels in the machine topology, but so
1523 // far, the only machine we have seen which does not exit this loop before
1524 // iteration 32 has fubar x2APIC settings.
1525 //
1526 // For now, just reject this case based upon loop trip count.
1527 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1528 return -1;
1529 }
1530 __kmp_x86_cpuid(11, level, &buf);
1531 if (buf.ebx == 0) {
1532 if (pkgLevel < 0) {
1533 // Will infer nPackages from __kmp_xproc
1534 pkgLevel = level;
1535 level++;
1536 }
1537 break;
1538 }
1539 int kind = (buf.ecx >> 8) & 0xff;
1540 if (kind == 1) {
1541 // SMT level
1542 threadLevel = level;
1543 coreLevel = -1;
1544 pkgLevel = -1;
1545 __kmp_nThreadsPerCore = buf.ebx & 0xffff;
1546 if (__kmp_nThreadsPerCore == 0) {
1547 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1548 return -1;
1549 }
1550 } else if (kind == 2) {
1551 // core level
1552 coreLevel = level;
1553 pkgLevel = -1;
1554 nCoresPerPkg = buf.ebx & 0xffff;
1555 if (nCoresPerPkg == 0) {
1556 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1557 return -1;
1558 }
1559 } else {
1560 if (level <= 0) {
1561 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1562 return -1;
1563 }
1564 if (pkgLevel >= 0) {
1565 continue;
1566 }
1567 pkgLevel = level;
1568 nPackages = buf.ebx & 0xffff;
1569 if (nPackages == 0) {
1570 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1571 return -1;
1572 }
1573 }
1574 }
1575 int depth = level;
1576
1577 // In the above loop, "level" was counted from the finest level (usually
1578 // thread) to the coarsest. The caller expects that we will place the labels
1579 // in (*address2os)[].first.labels[] in the inverse order, so we need to
1580 // invert the variables that record which level means what.
1581 if (threadLevel >= 0) {
1582 threadLevel = depth - threadLevel - 1;
1583 }
1584 if (coreLevel >= 0) {
1585 coreLevel = depth - coreLevel - 1;
1586 }
1587 KMP_DEBUG_ASSERT(pkgLevel >= 0);
1588 pkgLevel = depth - pkgLevel - 1;
1589
1590 // The algorithm used starts by setting the affinity to each available thread
1591 // and retrieving info from the cpuid instruction, so if we are not capable of
1592 // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
1593 // need to do something else - use the defaults that we calculated from
1594 // issuing cpuid without binding to each proc.
1595 if (!KMP_AFFINITY_CAPABLE()) {
1596 // Hack to try to infer the machine topology using only the data
1597 // available from cpuid on the current thread, and __kmp_xproc.
1598 KMP_ASSERT(__kmp_affinity_type == affinity_none);
1599
1600 __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1601 nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
1602 if (__kmp_affinity_verbose) {
1603 KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
1604 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1605 if (__kmp_affinity_uniform_topology()) {
1606 KMP_INFORM(Uniform, "KMP_AFFINITY");
1607 } else {
1608 KMP_INFORM(NonUniform, "KMP_AFFINITY");
1609 }
1610 KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1611 __kmp_nThreadsPerCore, __kmp_ncores);
1612 }
1613 return 0;
1614 }
1615
1616 // From here on, we can assume that it is safe to call
1617 // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
1618 // __kmp_affinity_type = affinity_none.
1619
1620 // Save the affinity mask for the current thread.
1621 kmp_affin_mask_t *oldMask;
1622 KMP_CPU_ALLOC(oldMask);
1623 __kmp_get_system_affinity(oldMask, TRUE);
1624
1625 // Allocate the data structure to be returned.
1626 AddrUnsPair *retval =
1627 (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
1628
1629 // Run through each of the available contexts, binding the current thread
1630 // to it, and obtaining the pertinent information using the cpuid instr.
1631 unsigned int proc;
1632 int nApics = 0;
1633 KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
1634 // Skip this proc if it is not included in the machine model.
1635 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
1636 continue;
1637 }
1638 KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);
1639
1640 __kmp_affinity_dispatch->bind_thread(proc);
1641
1642 // Extract labels for each level in the machine topology map from Apic ID.
1643 Address addr(depth);
1644 int prev_shift = 0;
1645
1646 for (level = 0; level < depth; level++) {
1647 __kmp_x86_cpuid(11, level, &buf);
1648 unsigned apicId = buf.edx;
1649 if (buf.ebx == 0) {
1650 if (level != depth - 1) {
1651 KMP_CPU_FREE(oldMask);
1652 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1653 return -1;
1654 }
1655 addr.labels[depth - level - 1] = apicId >> prev_shift;
1656 level++;
1657 break;
1658 }
1659 int shift = buf.eax & 0x1f;
1660 int mask = (1 << shift) - 1;
1661 addr.labels[depth - level - 1] = (apicId & mask) >> prev_shift;
1662 prev_shift = shift;
1663 }
1664 if (level != depth) {
1665 KMP_CPU_FREE(oldMask);
1666 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1667 return -1;
1668 }
1669
1670 retval[nApics] = AddrUnsPair(addr, proc);
1671 nApics++;
1672 }
1673
1674 // We've collected all the info we need.
1675 // Restore the old affinity mask for this thread.
1676 __kmp_set_system_affinity(oldMask, TRUE);
1677
1678 // If there's only one thread context to bind to, return now.
1679 KMP_ASSERT(nApics > 0);
1680 if (nApics == 1) {
1681 __kmp_ncores = nPackages = 1;
1682 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1683 if (__kmp_affinity_verbose) {
1684 char buf[KMP_AFFIN_MASK_PRINT_LEN];
1685 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1686
1687 KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
1688 if (__kmp_affinity_respect_mask) {
1689 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
1690 } else {
1691 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
1692 }
1693 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1694 KMP_INFORM(Uniform, "KMP_AFFINITY");
1695 KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1696 __kmp_nThreadsPerCore, __kmp_ncores);
1697 }
1698
1699 if (__kmp_affinity_type == affinity_none) {
1700 __kmp_free(retval);
1701 KMP_CPU_FREE(oldMask);
1702 return 0;
1703 }
1704
1705 // Form an Address object which only includes the package level.
1706 Address addr(1);
1707 addr.labels[0] = retval[0].first.labels[pkgLevel];
1708 retval[0].first = addr;
1709
1710 if (__kmp_affinity_gran_levels < 0) {
1711 __kmp_affinity_gran_levels = 0;
1712 }
1713
1714 if (__kmp_affinity_verbose) {
1715 __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
1716 }
1717
1718 *address2os = retval;
1719 KMP_CPU_FREE(oldMask);
1720 return 1;
1721 }
1722
1723 // Sort the table by physical Id.
1724 qsort(retval, nApics, sizeof(*retval), __kmp_affinity_cmp_Address_labels);
1725
1726 // Find the radix at each of the levels.
1727 unsigned *totals = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1728 unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1729 unsigned *maxCt = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1730 unsigned *last = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
1731 for (level = 0; level < depth; level++) {
1732 totals[level] = 1;
1733 maxCt[level] = 1;
1734 counts[level] = 1;
1735 last[level] = retval[0].first.labels[level];
1736 }
1737
1738 // From here on, the iteration variable "level" runs from the finest level to
1739 // the coarsest, i.e. we iterate forward through
1740 // (*address2os)[].first.labels[] - in the previous loops, we iterated
1741 // backwards.
1742 for (proc = 1; (int)proc < nApics; proc++) {
1743 int level;
1744 for (level = 0; level < depth; level++) {
1745 if (retval[proc].first.labels[level] != last[level]) {
1746 int j;
1747 for (j = level + 1; j < depth; j++) {
1748 totals[j]++;
1749 counts[j] = 1;
1750 // The line below (if uncommented) would cause incorrect topology
1751 // information to be printed in case the max value for some level
1752 // (maxCt[level]) is encountered earlier in the array than some smaller
1753 // value. For example, if pkg0 has 4 cores and pkg1 has 2 cores, then
1754 // maxCt[1] == 2
1755 // whereas it must be 4.
1756 // TODO!!! Check if the line can remain commented out safely
1757 // maxCt[j] = 1;
1758 last[j] = retval[proc].first.labels[j];
1759 }
1760 totals[level]++;
1761 counts[level]++;
1762 if (counts[level] > maxCt[level]) {
1763 maxCt[level] = counts[level];
1764 }
1765 last[level] = retval[proc].first.labels[level];
1766 break;
1767 } else if (level == depth - 1) {
1768 __kmp_free(last);
1769 __kmp_free(maxCt);
1770 __kmp_free(counts);
1771 __kmp_free(totals);
1772 __kmp_free(retval);
1773 KMP_CPU_FREE(oldMask);
1774 *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
1775 return -1;
1776 }
1777 }
1778 }
1779
1780 // When affinity is off, this routine will still be called to set
1781 // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1782 // Make sure all these vars are set correctly, and return if affinity is not
1783 // enabled.
1784 if (threadLevel >= 0) {
1785 __kmp_nThreadsPerCore = maxCt[threadLevel];
1786 } else {
1787 __kmp_nThreadsPerCore = 1;
1788 }
1789 nPackages = totals[pkgLevel];
1790
1791 if (coreLevel >= 0) {
1792 __kmp_ncores = totals[coreLevel];
1793 nCoresPerPkg = maxCt[coreLevel];
1794 } else {
1795 __kmp_ncores = nPackages;
1796 nCoresPerPkg = 1;
1797 }
1798
1799 // Check to see if the machine topology is uniform
1800 unsigned prod = maxCt[0];
1801 for (level = 1; level < depth; level++) {
1802 prod *= maxCt[level];
1803 }
1804 bool uniform = (prod == totals[level - 1]);
1805
1806 // Print the machine topology summary.
1807 if (__kmp_affinity_verbose) {
1808 char mask[KMP_AFFIN_MASK_PRINT_LEN];
1809 __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1810
1811 KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
1812 if (__kmp_affinity_respect_mask) {
1813 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
1814 } else {
1815 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
1816 }
1817 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1818 if (uniform) {
1819 KMP_INFORM(Uniform, "KMP_AFFINITY");
1820 } else {
1821 KMP_INFORM(NonUniform, "KMP_AFFINITY");
1822 }
1823
1824 kmp_str_buf_t buf;
1825 __kmp_str_buf_init(&buf);
1826
1827 __kmp_str_buf_print(&buf, "%d", totals[0]);
1828 for (level = 1; level <= pkgLevel; level++) {
1829 __kmp_str_buf_print(&buf, " x %d", maxCt[level]);
1830 }
1831 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
1832 __kmp_nThreadsPerCore, __kmp_ncores);
1833
1834 __kmp_str_buf_free(&buf);
1835 }
1836 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1837 KMP_DEBUG_ASSERT(nApics == __kmp_avail_proc);
1838 __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1839 for (proc = 0; (int)proc < nApics; ++proc) {
1840 __kmp_pu_os_idx[proc] = retval[proc].second;
1841 }
1842 if (__kmp_affinity_type == affinity_none) {
1843 __kmp_free(last);
1844 __kmp_free(maxCt);
1845 __kmp_free(counts);
1846 __kmp_free(totals);
1847 __kmp_free(retval);
1848 KMP_CPU_FREE(oldMask);
1849 return 0;
1850 }
1851
1852 // Find any levels with radix 1, and remove them from the map
1853 // (except for the package level).
1854 int new_depth = 0;
1855 for (level = 0; level < depth; level++) {
1856 if ((maxCt[level] == 1) && (level != pkgLevel)) {
1857 continue;
1858 }
1859 new_depth++;
1860 }
1861
1862 // If we are removing any levels, allocate a new vector to return,
1863 // and copy the relevant information to it.
1864 if (new_depth != depth) {
1865 AddrUnsPair *new_retval =
1866 (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);
1867 for (proc = 0; (int)proc < nApics; proc++) {
1868 Address addr(new_depth);
1869 new_retval[proc] = AddrUnsPair(addr, retval[proc].second);
1870 }
1871 int new_level = 0;
1872 int newPkgLevel = -1;
1873 int newCoreLevel = -1;
1874 int newThreadLevel = -1;
1875 for (level = 0; level < depth; level++) {
1876 if ((maxCt[level] == 1) && (level != pkgLevel)) {
1877 // Remove this level. Never remove the package level
1878 continue;
1879 }
1880 if (level == pkgLevel) {
1881 newPkgLevel = new_level;
1882 }
1883 if (level == coreLevel) {
1884 newCoreLevel = new_level;
1885 }
1886 if (level == threadLevel) {
1887 newThreadLevel = new_level;
1888 }
1889 for (proc = 0; (int)proc < nApics; proc++) {
1890 new_retval[proc].first.labels[new_level] =
1891 retval[proc].first.labels[level];
1892 }
1893 new_level++;
1894 }
1895
1896 __kmp_free(retval);
1897 retval = new_retval;
1898 depth = new_depth;
1899 pkgLevel = newPkgLevel;
1900 coreLevel = newCoreLevel;
1901 threadLevel = newThreadLevel;
1902 }
1903
1904 if (__kmp_affinity_gran_levels < 0) {
1905 // Set the granularity level based on what levels are modeled
1906 // in the machine topology map.
1907 __kmp_affinity_gran_levels = 0;
1908 if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
1909 __kmp_affinity_gran_levels++;
1910 }
1911 if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1912 __kmp_affinity_gran_levels++;
1913 }
1914 if (__kmp_affinity_gran > affinity_gran_package) {
1915 __kmp_affinity_gran_levels++;
1916 }
1917 }
1918
1919 if (__kmp_affinity_verbose) {
1920 __kmp_affinity_print_topology(retval, nApics, depth, pkgLevel, coreLevel,
1921 threadLevel);
1922 }
1923
1924 __kmp_free(last);
1925 __kmp_free(maxCt);
1926 __kmp_free(counts);
1927 __kmp_free(totals);
1928 KMP_CPU_FREE(oldMask);
1929 *address2os = retval;
1930 return depth;
1931}
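
// The label-extraction loop above peels fields off the x2APIC ID using the
// per-level shift widths reported in EAX[4:0] of cpuid leaf 11. A standalone
// sketch with made-up shift widths (1 thread bit, then 4 total low bits),
// independent of the runtime and of real hardware:
#include <cstdio>

int main() {
  // Hypothetical x2APIC ID 0x1B: package 1, core 5, thread 1 under the
  // assumed widths below.
  unsigned apicId = 0x1B;
  int shifts[] = {1, 4}; // example widths; real ones come from cpuid EAX[4:0]
  int prev_shift = 0;
  for (int level = 0; level < 2; ++level) {
    unsigned mask = (1u << shifts[level]) - 1;
    std::printf("level %d label = %u\n", level, (apicId & mask) >> prev_shift);
    prev_shift = shifts[level];
  }
  // Whatever remains above the last reported level is the package id.
  std::printf("package label = %u\n", apicId >> prev_shift); // prints 1
}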
1932
1933#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1934
1935#define osIdIndex 0
1936#define threadIdIndex 1
1937#define coreIdIndex 2
1938#define pkgIdIndex 3
1939#define nodeIdIndex 4
1940
1941typedef unsigned *ProcCpuInfo;
1942static unsigned maxIndex = pkgIdIndex;
1943
1944static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
1945 const void *b) {
1946 unsigned i;
1947 const unsigned *aa = *(unsigned *const *)a;
1948 const unsigned *bb = *(unsigned *const *)b;
1949 for (i = maxIndex;; i--) {
1950 if (aa[i] < bb[i])
1951 return -1;
1952 if (aa[i] > bb[i])
1953 return 1;
1954 if (i == osIdIndex)
1955 break;
1956 }
1957 return 0;
1958}
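
// The comparator above orders records most-significant-field first, so that
// qsort() produces pkg-major order. A self-contained demonstration with a
// local record layout (mirroring, but not using, the osId/threadId/coreId/
// pkgId indices defined above):
#include <cstdio>
#include <cstdlib>

#define N_FIELDS 4 // [0]=osId, [1]=threadId, [2]=coreId, [3]=pkgId

static int cmp_phys_id(const void *a, const void *b) {
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (int i = N_FIELDS - 1; i >= 0; --i) { // compare pkg, core, thread, os
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
  }
  return 0;
}

int main() {
  unsigned r0[N_FIELDS] = {0, 1, 0, 1}; // os 0 lives on pkg 1
  unsigned r1[N_FIELDS] = {1, 0, 1, 0}; // os 1 lives on pkg 0
  unsigned *recs[] = {r0, r1};
  std::qsort(recs, 2, sizeof(*recs), cmp_phys_id);
  std::printf("first after sort: os %u (pkg %u)\n", recs[0][0], recs[0][3]);
  // prints: first after sort: os 1 (pkg 0)
}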
1959
1960#if KMP_USE_HIER_SCHED
1961// Set the array sizes for the hierarchy layers
1962static void __kmp_dispatch_set_hierarchy_values() {
1963 // Set the maximum number of L1's to number of cores
1964 // Set the maximum number of L2's to either the number of cores / 2 for
1965 // the Intel(R) Xeon Phi(TM) coprocessor formerly codenamed Knights Landing,
1966 // or the number of cores for Intel(R) Xeon(R) processors
1967 // Set the maximum number of NUMA nodes and L3's to number of packages
1968 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
1969 nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
1970 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
1971#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
1972 KMP_MIC_SUPPORTED
1973 if (__kmp_mic_type >= mic3)
1974 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
1975 else
1976#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
1977 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
1978 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
1979 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
1980 __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
1981 // Set the number of threads per unit
1982 // Number of hardware threads per L1/L2/L3/NUMA/LOOP
1983 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
1984 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
1985 __kmp_nThreadsPerCore;
1986#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
1987 KMP_MIC_SUPPORTED
1988 if (__kmp_mic_type >= mic3)
1989 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
1990 2 * __kmp_nThreadsPerCore;
1991 else
1992#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
1993 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
1994 __kmp_nThreadsPerCore;
1995 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
1996 nCoresPerPkg * __kmp_nThreadsPerCore;
1997 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
1998 nCoresPerPkg * __kmp_nThreadsPerCore;
1999 __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
2000 nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2001}
2002
2003// Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
2004// i.e., this thread's L1 or this thread's L2, etc.
2005int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
2006 int index = type + 1;
2007 int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
2008 KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
2009 if (type == kmp_hier_layer_e::LAYER_THREAD)
2010 return tid;
2011 else if (type == kmp_hier_layer_e::LAYER_LOOP)
2012 return 0;
2013 KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
2014 if (tid >= num_hw_threads)
2015 tid = tid % num_hw_threads;
2016 return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
2017}
2018
2019// Return the number of t1's per t2
2020int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
2021 int i1 = t1 + 1;
2022 int i2 = t2 + 1;
2023 KMP_DEBUG_ASSERT(i1 <= i2);
2024 KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
2025 KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
2026 KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
2027 // (nthreads/t2) / (nthreads/t1) = t1 / t2
2028 return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
2029}
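
// Worked example (standalone, with made-up machine numbers) of the index
// arithmetic in __kmp_dispatch_get_index(): a hardware thread id maps to its
// enclosing unit by dividing by threads-per-unit and wrapping by unit count.
#include <cstdio>

int main() {
  // Hypothetical machine: 8 cores x 2 HW threads, one L1 per core.
  int threads_per_l1 = 2;
  int num_l1 = 8;
  int tids[] = {0, 1, 2, 15};
  for (int i = 0; i < 4; ++i) {
    int tid = tids[i];
    std::printf("tid %2d -> L1 unit %d\n", tid,
                (tid / threads_per_l1) % num_l1);
  }
  // tids 0 and 1 share L1 unit 0; tid 2 starts unit 1; tid 15 lands on 7.
}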
2030#endif // KMP_USE_HIER_SCHED
2031
2032// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
2033// affinity map.
2034static int __kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os,
2035 int *line,
2036 kmp_i18n_id_t *const msg_id,
2037 FILE *f) {
2038 *address2os = NULL;
2039 *msg_id = kmp_i18n_null;
2040
2041 // Scan the file, counting the number of "processor" (osId) fields, and
2042 // finding the highest value of <n> for a node_<n> field.
2043 char buf[256];
2044 unsigned num_records = 0;
2045 while (!feof(f)) {
2046 buf[sizeof(buf) - 1] = 1;
2047 if (!fgets(buf, sizeof(buf), f)) {
2048 // Read error, presumably because of EOF
2049 break;
2050 }
2051
2052 char s1[] = "processor";
2053 if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2054 num_records++;
2055 continue;
2056 }
2057
2058 // FIXME - this will match "node_<n> <garbage>"
2059 unsigned level;
2060 if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2061 if (nodeIdIndex + level >= maxIndex) {
2062 maxIndex = nodeIdIndex + level;
2063 }
2064 continue;
2065 }
2066 }
2067
2068 // Check for empty file / no valid processor records, or too many. The number
2069 // of records can't exceed the number of valid bits in the affinity mask.
2070 if (num_records == 0) {
2071 *line = 0;
2072 *msg_id = kmp_i18n_str_NoProcRecords;
2073 return -1;
2074 }
2075 if (num_records > (unsigned)__kmp_xproc) {
2076 *line = 0;
2077 *msg_id = kmp_i18n_str_TooManyProcRecords;
2078 return -1;
2079 }
2080
2081 // Set the file pointer back to the beginning, so that we can scan the file
2082 // again, this time performing a full parse of the data. Allocate a vector of
2083 // ProcCpuInfo objects, where we will place the data. Adding an extra element
2084 // at the end allows us to remove a lot of extra checks for termination
2085 // conditions.
2086 if (fseek(f, 0, SEEK_SET) != 0) {
2087 *line = 0;
2088 *msg_id = kmp_i18n_str_CantRewindCpuinfo;
2089 return -1;
2090 }
2091
2092 // Allocate the array of records to store the proc info in. The dummy
2093 // element at the end makes the logic in filling them out easier to code.
2094 unsigned **threadInfo =
2095 (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
2096 unsigned i;
2097 for (i = 0; i <= num_records; i++) {
2098 threadInfo[i] =
2099 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2100 }
2101
2102#define CLEANUP_THREAD_INFO \
2103 for (i = 0; i <= num_records; i++) { \
2104 __kmp_free(threadInfo[i]); \
2105 } \
2106 __kmp_free(threadInfo);
2107
2108 // A value of UINT_MAX means that we didn't find the field
2109 unsigned __index;
2110
2111#define INIT_PROC_INFO(p) \
2112 for (__index = 0; __index <= maxIndex; __index++) { \
2113 (p)[__index] = UINT_MAX; \
2114 }
2115
2116 for (i = 0; i <= num_records; i++) {
2117 INIT_PROC_INFO(threadInfo[i]);
2118 }
2119
2120 unsigned num_avail = 0;
2121 *line = 0;
2122 while (!feof(f)) {
2123 // Create an inner scoping level, so that all the goto targets at the end of
2124 // the loop appear in an outer scoping level. This avoids warnings about
2125 // jumping past an initialization to a target in the same block.
2126 {
2127 buf[sizeof(buf) - 1] = 1;
2128 bool long_line = false;
2129 if (!fgets(buf, sizeof(buf), f)) {
2130 // Read error, presumably because of EOF
2131 // If there is valid data in threadInfo[num_avail], then fake
2132 // a blank line to ensure that the last address gets parsed.
2133 bool valid = false;
2134 for (i = 0; i <= maxIndex; i++) {
2135 if (threadInfo[num_avail][i] != UINT_MAX) {
2136 valid = true;
2137 }
2138 }
2139 if (!valid) {
2140 break;
2141 }
2142 buf[0] = 0;
2143 } else if (!buf[sizeof(buf) - 1]) {
2144 // The line is longer than the buffer. Set a flag and don't
2145 // emit an error if we were going to ignore the line, anyway.
2146 long_line = true;
2147
2148#define CHECK_LINE \
2149 if (long_line) { \
2150 CLEANUP_THREAD_INFO; \
2151 *msg_id = kmp_i18n_str_LongLineCpuinfo; \
2152 return -1; \
2153 }
2154 }
2155 (*line)++;
2156
2157 char s1[] = "processor";
2158 if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2159 CHECK_LINE;
2160 char *p = strchr(buf + sizeof(s1) - 1, ':');
2161 unsigned val;
2162 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2163 goto no_val;
2164 if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
2165#if KMP_ARCH_AARCH64
2166 // Handle the old AArch64 /proc/cpuinfo layout differently,
2167 // it contains all of the 'processor' entries listed in a
2168 // single 'Processor' section, therefore the normal check
2169 // for duplicates in that section will always fail.
2170 num_avail++;
2171#else
2172 goto dup_field;
2173#endif
2174 threadInfo[num_avail][osIdIndex] = val;
2175#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2176 char path[256];
2177 KMP_SNPRINTF(
2178 path, sizeof(path),
2179 "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2180 threadInfo[num_avail][osIdIndex]);
2181 __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2182
2183 KMP_SNPRINTF(path, sizeof(path),
2184 "/sys/devices/system/cpu/cpu%u/topology/core_id",
2185 threadInfo[num_avail][osIdIndex]);
2186 __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
2187 continue;
2188#else
2189 }
2190 char s2[] = "physical id";
2191 if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2192 CHECK_LINE;
2193 char *p = strchr(buf + sizeof(s2) - 1, ':');
2194 unsigned val;
2195 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2196 goto no_val;
2197 if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
2198 goto dup_field;
2199 threadInfo[num_avail][pkgIdIndex] = val;
2200 continue;
2201 }
2202 char s3[] = "core id";
2203 if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2204 CHECK_LINE;
2205 char *p = strchr(buf + sizeof(s3) - 1, ':');
2206 unsigned val;
2207 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2208 goto no_val;
2209 if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
2210 goto dup_field;
2211 threadInfo[num_avail][coreIdIndex] = val;
2212 continue;
2213#endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2214 }
2215 char s4[] = "thread id";
2216 if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
2217 CHECK_LINE;
2218 char *p = strchr(buf + sizeof(s4) - 1, ':');
2219 unsigned val;
2220 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2221 goto no_val;
2222 if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
2223 goto dup_field;
2224 threadInfo[num_avail][threadIdIndex] = val;
2225 continue;
2226 }
2227 unsigned level;
2228 if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2229 CHECK_LINE;
2230 char *p = strchr(buf, ':'); // this is a node_<n> line, not an s4 line
2231 unsigned val;
2232 if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2233 goto no_val;
2234 KMP_ASSERT(nodeIdIndex + level <= maxIndex);
2235 if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
2236 goto dup_field;
2237 threadInfo[num_avail][nodeIdIndex + level] = val;
2238 continue;
2239 }
2240
2241 // We didn't recognize the leading token on the line. There are lots of
2242 // leading tokens that we don't recognize - if the line isn't empty, go on
2243 // to the next line.
2244 if ((*buf != 0) && (*buf != '\n')) {
2245 // If the line is longer than the buffer, read characters
2246 // until we find a newline.
2247 if (long_line) {
2248 int ch;
2249 while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
2250 ;
2251 }
2252 continue;
2253 }
2254
2255 // A newline has signalled the end of the processor record.
2256 // Check that there aren't too many procs specified.
2257 if ((int)num_avail == __kmp_xproc) {
2258 CLEANUP_THREAD_INFO;
2259 *msg_id = kmp_i18n_str_TooManyEntries;
2260 return -1;
2261 }
2262
2263 // Check for missing fields. The osId field must be there, and we
2264 // currently require that the physical id field is specified, also.
2265 if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
2266 CLEANUP_THREAD_INFO;
2267 *msg_id = kmp_i18n_str_MissingProcField;
2268 return -1;
2269 }
2270 if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
2271 CLEANUP_THREAD_INFO;
2272 *msg_id = kmp_i18n_str_MissingPhysicalIDField;
2273 return -1;
2274 }
2275
2276 // Skip this proc if it is not included in the machine model.
2277 if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
2278 __kmp_affin_fullMask)) {
2279 INIT_PROC_INFO(threadInfo[num_avail]);
2280 continue;
2281 }
2282
2283 // We have a successful parse of this proc's info.
2284 // Increment the counter, and prepare for the next proc.
2285 num_avail++;
2286 KMP_ASSERT(num_avail <= num_records);
2287 INIT_PROC_INFO(threadInfo[num_avail]);
2288 }
2289 continue;
2290
2291 no_val:
2292 CLEANUP_THREAD_INFO;
2293 *msg_id = kmp_i18n_str_MissingValCpuinfo;
2294 return -1;
2295
2296 dup_field:
2297 CLEANUP_THREAD_INFO;
2298 *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
2299 return -1;
2300 }
2301 *line = 0;
2302
2303#if KMP_MIC && REDUCE_TEAM_SIZE
2304 unsigned teamSize = 0;
2305#endif // KMP_MIC && REDUCE_TEAM_SIZE
2306
2307 // check for num_records == __kmp_xproc ???
2308
2309 // If there's only one thread context to bind to, form an Address object with
2310 // depth 1 and return immediately (or, if affinity is off, set address2os to
2311 // NULL and return).
2312 //
2313 // If it is configured to omit the package level when there is only a single
2314 // package, the logic at the end of this routine won't work if there is only a
2315 // single thread - it would try to form an Address object with depth 0.
2316 KMP_ASSERT(num_avail > 0);
2317 KMP_ASSERT(num_avail <= num_records);
2318 if (num_avail == 1) {
2319 __kmp_ncores = 1;
2320 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2321 if (__kmp_affinity_verbose) {
2322 if (!KMP_AFFINITY_CAPABLE()) {
2323 KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2324 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2325 KMP_INFORM(Uniform, "KMP_AFFINITY");
2326 } else {
2327 char buf[KMP_AFFIN_MASK_PRINT_LEN];
2328 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
2329 __kmp_affin_fullMask);
2330 KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2331 if (__kmp_affinity_respect_mask) {
2332 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
2333 } else {
2334 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
2335 }
2336 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2337 KMP_INFORM(Uniform, "KMP_AFFINITY");
2338 }
2339 int index;
2340 kmp_str_buf_t buf;
2341 __kmp_str_buf_init(&buf);
2342 __kmp_str_buf_print(&buf, "1");
2343 for (index = maxIndex - 1; index > pkgIdIndex; index--) {
2344 __kmp_str_buf_print(&buf, " x 1");
2345 }
2346 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, 1, 1, 1);
2347 __kmp_str_buf_free(&buf);
2348 }
2349
2350 if (__kmp_affinity_type == affinity_none) {
2351 CLEANUP_THREAD_INFO;
2352 return 0;
2353 }
2354
2355 *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
2356 Address addr(1);
2357 addr.labels[0] = threadInfo[0][pkgIdIndex];
2358 (*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);
2359
2360 if (__kmp_affinity_gran_levels < 0) {
2361 __kmp_affinity_gran_levels = 0;
2362 }
2363
2364 if (__kmp_affinity_verbose) {
2365 __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
2366 }
2367
2368 CLEANUP_THREAD_INFO;
2369 return 1;
2370 }
2371
2372 // Sort the threadInfo table by physical Id.
2373 qsort(threadInfo, num_avail, sizeof(*threadInfo),
2374 __kmp_affinity_cmp_ProcCpuInfo_phys_id);
2375
2376 // The table is now sorted by pkgId / coreId / threadId, but we really don't
2377 // know the radix of any of the fields. pkgId's may be sparsely assigned among
2378 // the chips on a system. Although coreId's are usually assigned
2379 // [0 .. coresPerPkg-1] and threadId's are usually assigned
2380 // [0..threadsPerCore-1], we don't want to make any such assumptions.
2381 //
2382 // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
2383 // total # packages) are at this point - we want to determine that now. We
2384 // only have an upper bound on the first two figures.
2385 unsigned *counts =
2386 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2387 unsigned *maxCt =
2388 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2389 unsigned *totals =
2390 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2391 unsigned *lastId =
2392 (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2393
2394 bool assign_thread_ids = false;
2395 unsigned threadIdCt;
2396 unsigned index;
2397
2398restart_radix_check:
2399 threadIdCt = 0;
2400
2401 // Initialize the counter arrays with data from threadInfo[0].
2402 if (assign_thread_ids) {
2403 if (threadInfo[0][threadIdIndex] == UINT_MAX) {
2404 threadInfo[0][threadIdIndex] = threadIdCt++;
2405 } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
2406 threadIdCt = threadInfo[0][threadIdIndex] + 1;
2407 }
2408 }
2409 for (index = 0; index <= maxIndex; index++) {
2410 counts[index] = 1;
2411 maxCt[index] = 1;
2412 totals[index] = 1;
2413 lastId[index] = threadInfo[0][index];
2415 }
2416
2417 // Run through the rest of the OS procs.
2418 for (i = 1; i < num_avail; i++) {
2419 // Find the most significant index whose id differs from the id for the
2420 // previous OS proc.
2421 for (index = maxIndex; index >= threadIdIndex; index--) {
2422 if (assign_thread_ids && (index == threadIdIndex)) {
2423 // Auto-assign the thread id field if it wasn't specified.
2424 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2425 threadInfo[i][threadIdIndex] = threadIdCt++;
2426 }
2427 // Apparently the thread id field was specified for some entries and not
2428 // others. Start the thread id counter off at the next higher thread id.
2429 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2430 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2431 }
2432 }
2433 if (threadInfo[i][index] != lastId[index]) {
2434 // Run through all indices which are less significant, and reset the
2435 // counts to 1. At all levels up to and including index, we need to
2436 // increment the totals and record the last id.
2437 unsigned index2;
2438 for (index2 = threadIdIndex; index2 < index; index2++) {
2439 totals[index2]++;
2440 if (counts[index2] > maxCt[index2]) {
2441 maxCt[index2] = counts[index2];
2442 }
2443 counts[index2] = 1;
2444 lastId[index2] = threadInfo[i][index2];
2445 }
2446 counts[index]++;
2447 totals[index]++;
2448 lastId[index] = threadInfo[i][index];
2449
2450 if (assign_thread_ids && (index > threadIdIndex)) {
2451
2452#if KMP_MIC && REDUCE_TEAM_SIZE
2453 // The default team size is the total #threads in the machine
2454 // minus 1 thread for every core that has 3 or more threads.
2455 teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2456#endif // KMP_MIC && REDUCE_TEAM_SIZE
2457
2458 // Restart the thread counter, as we are on a new core.
2459 threadIdCt = 0;
2460
2461 // Auto-assign the thread id field if it wasn't specified.
2462 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2463 threadInfo[i][threadIdIndex] = threadIdCt++;
2464 }
2465
2466 // Apparently the thread id field was specified for some entries and
2467 // not others. Start the thread id counter off at the next higher
2468 // thread id.
2469 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2470 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2471 }
2472 }
2473 break;
2474 }
2475 }
2476 if (index < threadIdIndex) {
2477 // If thread ids were specified, it is an error if they are not unique.
2478 // Also, check that we haven't already restarted the loop (to be safe -
2479 // shouldn't need to).
2480 if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
2481 __kmp_free(lastId);
2482 __kmp_free(totals);
2483 __kmp_free(maxCt);
2484 __kmp_free(counts);
2485 CLEANUP_THREAD_INFO;
2486 *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
2487 return -1;
2488 }
2489
2490 // If the thread ids were not specified and we see entries that
2491 // are duplicates, start the loop over and assign the thread ids manually.
2492 assign_thread_ids = true;
2493 goto restart_radix_check;
2494 }
2495 }
2496
2497#if KMP_MIC && REDUCE_TEAM_SIZE
2498 // The default team size is the total #threads in the machine
2499 // minus 1 thread for every core that has 3 or more threads.
2500 teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2501#endif // KMP_MIC && REDUCE_TEAM_SIZE
2502
2503 for (index = threadIdIndex; index <= maxIndex; index++) {
2504 if (counts[index] > maxCt[index]) {
2505 maxCt[index] = counts[index];
2506 }
2507 }
2508
2509 __kmp_nThreadsPerCore = maxCt[threadIdIndex];
2510 nCoresPerPkg = maxCt[coreIdIndex];
2511 nPackages = totals[pkgIdIndex];
2512
2513 // Check to see if the machine topology is uniform
2514 unsigned prod = totals[maxIndex];
2515 for (index = threadIdIndex; index < maxIndex; index++) {
2516 prod *= maxCt[index];
2517 }
2518 bool uniform = (prod == totals[threadIdIndex]);
2519
2520 // When affinity is off, this routine will still be called to set
2521 // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
2522 // Make sure all these vars are set correctly, and return now if affinity is
2523 // not enabled.
2524 __kmp_ncores = totals[coreIdIndex];
2525
2526 if (__kmp_affinity_verbose) {
2527 if (!KMP_AFFINITY_CAPABLE()) {
2528 KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2529 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2530 if (uniform) {
2531 KMP_INFORM(Uniform, "KMP_AFFINITY");
2532 } else {
2533 KMP_INFORM(NonUniform, "KMP_AFFINITY");
2534 }
2535 } else {
2536 char buf[KMP_AFFIN_MASK_PRINT_LEN];
2537 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
2538 __kmp_affin_fullMask);
2539 KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2540 if (__kmp_affinity_respect_mask) {
2541 KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
2542 } else {
2543 KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
2544 }
2545 KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2546 if (uniform) {
2547 KMP_INFORM(Uniform, "KMP_AFFINITY");
2548 } else {
2549 KMP_INFORM(NonUniform, "KMP_AFFINITY");
2550 }
2551 }
2552 kmp_str_buf_t buf;
2553 __kmp_str_buf_init(&buf);
2554
2555 __kmp_str_buf_print(&buf, "%d", totals[maxIndex]);
2556 for (index = maxIndex - 1; index >= pkgIdIndex; index--) {
2557 __kmp_str_buf_print(&buf, " x %d", maxCt[index]);
2558 }
2559 KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, maxCt[coreIdIndex],
2560 maxCt[threadIdIndex], __kmp_ncores);
2561
2562 __kmp_str_buf_free(&buf);
2563 }
2564
2565#if KMP_MIC && REDUCE_TEAM_SIZE
2566 // Set the default team size.
2567 if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
2568 __kmp_dflt_team_nth = teamSize;
2569 KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
2570 "__kmp_dflt_team_nth = %d\n",
2571 __kmp_dflt_team_nth));
2572 }
2573#endif // KMP_MIC && REDUCE_TEAM_SIZE
2574
2575 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
2576 KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
2577 __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
2578 for (i = 0; i < num_avail; ++i) { // fill the os indices
2579 __kmp_pu_os_idx[i] = threadInfo[i][osIdIndex];
2580 }
2581
2582 if (__kmp_affinity_type == affinity_none) {
2583 __kmp_free(lastId);
2584 __kmp_free(totals);
2585 __kmp_free(maxCt);
2586 __kmp_free(counts);
2587 CLEANUP_THREAD_INFO;
2588 return 0;
2589 }
2590
2591 // Count the number of levels which have more nodes at that level than at the
2592 // parent's level (there is an implicit root node above the top level).
2593 // This is equivalent to saying that there is at least one node at this level
2594 // which has a sibling. These levels are in the map, and the package level is
2595 // always in the map.
2596 bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
2597 for (index = threadIdIndex; index < maxIndex; index++) {
2598 KMP_ASSERT(totals[index] >= totals[index + 1]);
2599 inMap[index] = (totals[index] > totals[index + 1]);
2600 }
2601 inMap[maxIndex] = (totals[maxIndex] > 1);
2602 inMap[pkgIdIndex] = true;
2603
2604 int depth = 0;
2605 for (index = threadIdIndex; index <= maxIndex; index++) {
2606 if (inMap[index]) {
2607 depth++;
2608 }
2609 }
2610 KMP_ASSERT(depth > 0);
2611
2612 // Construct the data structure that is to be returned.
2613 *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * num_avail);
2614 int pkgLevel = -1;
2615 int coreLevel = -1;
2616 int threadLevel = -1;
2617
2618 for (i = 0; i < num_avail; ++i) {
2619 Address addr(depth);
2620 unsigned os = threadInfo[i][osIdIndex];
2621 int src_index;
2622 int dst_index = 0;
2623
2624 for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
2625 if (!inMap[src_index]) {
2626 continue;
2627 }
2628 addr.labels[dst_index] = threadInfo[i][src_index];
2629 if (src_index == pkgIdIndex) {
2630 pkgLevel = dst_index;
2631 } else if (src_index == coreIdIndex) {
2632 coreLevel = dst_index;
2633 } else if (src_index == threadIdIndex) {
2634 threadLevel = dst_index;
2635 }
2636 dst_index++;
2637 }
2638 (*address2os)[i] = AddrUnsPair(addr, os);
2639 }
2640
2641 if (__kmp_affinity_gran_levels < 0) {
2642 // Set the granularity level based on what levels are modeled
2643 // in the machine topology map.
2644 unsigned src_index;
2645 __kmp_affinity_gran_levels = 0;
2646 for (src_index = threadIdIndex; src_index <= maxIndex; src_index++) {
2647 if (!inMap[src_index]) {
2648 continue;
2649 }
2650 switch (src_index) {
2651 case threadIdIndex:
2652 if (__kmp_affinity_gran > affinity_gran_thread) {
2653 __kmp_affinity_gran_levels++;
2654 }
2655
2656 break;
2657 case coreIdIndex:
2658 if (__kmp_affinity_gran > affinity_gran_core) {
2659 __kmp_affinity_gran_levels++;
2660 }
2661 break;
2662
2663 case pkgIdIndex:
2664 if (__kmp_affinity_gran > affinity_gran_package) {
2665 __kmp_affinity_gran_levels++;
2666 }
2667 break;
2668 }
2669 }
2670 }
2671
2672 if (__kmp_affinity_verbose) {
2673 __kmp_affinity_print_topology(*address2os, num_avail, depth, pkgLevel,
2674 coreLevel, threadLevel);
2675 }
2676
2677 __kmp_free(inMap);
2678 __kmp_free(lastId);
2679 __kmp_free(totals);
2680 __kmp_free(maxCt);
2681 __kmp_free(counts);
2682 CLEANUP_THREAD_INFO;
2683 return depth;
2684}
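
// The field extraction above boils down to: match a known key at the start
// of the line, find the ':', then sscanf the unsigned value. A compact
// standalone sketch of that pattern on an in-memory sample (not the
// runtime's parser, and no real /proc/cpuinfo involved):
#include <cstdio>
#include <cstring>

int main() {
  const char *lines[] = {"processor : 0", "physical id : 1", "core id : 3"};
  const char *keys[] = {"processor", "physical id", "core id"};
  for (int i = 0; i < 3; ++i) {
    if (std::strncmp(lines[i], keys[i], std::strlen(keys[i])) == 0) {
      const char *p = std::strchr(lines[i], ':');
      unsigned val;
      if (p && std::sscanf(p + 1, "%u", &val) == 1)
        std::printf("%s = %u\n", keys[i], val);
    }
  }
}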
2685
2686// Create and return a table of affinity masks, indexed by OS thread ID.
2687// This routine handles OR'ing together all the affinity masks of threads
2688// that are sufficiently close, if granularity > fine.
2689static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
2690 unsigned *numUnique,
2691 AddrUnsPair *address2os,
2692 unsigned numAddrs) {
2693 // First form a table of affinity masks in order of OS thread id.
2694 unsigned depth;
2695 unsigned maxOsId;
2696 unsigned i;
2697
2698 KMP_ASSERT(numAddrs > 0);
2699 depth = address2os[0].first.depth;
2700
2701 maxOsId = 0;
2702 for (i = numAddrs - 1;; --i) {
2703 unsigned osId = address2os[i].second;
2704 if (osId > maxOsId) {
2705 maxOsId = osId;
2706 }
2707 if (i == 0)
2708 break;
2709 }
2710 kmp_affin_mask_t *osId2Mask;
2711 KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
2712
2713 // Sort the address2os table according to physical order. Doing so will put
2714 // all threads on the same core/package/node in consecutive locations.
2715 qsort(address2os, numAddrs, sizeof(*address2os),
2716 __kmp_affinity_cmp_Address_labels);
2717
2718 KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
2719 if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
2720 KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
2721 }
2722 if (__kmp_affinity_gran_levels >= (int)depth) {
2723 if (__kmp_affinity_verbose ||
2724 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
2725 KMP_WARNING(AffThreadsMayMigrate);
2726 }
2727 }
2728
2729 // Run through the table, forming the masks for all threads on each core.
2730 // Threads on the same core will have identical "Address" objects, not
2731 // considering the last level, which must be the thread id. All threads on a
2732 // core will appear consecutively.
2733 unsigned unique = 0;
2734 unsigned j = 0; // index of 1st thread on core
2735 unsigned leader = 0;
2736 Address *leaderAddr = &(address2os[0].first);
2737 kmp_affin_mask_t *sum;
2738 KMP_CPU_ALLOC_ON_STACK(sum);
2739 KMP_CPU_ZERO(sum);
2740 KMP_CPU_SET(address2os[0].second, sum);
2741 for (i = 1; i < numAddrs; i++) {
2742 // If this thread is sufficiently close to the leader (within the
2743 // granularity setting), then set the bit for this os thread in the
2744 // affinity mask for this group, and go on to the next thread.
2745 if (leaderAddr->isClose(address2os[i].first, __kmp_affinity_gran_levels)) {
2746 KMP_CPU_SET(address2os[i].second, sum);
2747 continue;
2748 }
2749
2750 // For every thread in this group, copy the mask to the thread's entry in
2751 // the osId2Mask table. Mark the first address as a leader.
2752 for (; j < i; j++) {
2753 unsigned osId = address2os[j].second;
2754 KMP_DEBUG_ASSERT(osId <= maxOsId);
2755 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2756 KMP_CPU_COPY(mask, sum);
2757 address2os[j].first.leader = (j == leader);
2758 }
2759 unique++;
2760
2761 // Start a new mask.
2762 leader = i;
2763 leaderAddr = &(address2os[i].first);
2764 KMP_CPU_ZERO(sum);
2765 KMP_CPU_SET(address2os[i].second, sum);
2766 }
2767
2768 // For every thread in last group, copy the mask to the thread's
2769 // entry in the osId2Mask table.
2770 for (; j < i; j++) {
2771 unsigned osId = address2os[j].second;
2772 KMP_DEBUG_ASSERT(osId <= maxOsId);
2773 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2774 KMP_CPU_COPY(mask, sum);
2775 address2os[j].first.leader = (j == leader);
2776 }
2777 unique++;
2778 KMP_CPU_FREE_FROM_STACK(sum);
2779
2780 *maxIndex = maxOsId;
2781 *numUnique = unique;
2782 return osId2Mask;
2783}
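
// Standalone illustration (invented data, std::bitset instead of
// kmp_affin_mask_t) of the grouping pass above: after sorting, consecutive
// records that agree on every label except the finest one are OR'ed into a
// single mask.
#include <bitset>
#include <cstdio>
#include <vector>

struct Rec {
  int core; // the "close" criterion here is simply: same core
  int osId;
};

int main() {
  std::vector<Rec> recs = {{0, 0}, {0, 4}, {1, 1}, {1, 5}}; // sorted by core
  std::vector<std::bitset<8>> masks;
  std::bitset<8> sum;
  int leaderCore = recs[0].core;
  for (const Rec &r : recs) {
    if (r.core != leaderCore) { // not close to the group leader: new group
      masks.push_back(sum);
      sum.reset();
      leaderCore = r.core;
    }
    sum.set(r.osId);
  }
  masks.push_back(sum); // flush the last group
  for (const std::bitset<8> &m : masks)
    std::printf("%s\n", m.to_string().c_str()); // 00010001 then 00100010
}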
2784
2785// Stuff for the affinity proclist parsers. It's easier to declare these vars
2786// as file-static than to try to pass them through the calling sequence of
2787// the recursive-descent OMP_PLACES parser.
2788static kmp_affin_mask_t *newMasks;
2789static int numNewMasks;
2790static int nextNewMask;
2791
2792#define ADD_MASK(_mask) \
2793 { \
2794 if (nextNewMask >= numNewMasks) { \
2795 int i; \
2796 numNewMasks *= 2; \
2797 kmp_affin_mask_t *temp; \
2798 KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \
2799 for (i = 0; i < numNewMasks / 2; i++) { \
2800 kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); \
2801 kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i); \
2802 KMP_CPU_COPY(dest, src); \
2803 } \
2804 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2); \
2805 newMasks = temp; \
2806 } \
2807 KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \
2808 nextNewMask++; \
2809 }
2810
2811#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \
2812 { \
2813 if (((_osId) > _maxOsId) || \
2814 (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \
2815 if (__kmp_affinity_verbose || \
2816 (__kmp_affinity_warnings && \
2817 (__kmp_affinity_type != affinity_none))) { \
2818 KMP_WARNING(AffIgnoreInvalidProcID, _osId); \
2819 } \
2820 } else { \
2821 ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \
2822 } \
2823 }
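
// ADD_MASK above grows its array by doubling: allocate twice the space, copy
// the old masks across, free the old array. The same policy in a standalone
// sketch using plain new/delete instead of the runtime's allocators:
#include <algorithm>
#include <cstdio>

int main() {
  int cap = 2, used = 0;
  int *masks = new int[cap];
  for (int v = 0; v < 9; ++v) {
    if (used >= cap) { // full: double, copy, free the old storage
      int *tmp = new int[cap * 2];
      std::copy(masks, masks + cap, tmp);
      delete[] masks;
      masks = tmp;
      cap *= 2;
    }
    masks[used++] = v;
  }
  std::printf("capacity %d after %d appends\n", cap, used); // 16 after 9
  delete[] masks;
}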
2824
2825// Re-parse the proclist (for the explicit affinity type), and form the list
2826// of affinity newMasks indexed by gtid.
2827static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
2828 unsigned int *out_numMasks,
2829 const char *proclist,
2830 kmp_affin_mask_t *osId2Mask,
2831 int maxOsId) {
2832 int i;
2833 const char *scan = proclist;
2834 const char *next = proclist;
2835
2836 // Use an internal growable array for the temporary mask vector; the
2837 // ADD_MASK macro doubles it whenever it fills up.
2838 numNewMasks = 2;
2839 KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
2840 nextNewMask = 0;
2841 kmp_affin_mask_t *sumMask;
2842 KMP_CPU_ALLOC(sumMask);
2843 int setSize = 0;
2844
2845 for (;;) {
2846 int start, end, stride;
2847
2848 SKIP_WS(scan);
2849 next = scan;
2850 if (*next == '\0') {
2851 break;
2852 }
2853
2854 if (*next == '{') {
2855 int num;
2856 setSize = 0;
2857 next++; // skip '{'
2858 SKIP_WS(next);
2859 scan = next;
2860
2861 // Read the first integer in the set.
2862 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2863 SKIP_DIGITS(next);
2864 num = __kmp_str_to_int(scan, *next);
2865 KMP_ASSERT2(num >= 0, "bad explicit proc list");
2866
2867 // Copy the mask for that osId to the sum (union) mask.
2868 if ((num > maxOsId) ||
2869 (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2870 if (__kmp_affinity_verbose ||
2871 (__kmp_affinity_warnings &&
2872 (__kmp_affinity_type != affinity_none))) {
2873 KMP_WARNING(AffIgnoreInvalidProcID, num);
2874 }
2875 KMP_CPU_ZERO(sumMask);
2876 } else {
2877 KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2878 setSize = 1;
2879 }
2880
2881 for (;;) {
2882 // Check for end of set.
2883 SKIP_WS(next);
2884 if (*next == '}') {
2885 next++; // skip '}'
2886 break;
2887 }
2888
2889 // Skip optional comma.
2890 if (*next == ',') {
2891 next++;
2892 }
2893 SKIP_WS(next);
2894
2895 // Read the next integer in the set.
2896 scan = next;
2897 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2898
2899 SKIP_DIGITS(next);
2900 num = __kmp_str_to_int(scan, *next);
2901 KMP_ASSERT2(num >= 0, "bad explicit proc list");
2902
2903 // Add the mask for that osId to the sum mask.
2904 if ((num > maxOsId) ||
2905 (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2906 if (__kmp_affinity_verbose ||
2907 (__kmp_affinity_warnings &&
2908 (__kmp_affinity_type != affinity_none))) {
2909 KMP_WARNING(AffIgnoreInvalidProcID, num);
2910 }
2911 } else {
2912 KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2913 setSize++;
2914 }
2915 }
2916 if (setSize > 0) {
2917 ADD_MASK(sumMask);
2918 }
2919
2920 SKIP_WS(next);
2921 if (*next == ',') {
2922 next++;
2923 }
2924 scan = next;
2925 continue;
2926 }
2927
2928 // Read the first integer.
2929 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2930 SKIP_DIGITS(next);
2931 start = __kmp_str_to_int(scan, *next);
2932 KMP_ASSERT2(start >= 0, "bad explicit proc list");
2933 SKIP_WS(next);
2934
2935 // If this isn't a range, then add a mask to the list and go on.
2936 if (*next != '-') {
2937 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2938
2939 // Skip optional comma.
2940 if (*next == ',') {
2941 next++;
2942 }
2943 scan = next;
2944 continue;
2945 }
2946
2947 // This is a range. Skip over the '-' and read in the 2nd int.
2948 next++; // skip '-'
2949 SKIP_WS(next);
2950 scan = next;
2951 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2952 SKIP_DIGITS(next);
2953 end = __kmp_str_to_int(scan, *next);
2954 KMP_ASSERT2(end >= 0, "bad explicit proc list");
2955
2956 // Check for a stride parameter
2957 stride = 1;
2958 SKIP_WS(next);
2959 if (*next == ':') {
2960 // A stride is specified. Skip over the ':" and read the 3rd int.
2961 int sign = +1;
2962 next++; // skip ':'
2963 SKIP_WS(next);
2964 scan = next;
2965 if (*next == '-') {
2966 sign = -1;
2967 next++;
2968 SKIP_WS(next);
2969 scan = next;
2970 }
2971 KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2972 SKIP_DIGITS(next);
2973 stride = __kmp_str_to_int(scan, *next);
2974 KMP_ASSERT2(stride >= 0, "bad explicit proc list");
2975 stride *= sign;
2976 }
2977
2978 // Do some range checks.
2979 KMP_ASSERT2(stride != 0, "bad explicit proc list");
2980 if (stride > 0) {
2981 KMP_ASSERT2(start <= end, "bad explicit proc list");
2982 } else {
2983 KMP_ASSERT2(start >= end, "bad explicit proc list");
2984 }
2985 KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
2986
2987 // Add the mask for each OS proc # to the list.
2988 if (stride > 0) {
2989 do {
2990 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2991 start += stride;
2992 } while (start <= end);
2993 } else {
2994 do {
2995 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2996 start += stride;
2997 } while (start >= end);
2998 }
2999
3000 // Skip optional comma.
3001 SKIP_WS(next);
3002 if (*next == ',') {
3003 next++;
3004 }
3005 scan = next;
3006 }
3007
3008 *out_numMasks = nextNewMask;
3009 if (nextNewMask == 0) {
3010 *out_masks = NULL;
3011 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3012 return;
3013 }
3014 KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3015 for (i = 0; i < nextNewMask; i++) {
3016 kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3017 kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3018 KMP_CPU_COPY(dest, src);
3019 }
3020 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3021 KMP_CPU_FREE(sumMask);
3022}
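
// Worked standalone example (illustrative values) of the range expansion
// performed above for a proclist entry of the form "start-end:stride": one
// mask is added per OS proc id that the stride visits.
#include <cstdio>

int main() {
  int start = 1, end = 9, stride = 3; // as if parsed from "1-9:3"
  do {
    std::printf("add mask for OS proc %d\n", start); // procs 1, 4, 7
    start += stride;
  } while (start <= end);
}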
3023
3024/*-----------------------------------------------------------------------------
3025Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
3026places. Again, here is the grammar:
3027
3028place_list := place
3029place_list := place , place_list
3030place := num
3031place := place : num
3032place := place : num : signed
3033place := { subplace_list }
3034place := ! place // (lowest priority)
3035subplace_list := subplace
3036subplace_list := subplace , subplace_list
3037subplace := num
3038subplace := num : num
3039subplace := num : num : signed
3040signed := num
3041signed := + signed
3042signed := - signed
3043-----------------------------------------------------------------------------*/
3044static void __kmp_process_subplace_list(const char **scan,
3045 kmp_affin_mask_t *osId2Mask,
3046 int maxOsId, kmp_affin_mask_t *tempMask,
3047 int *setSize) {
3048 const char *next;
3049
3050 for (;;) {
3051 int start, count, stride, i;
3052
3053 // Read in the starting proc id
3054 SKIP_WS(*scan);
3055 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3056 next = *scan;
3057 SKIP_DIGITS(next);
3058 start = __kmp_str_to_int(*scan, *next);
3059 KMP_ASSERT(start >= 0);
3060 *scan = next;
3061
3062 // valid follow sets are ',' ':' and '}'
3063 SKIP_WS(*scan);
3064 if (**scan == '}' || **scan == ',') {
3065 if ((start > maxOsId) ||
3066 (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3067 if (__kmp_affinity_verbose ||
3068 (__kmp_affinity_warnings &&
3069 (__kmp_affinity_type != affinity_none))) {
3070 KMP_WARNING(AffIgnoreInvalidProcID, start);
3071 }
3072 } else {
3073 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3074 (*setSize)++;
3075 }
3076 if (**scan == '}') {
3077 break;
3078 }
3079 (*scan)++; // skip ','
3080 continue;
3081 }
3082 KMP_ASSERT2(**scan == ':', "bad explicit places list");
3083 (*scan)++; // skip ':'
3084
3085 // Read count parameter
3086 SKIP_WS(*scan);
3087 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3088 next = *scan;
3089 SKIP_DIGITS(next);
3090 count = __kmp_str_to_int(*scan, *next);
3091 KMP_ASSERT(count >= 0);
3092 *scan = next;
3093
3094 // valid follow sets are ',' ':' and '}'
3095 SKIP_WS(*scan);
3096 if (**scan == '}' || **scan == ',') {
3097 for (i = 0; i < count; i++) {
3098 if ((start > maxOsId) ||
3099 (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3100 if (__kmp_affinity_verbose ||
3101 (__kmp_affinity_warnings &&
3102 (__kmp_affinity_type != affinity_none))) {
3103 KMP_WARNING(AffIgnoreInvalidProcID, start);
3104 }
3105 break; // don't proliferate warnings for large count
3106 } else {
3107 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3108 start++;
3109 (*setSize)++;
3110 }
3111 }
3112 if (**scan == '}') {
3113 break;
3114 }
3115 (*scan)++; // skip ','
3116 continue;
3117 }
3118 KMP_ASSERT2(**scan == ':', "bad explicit places list");
3119 (*scan)++; // skip ':'
3120
3121 // Read stride parameter
3122 int sign = +1;
3123 for (;;) {
3124 SKIP_WS(*scan);
3125 if (**scan == '+') {
3126 (*scan)++; // skip '+'
3127 continue;
3128 }
3129 if (**scan == '-') {
3130 sign *= -1;
3131 (*scan)++; // skip '-'
3132 continue;
3133 }
3134 break;
3135 }
3136 SKIP_WS(*scan);
3137 KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3138 next = *scan;
3139 SKIP_DIGITS(next);
3140 stride = __kmp_str_to_int(*scan, *next);
3141 KMP_ASSERT(stride >= 0);
3142 *scan = next;
3143 stride *= sign;
3144
3145 // valid follow sets are ',' and '}'
3146 SKIP_WS(*scan);
3147 if (**scan == '}' || **scan == ',') {
3148 for (i = 0; i < count; i++) {
3149 if ((start > maxOsId) ||
3150 (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3151 if (__kmp_affinity_verbose ||
3152 (__kmp_affinity_warnings &&
3153 (__kmp_affinity_type != affinity_none))) {
3154 KMP_WARNING(AffIgnoreInvalidProcID, start);
3155 }
3156 break; // don't proliferate warnings for large count
3157 } else {
3158 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3159 start += stride;
3160 (*setSize)++;
3161 }
3162 }
3163 if (**scan == '}') {
3164 break;
3165 }
3166 (*scan)++; // skip ','
3167 continue;
3168 }
3169
3170 KMP_ASSERT2(0, "bad explicit places list");
3171 }
3172}
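
// Standalone expansion of the three subplace forms handled above (example
// values only): "num" selects one proc, "num:count" a consecutive run, and
// "num:count:stride" a strided run.
#include <cstdio>

static void expand(int start, int count, int stride) {
  for (int i = 0; i < count; ++i, start += stride)
    std::printf("%d ", start);
  std::printf("\n");
}

int main() {
  expand(4, 1, 1); // subplace "4"     -> 4
  expand(4, 3, 1); // subplace "4:3"   -> 4 5 6
  expand(4, 3, 2); // subplace "4:3:2" -> 4 6 8
}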
3173
3174static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
3175 int maxOsId, kmp_affin_mask_t *tempMask,
3176 int *setSize) {
3177 const char *next;
3178
3179 // valid follow sets are '{' '!' and num
3180 SKIP_WS(*scan);
3181 if (**scan == '{') {
3182 (*scan)++; // skip '{'
3183 __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
3184 KMP_ASSERT2(**scan == '}', "bad explicit places list");
3185 (*scan)++; // skip '}'
3186 } else if (**scan == '!') {
3187 (*scan)++; // skip '!'
3188 __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
3189 KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3190 } else if ((**scan >= '0') && (**scan <= '9')) {
3191 next = *scan;
3192 SKIP_DIGITS(next);
3193 int num = __kmp_str_to_int(*scan, *next);
3194 KMP_ASSERT(num >= 0);
3195 if ((num > maxOsId) ||
3196 (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3197 if (__kmp_affinity_verbose ||
3198 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
3199 KMP_WARNING(AffIgnoreInvalidProcID, num);
3200 }
3201 } else {
3202 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3203 (*setSize)++;
3204 }
3205 *scan = next; // skip num
3206 } else {
3207 KMP_ASSERT2(0, "bad explicit places list");
3208 }
3209}
3210
3212void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
3213 unsigned int *out_numMasks,
3214 const char *placelist,
3215 kmp_affin_mask_t *osId2Mask,
3216 int maxOsId) {
3217 int i, j, count, stride, sign;
3218 const char *scan = placelist;
3219 const char *next = placelist;
3220
3221 numNewMasks = 2;
3222 KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3223 nextNewMask = 0;
3224
3225 // tempMask is modified based on the previous or initial
3226 // place to form the current place
3227 // previousMask contains the previous place
3228 kmp_affin_mask_t *tempMask;
3229 kmp_affin_mask_t *previousMask;
3230 KMP_CPU_ALLOC(tempMask);
3231 KMP_CPU_ZERO(tempMask);
3232 KMP_CPU_ALLOC(previousMask);
3233 KMP_CPU_ZERO(previousMask);
3234 int setSize = 0;
3235
3236 for (;;) {
3237 __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);
3238
3239 // valid follow sets are ',' ':' and EOL
3240 SKIP_WS(scan);
3241 if (*scan == '\0' || *scan == ',') {
3242 if (setSize > 0) {
3243 ADD_MASK(tempMask);
3244 }
3245 KMP_CPU_ZERO(tempMask);
3246 setSize = 0;
3247 if (*scan == '\0') {
3248 break;
3249 }
3250 scan++; // skip ','
3251 continue;
3252 }
3253
3254 KMP_ASSERT2(*scan == ':', "bad explicit places list");
3255 scan++; // skip ':'
3256
3257 // Read count parameter
3258 SKIP_WS(scan);
3259 KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3260 next = scan;
3261 SKIP_DIGITS(next);
3262 count = __kmp_str_to_int(scan, *next);
3263 KMP_ASSERT(count >= 0);
3264 scan = next;
3265
3266 // valid follow sets are ',' ':' and EOL
3267 SKIP_WS(scan);
3268 if (*scan == '\0' || *scan == ',') {
3269 stride = +1;
3270 } else {
3271 KMP_ASSERT2(*scan == ':', "bad explicit places list");
3272 scan++; // skip ':'
3273
3274 // Read stride parameter
3275 sign = +1;
3276 for (;;) {
3277 SKIP_WS(scan);
3278 if (*scan == '+') {
3279 scan++; // skip '+'
3280 continue;
3281 }
3282 if (*scan == '-') {
3283 sign *= -1;
3284 scan++; // skip '-'
3285 continue;
3286 }
3287 break;
3288 }
3289 SKIP_WS(scan);
3290 KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3291 next = scan;
3292 SKIP_DIGITS(next);
3293 stride = __kmp_str_to_int(scan, *next);
3294 KMP_DEBUG_ASSERT(stride >= 0);
3295 scan = next;
3296 stride *= sign;
3297 }
3298
3299 // Add places determined by initial_place : count : stride
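    // e.g., with initial place {8}, count 3, stride -4, this loop emits
    // {8}, {4}, {0} (illustrative; out-of-range OS proc ids are skipped)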
3300 for (i = 0; i < count; i++) {
3301 if (setSize == 0) {
3302 break;
3303 }
3304 // Add the current place, then build the next place (tempMask) from that
3305 KMP_CPU_COPY(previousMask, tempMask);
3306 ADD_MASK(previousMask);
3307 KMP_CPU_ZERO(tempMask);
3308 setSize = 0;
3309 KMP_CPU_SET_ITERATE(j, previousMask) {
3310 if (!KMP_CPU_ISSET(j, previousMask)) {
3311 continue;
3312 }
3313 if ((j + stride > maxOsId) || (j + stride < 0) ||
3314 (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
3315 (!KMP_CPU_ISSET(j + stride,
3316 KMP_CPU_INDEX(osId2Mask, j + stride)))) {
3317 if ((__kmp_affinity_verbose ||
3318 (__kmp_affinity_warnings &&
3319 (__kmp_affinity_type != affinity_none))) &&
3320 i < count - 1) {
3321 KMP_WARNING(AffIgnoreInvalidProcID, j + stride);
3322 }
3323 continue;
3324 }
3325 KMP_CPU_SET(j + stride, tempMask);
3326 setSize++;
3327 }
3328 }
3329 KMP_CPU_ZERO(tempMask);
3330 setSize = 0;
3331
3332 // valid follow sets are ',' and EOL
3333 SKIP_WS(scan);
3334 if (*scan == '\0') {
3335 break;
3336 }
3337 if (*scan == ',') {
3338 scan++; // skip ','
3339 continue;
3340 }
3341
3342 KMP_ASSERT2(0, "bad explicit places list");
3343 }
3344
3345 *out_numMasks = nextNewMask;
3346 if (nextNewMask == 0) {
3347 *out_masks = NULL;
3348 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3349 return;
3350 }
3351 KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3352 KMP_CPU_FREE(tempMask);
3353 KMP_CPU_FREE(previousMask);
3354 for (i = 0; i < nextNewMask; i++) {
3355 kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3356 kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3357 KMP_CPU_COPY(dest, src);
3358 }
3359 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3360}
3361
3362#undef ADD_MASK
3363#undef ADD_MASK_OSID
3364
3365#if KMP_USE_HWLOC
3366static int __kmp_hwloc_skip_PUs_obj(hwloc_topology_t t, hwloc_obj_t o) {
3367 // skip PUs descendants of the object o
3368 int skipped = 0;
3369 hwloc_obj_t hT = NULL;
3370 int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3371 for (int i = 0; i < N; ++i) {
3372 KMP_DEBUG_ASSERT(hT);
3373 unsigned idx = hT->os_index;
3374 if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3375 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3376 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3377 ++skipped;
3378 }
3379 hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3380 }
3381 return skipped; // count number of skipped units
3382}
3383
3384static int __kmp_hwloc_obj_has_PUs(hwloc_topology_t t, hwloc_obj_t o) {
3385 // check if obj has PUs present in fullMask
3386 hwloc_obj_t hT = NULL;
3387 int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3388 for (int i = 0; i < N; ++i) {
3389 KMP_DEBUG_ASSERT(hT);
3390 unsigned idx = hT->os_index;
3391 if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask))
3392 return 1; // found PU
3393 hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3394 }
3395 return 0; // no PUs found
3396}
3397#endif // KMP_USE_HWLOC
3398
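// Illustrative note on the routine below: it prunes the detected topology
// according to KMP_HW_SUBSET. For example, assuming the documented
// "<n>s,<n>c,<n>t" syntax, on a 2-socket, 8-core-per-socket,
// 2-thread-per-core machine, KMP_HW_SUBSET=1s,4c,1t keeps one hardware
// thread on each of 4 cores of one socket, so __kmp_avail_proc becomes 4.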
3399static void __kmp_apply_thread_places(AddrUnsPair **pAddr, int depth) {
3400 AddrUnsPair *newAddr;
3401 if (__kmp_hws_requested == 0)
3402 goto _exit; // no topology limiting actions requested, exit
3403#if KMP_USE_HWLOC
3404 if (__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
3405 // The number of subobjects is calculated dynamically; this works fine
3406 // for any non-uniform topology.
3407 // L2 cache objects are identified by depth, other objects by type.
3408 hwloc_topology_t tp = __kmp_hwloc_topology;
3409 int nS = 0, nN = 0, nL = 0, nC = 0,
3410 nT = 0; // logical index including skipped
3411 int nCr = 0, nTr = 0; // number of requested units
3412 int nPkg = 0, nCo = 0, n_new = 0, n_old = 0, nCpP = 0, nTpC = 0; // counters
3413 hwloc_obj_t hT, hC, hL, hN, hS; // hwloc objects (pointers to)
3414 int L2depth, idx;
3415
3416 // check support of extensions ----------------------------------
3417 int numa_support = 0, tile_support = 0;
3418 if (__kmp_pu_os_idx)
3419 hT = hwloc_get_pu_obj_by_os_index(tp,
3420 __kmp_pu_os_idx[__kmp_avail_proc - 1]);
3421 else
3422 hT = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PU, __kmp_avail_proc - 1);
3423 if (hT == NULL) { // something's gone wrong
3424 KMP_WARNING(AffHWSubsetUnsupported);
3425 goto _exit;
3426 }
3427 // check NUMA node
3428 hN = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hT);
3429 hS = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hT);
3430 if (hN != NULL && hN->depth > hS->depth) {
3431 numa_support = 1; // set when the socket contains NUMA node(s)
3432 } else if (__kmp_hws_node.num > 0) {
3433 // sockets inside a NUMA node are not supported (no such HW found for testing)
3434 KMP_WARNING(AffHWSubsetUnsupported);
3435 goto _exit;
3436 }
3437 // check L2 cache; get object by depth because of multiple cache levels
3438 L2depth = hwloc_get_cache_type_depth(tp, 2, HWLOC_OBJ_CACHE_UNIFIED);
3439 hL = hwloc_get_ancestor_obj_by_depth(tp, L2depth, hT);
3440 if (hL != NULL &&
3441 __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC) > 1) {
3442 tile_support = 1; // no sense counting L2 if it contains a single core
3443 } else if (__kmp_hws_tile.num > 0) {
3444 if (__kmp_hws_core.num == 0) {
3445 __kmp_hws_core = __kmp_hws_tile; // replace L2 with core
3446 __kmp_hws_tile.num = 0;
3447 } else {
3448 // L2 and core are both requested, but represent same object
3449 KMP_WARNING(AffHWSubsetInvalid);
3450 goto _exit;
3451 }
3452 }
3453 // end of check of extensions -----------------------------------
3454
3455 // fill in unset items, validate settings -----------------------
3456 if (__kmp_hws_socket.num == 0)
3457 __kmp_hws_socket.num = nPackages; // use all available sockets
3458 if (__kmp_hws_socket.offset >= nPackages) {
3459 KMP_WARNING(AffHWSubsetManySockets);
3460 goto _exit;
3461 }
3462 if (numa_support) {
3463 hN = NULL;
3464 int NN = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE,
3465 &hN); // num nodes in socket
3466 if (__kmp_hws_node.num == 0)
3467 __kmp_hws_node.num = NN; // use all available nodes
3468 if (__kmp_hws_node.offset >= NN) {
3469 KMP_WARNING(AffHWSubsetManyNodes);
3470 goto _exit;
3471 }
3472 if (tile_support) {
3473 // get num tiles in node
3474 int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3475 if (__kmp_hws_tile.num == 0) {
3476 __kmp_hws_tile.num = NL + 1;
3477 } // use all available tiles; some nodes may have more tiles, hence the +1
3478 if (__kmp_hws_tile.offset >= NL) {
3479 KMP_WARNING(AffHWSubsetManyTiles);
3480 goto _exit;
3481 }
3482 int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3483 &hC); // num cores in tile
3484 if (__kmp_hws_core.num == 0)
3485 __kmp_hws_core.num = NC; // use all available cores
3486 if (__kmp_hws_core.offset >= NC) {
3487 KMP_WARNING(AffHWSubsetManyCores);
3488 goto _exit;
3489 }
3490 } else { // tile_support
3491 int NC = __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE,
3492 &hC); // num cores in node
3493 if (__kmp_hws_core.num == 0)
3494 __kmp_hws_core.num = NC; // use all available cores
3495 if (__kmp_hws_core.offset >= NC) {
3496 KMP_WARNING(AffHWSubsetManyCores);
3497 goto _exit;
3498 }
3499 } // tile_support
3500 } else { // numa_support
3501 if (tile_support) {
3502 // get num tiles in socket
3503 int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3504 if (__kmp_hws_tile.num == 0)
3505 __kmp_hws_tile.num = NL; // use all available tiles
3506 if (__kmp_hws_tile.offset >= NL) {
3507 KMP_WARNING(AffHWSubsetManyTiles);
3508 goto _exit;
3509 }
3510 int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3511 &hC); // num cores in tile
3512 if (__kmp_hws_core.num == 0)
3513 __kmp_hws_core.num = NC; // use all available cores
3514 if (__kmp_hws_core.offset >= NC) {
3515 KMP_WARNING(AffHWSubsetManyCores);
3516 goto _exit;
3517 }
3518 } else { // tile_support
3519 int NC = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE,
3520 &hC); // num cores in socket
3521 if (__kmp_hws_core.num == 0)
3522 __kmp_hws_core.num = NC; // use all available cores
3523 if (__kmp_hws_core.offset >= NC) {
3524 KMP_WARNING(AffHWSubsetManyCores);
3525 goto _exit;
3526 }
3527 } // tile_support
3528 }
3529 if (__kmp_hws_proc.num == 0)
3530 __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all available procs
3531 if (__kmp_hws_proc.offset >= __kmp_nThreadsPerCore) {
3532 KMP_WARNING(AffHWSubsetManyProcs);
3533 goto _exit;
3534 }
3535 // end of validation --------------------------------------------
3536
3537 if (pAddr) // pAddr is NULL in case of affinity_none
3538 newAddr = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) *
3539 __kmp_avail_proc); // max size
3540 // main loop to form HW subset ----------------------------------
3541 hS = NULL;
3542 int NP = hwloc_get_nbobjs_by_type(tp, HWLOC_OBJ_PACKAGE);
3543 for (int s = 0; s < NP; ++s) {
3544 // Check Socket -----------------------------------------------
3545 hS = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hS);
3546 if (!__kmp_hwloc_obj_has_PUs(tp, hS))
3547 continue; // skip socket if all PUs are out of fullMask
3548 ++nS; // only count objects that have PUs in the affinity mask
3549 if (nS <= __kmp_hws_socket.offset ||
3550 nS > __kmp_hws_socket.num + __kmp_hws_socket.offset) {
3551 n_old += __kmp_hwloc_skip_PUs_obj(tp, hS); // skip socket
3552 continue; // move to next socket
3553 }
3554 nCr = 0; // count number of cores per socket
3555 // socket requested, go down the topology tree
3556 // check 4 cases: (+NUMA+Tile), (+NUMA-Tile), (-NUMA+Tile), (-NUMA-Tile)
3557 if (numa_support) {
3558 nN = 0;
3559 hN = NULL;
3560 // num nodes in current socket
3561 int NN =
3562 __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE, &hN);
3563 for (int n = 0; n < NN; ++n) {
3564 // Check NUMA Node ----------------------------------------
3565 if (!__kmp_hwloc_obj_has_PUs(tp, hN)) {
3566 hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3567 continue; // skip node if all PUs are out of fullMask
3568 }
3569 ++nN;
3570 if (nN <= __kmp_hws_node.offset ||
3571 nN > __kmp_hws_node.num + __kmp_hws_node.offset) {
3572 // skip node as not requested
3573 n_old += __kmp_hwloc_skip_PUs_obj(tp, hN); // skip node
3574 hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3575 continue; // move to next node
3576 }
3577 // node requested, go down the topology tree
3578 if (tile_support) {
3579 nL = 0;
3580 hL = NULL;
3581 int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3582 for (int l = 0; l < NL; ++l) {
3583 // Check L2 (tile) ------------------------------------
3584 if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3585 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3586 continue; // skip tile if all PUs are out of fullMask
3587 }
3588 ++nL;
3589 if (nL <= __kmp_hws_tile.offset ||
3590 nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3591 // skip tile as not requested
3592 n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3593 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3594 continue; // move to next tile
3595 }
3596 // tile requested, go down the topology tree
3597 nC = 0;
3598 hC = NULL;
3599 // num cores in current tile
3600 int NC = __kmp_hwloc_count_children_by_type(tp, hL,
3601 HWLOC_OBJ_CORE, &hC);
3602 for (int c = 0; c < NC; ++c) {
3603 // Check Core ---------------------------------------
3604 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3605 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3606 continue; // skip core if all PUs are out of fullMask
3607 }
3608 ++nC;
3609 if (nC <= __kmp_hws_core.offset ||
3610 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3611 // skip core as not requested
3612 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3613 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3614 continue; // move to next core
3615 }
3616 // core requested, go down to PUs
3617 nT = 0;
3618 nTr = 0;
3619 hT = NULL;
3620 // num procs in current core
3621 int NT = __kmp_hwloc_count_children_by_type(tp, hC,
3622 HWLOC_OBJ_PU, &hT);
3623 for (int t = 0; t < NT; ++t) {
3624 // Check PU ---------------------------------------
3625 idx = hT->os_index;
3626 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3627 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3628 continue; // skip PU if not in fullMask
3629 }
3630 ++nT;
3631 if (nT <= __kmp_hws_proc.offset ||
3632 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3633 // skip PU
3634 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3635 ++n_old;
3636 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3637 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3638 continue; // move to next PU
3639 }
3640 ++nTr;
3641 if (pAddr) // collect requested thread's data
3642 newAddr[n_new] = (*pAddr)[n_old];
3643 ++n_new;
3644 ++n_old;
3645 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3646 } // threads loop
3647 if (nTr > 0) {
3648 ++nCr; // num cores per socket
3649 ++nCo; // total num cores
3650 if (nTr > nTpC)
3651 nTpC = nTr; // calc max threads per core
3652 }
3653 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3654 } // cores loop
3655 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3656 } // tiles loop
3657 } else { // tile_support
3658 // no tiles, check cores
3659 nC = 0;
3660 hC = NULL;
3661 // num cores in current node
3662 int NC =
3663 __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE, &hC);
3664 for (int c = 0; c < NC; ++c) {
3665 // Check Core ---------------------------------------
3666 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3667 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3668 continue; // skip core if all PUs are out of fullMask
3669 }
3670 ++nC;
3671 if (nC <= __kmp_hws_core.offset ||
3672 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3673 // skip core as not requested
3674 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3675 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3676 continue; // move to next core
3677 }
3678 // core requested, go down to PUs
3679 nT = 0;
3680 nTr = 0;
3681 hT = NULL;
3682 int NT =
3683 __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3684 for (int t = 0; t < NT; ++t) {
3685 // Check PU ---------------------------------------
3686 idx = hT->os_index;
3687 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3688 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3689 continue; // skip PU if not in fullMask
3690 }
3691 ++nT;
3692 if (nT <= __kmp_hws_proc.offset ||
3693 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3694 // skip PU
3695 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3696 ++n_old;
3697 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3698 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3699 continue; // move to next PU
3700 }
3701 ++nTr;
3702 if (pAddr) // collect requested thread's data
3703 newAddr[n_new] = (*pAddr)[n_old];
3704 ++n_new;
3705 ++n_old;
3706 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3707 } // threads loop
3708 if (nTr > 0) {
3709 ++nCr; // num cores per socket
3710 ++nCo; // total num cores
3711 if (nTr > nTpC)
3712 nTpC = nTr; // calc max threads per core
3713 }
3714 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3715 } // cores loop
3716 } // tiles support
3717 hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3718 } // nodes loop
3719 } else { // numa_support
3720 // no NUMA support
3721 if (tile_support) {
3722 nL = 0;
3723 hL = NULL;
3724 // num tiles in current socket
3725 int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3726 for (int l = 0; l < NL; ++l) {
3727 // Check L2 (tile) ------------------------------------
3728 if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3729 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3730 continue; // skip tile if all PUs are out of fullMask
3731 }
3732 ++nL;
3733 if (nL <= __kmp_hws_tile.offset ||
3734 nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3735 // skip tile as not requested
3736 n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3737 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3738 continue; // move to next tile
3739 }
3740 // tile requested, go down the topology tree
3741 nC = 0;
3742 hC = NULL;
3743 // num cores per tile
3744 int NC =
3745 __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC);
3746 for (int c = 0; c < NC; ++c) {
3747 // Check Core ---------------------------------------
3748 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3749 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3750 continue; // skip core if all PUs are out of fullMask
3751 }
3752 ++nC;
3753 if (nC <= __kmp_hws_core.offset ||
3754 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3755 // skip core as not requested
3756 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3757 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3758 continue; // move to next core
3759 }
3760 // core requested, go down to PUs
3761 nT = 0;
3762 nTr = 0;
3763 hT = NULL;
3764 // num procs per core
3765 int NT =
3766 __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3767 for (int t = 0; t < NT; ++t) {
3768 // Check PU ---------------------------------------
3769 idx = hT->os_index;
3770 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3771 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3772 continue; // skip PU if not in fullMask
3773 }
3774 ++nT;
3775 if (nT <= __kmp_hws_proc.offset ||
3776 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3777 // skip PU
3778 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3779 ++n_old;
3780 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3781 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3782 continue; // move to next PU
3783 }
3784 ++nTr;
3785 if (pAddr) // collect requested thread's data
3786 newAddr[n_new] = (*pAddr)[n_old];
3787 ++n_new;
3788 ++n_old;
3789 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3790 } // threads loop
3791 if (nTr > 0) {
3792 ++nCr; // num cores per socket
3793 ++nCo; // total num cores
3794 if (nTr > nTpC)
3795 nTpC = nTr; // calc max threads per core
3796 }
3797 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3798 } // cores loop
3799 hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3800 } // tiles loop
3801 } else { // tile_support
3802 // no tiles, check cores
3803 nC = 0;
3804 hC = NULL;
3805 // num cores in socket
3806 int NC =
3807 __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE, &hC);
3808 for (int c = 0; c < NC; ++c) {
3809 // Check Core -------------------------------------------
3810 if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3811 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3812 continue; // skip core if all PUs are out of fullMask
3813 }
3814 ++nC;
3815 if (nC <= __kmp_hws_core.offset ||
3816 nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3817 // skip core as not requested
3818 n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3819 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3820 continue; // move to next core
3821 }
3822 // core requested, go down to PUs
3823 nT = 0;
3824 nTr = 0;
3825 hT = NULL;
3826 // num procs per core
3827 int NT =
3828 __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3829 for (int t = 0; t < NT; ++t) {
3830 // Check PU ---------------------------------------
3831 idx = hT->os_index;
3832 if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3833 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3834 continue; // skip PU if not in fullMask
3835 }
3836 ++nT;
3837 if (nT <= __kmp_hws_proc.offset ||
3838 nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3839 // skip PU
3840 KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3841 ++n_old;
3842 KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3843 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3844 continue; // move to next PU
3845 }
3846 ++nTr;
3847 if (pAddr) // collect requested thread's data
3848 newAddr[n_new] = (*pAddr)[n_old];
3849 ++n_new;
3850 ++n_old;
3851 hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3852 } // threads loop
3853 if (nTr > 0) {
3854 ++nCr; // num cores per socket
3855 ++nCo; // total num cores
3856 if (nTr > nTpC)
3857 nTpC = nTr; // calc max threads per core
3858 }
3859 hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3860 } // cores loop
3861 } // tiles support
3862 } // numa_support
3863 if (nCr > 0) { // found cores?
3864 ++nPkg; // num sockets
3865 if (nCr > nCpP)
3866 nCpP = nCr; // calc max cores per socket
3867 }
3868 } // sockets loop
3869
3870 // check that the subset is valid
3871 KMP_DEBUG_ASSERT(n_old == __kmp_avail_proc);
3872 KMP_DEBUG_ASSERT(nPkg > 0);
3873 KMP_DEBUG_ASSERT(nCpP > 0);
3874 KMP_DEBUG_ASSERT(nTpC > 0);
3875 KMP_DEBUG_ASSERT(nCo > 0);
3876 KMP_DEBUG_ASSERT(nPkg <= nPackages);
3877 KMP_DEBUG_ASSERT(nCpP <= nCoresPerPkg);
3878 KMP_DEBUG_ASSERT(nTpC <= __kmp_nThreadsPerCore);
3879 KMP_DEBUG_ASSERT(nCo <= __kmp_ncores);
3880
3881 nPackages = nPkg; // correct num sockets
3882 nCoresPerPkg = nCpP; // correct num cores per socket
3883 __kmp_nThreadsPerCore = nTpC; // correct num threads per core
3884 __kmp_avail_proc = n_new; // correct num procs
3885 __kmp_ncores = nCo; // correct num cores
3886 // hwloc topology method end
3887 } else
3888#endif // KMP_USE_HWLOC
3889 {
3890 int n_old = 0, n_new = 0, proc_num = 0;
3891 if (__kmp_hws_node.num > 0 || __kmp_hws_tile.num > 0) {
3892 KMP_WARNING(AffHWSubsetNoHWLOC);
3893 goto _exit;
3894 }
3895 if (__kmp_hws_socket.num == 0)
3896 __kmp_hws_socket.num = nPackages; // use all available sockets
3897 if (__kmp_hws_core.num == 0)
3898 __kmp_hws_core.num = nCoresPerPkg; // use all available cores
3899 if (__kmp_hws_proc.num == 0 || __kmp_hws_proc.num > __kmp_nThreadsPerCore)
3900 __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all HW contexts
3901 if (!__kmp_affinity_uniform_topology()) {
3902 KMP_WARNING(AffHWSubsetNonUniform);
3903 goto _exit; // don't support non-uniform topology
3904 }
3905 if (depth > 3) {
3906 KMP_WARNING(AffHWSubsetNonThreeLevel);
3907 goto _exit; // only 3-level topologies are supported
3908 }
3909 if (__kmp_hws_socket.offset + __kmp_hws_socket.num > nPackages) {
3910 KMP_WARNING(AffHWSubsetManySockets);
3911 goto _exit;
3912 }
3913 if (__kmp_hws_core.offset + __kmp_hws_core.num > nCoresPerPkg) {
3914 KMP_WARNING(AffHWSubsetManyCores);
3915 goto _exit;
3916 }
3917 // Form the requested subset
3918 if (pAddr) // pAddr is NULL in case of affinity_none
3919 newAddr = (AddrUnsPair *)__kmp_allocate(
3920 sizeof(AddrUnsPair) * __kmp_hws_socket.num * __kmp_hws_core.num *
3921 __kmp_hws_proc.num);
3922 for (int i = 0; i < nPackages; ++i) {
3923 if (i < __kmp_hws_socket.offset ||
3924 i >= __kmp_hws_socket.offset + __kmp_hws_socket.num) {
3925 // skip not-requested socket
3926 n_old += nCoresPerPkg * __kmp_nThreadsPerCore;
3927 if (__kmp_pu_os_idx != NULL) {
3928 // walk through skipped socket
3929 for (int j = 0; j < nCoresPerPkg; ++j) {
3930 for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
3931 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
3932 ++proc_num;
3933 }
3934 }
3935 }
3936 } else {
3937 // walk through requested socket
3938 for (int j = 0; j < nCoresPerPkg; ++j) {
3939 if (j < __kmp_hws_core.offset ||
3940 j >= __kmp_hws_core.offset +
3941 __kmp_hws_core.num) { // skip not-requested core
3942 n_old += __kmp_nThreadsPerCore;
3943 if (__kmp_pu_os_idx != NULL) {
3944 for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
3945 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
3946 ++proc_num;
3947 }
3948 }
3949 } else {
3950 // walk through requested core
3951 for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
3952 if (k < __kmp_hws_proc.num) {
3953 if (pAddr) // collect requested thread's data
3954 newAddr[n_new] = (*pAddr)[n_old];
3955 n_new++;
3956 } else {
3957 if (__kmp_pu_os_idx != NULL)
3958 KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
3959 }
3960 n_old++;
3961 ++proc_num;
3962 }
3963 }
3964 }
3965 }
3966 }
3967 KMP_DEBUG_ASSERT(n_old == nPackages * nCoresPerPkg * __kmp_nThreadsPerCore);
3968 KMP_DEBUG_ASSERT(n_new ==
3969 __kmp_hws_socket.num * __kmp_hws_core.num *
3970 __kmp_hws_proc.num);
3971 nPackages = __kmp_hws_socket.num; // correct nPackages
3972 nCoresPerPkg = __kmp_hws_core.num; // correct nCoresPerPkg
3973 __kmp_nThreadsPerCore = __kmp_hws_proc.num; // correct __kmp_nThreadsPerCore
3974 __kmp_avail_proc = n_new; // correct avail_proc
3975 __kmp_ncores = nPackages * __kmp_hws_core.num; // correct ncores
3976 } // non-hwloc topology method
3977 if (pAddr) {
3978 __kmp_free(*pAddr);
3979 *pAddr = newAddr; // replace old topology with new one
3980 }
3981 if (__kmp_affinity_verbose) {
3982 char m[KMP_AFFIN_MASK_PRINT_LEN];
3983 __kmp_affinity_print_mask(m, KMP_AFFIN_MASK_PRINT_LEN,
3984 __kmp_affin_fullMask);
3985 if (__kmp_affinity_respect_mask) {
3986 KMP_INFORM(InitOSProcSetRespect, "KMP_HW_SUBSET", m);
3987 } else {
3988 KMP_INFORM(InitOSProcSetNotRespect, "KMP_HW_SUBSET", m);
3989 }
3990 KMP_INFORM(AvailableOSProc, "KMP_HW_SUBSET", __kmp_avail_proc);
3991 kmp_str_buf_t buf;
3992 __kmp_str_buf_init(&buf);
3993 __kmp_str_buf_print(&buf, "%d", nPackages);
3994 KMP_INFORM(TopologyExtra, "KMP_HW_SUBSET", buf.str, nCoresPerPkg,
3995 __kmp_nThreadsPerCore, __kmp_ncores);
3996 __kmp_str_buf_free(&buf);
3997 }
3998_exit:
3999 if (__kmp_pu_os_idx != NULL) {
4000 __kmp_free(__kmp_pu_os_idx);
4001 __kmp_pu_os_idx = NULL;
4002 }
4003}
4004
4005// This function figures out the deepest level at which there is at least one
4006// cluster/core with more than one processing unit bound to it.
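// (Illustrative: with per-proc labels (package, core, thread) and
// bottom_level == 2, any proc whose thread label is nonzero pushes
// core_level to 1, i.e. the core level of the tree.)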
4007static int __kmp_affinity_find_core_level(const AddrUnsPair *address2os,
4008 int nprocs, int bottom_level) {
4009 int core_level = 0;
4010
4011 for (int i = 0; i < nprocs; i++) {
4012 for (int j = bottom_level; j > 0; j--) {
4013 if (address2os[i].first.labels[j] > 0) {
4014 if (core_level < (j - 1)) {
4015 core_level = j - 1;
4016 }
4017 }
4018 }
4019 }
4020 return core_level;
4021}
4022
4023 // This function counts the number of clusters/cores at a given level.
4024static int __kmp_affinity_compute_ncores(const AddrUnsPair *address2os,
4025 int nprocs, int bottom_level,
4026 int core_level) {
4027 int ncores = 0;
4028 int i, j;
4029
4030 j = bottom_level;
4031 for (i = 0; i < nprocs; i++) {
4032 for (j = bottom_level; j > core_level; j--) {
4033 if ((i + 1) < nprocs) {
4034 if (address2os[i + 1].first.labels[j] > 0) {
4035 break;
4036 }
4037 }
4038 }
4039 if (j == core_level) {
4040 ncores++;
4041 }
4042 }
4043 if (j > core_level) {
4044 // In the case of (nprocs < __kmp_avail_proc) we may end up too deep and
4045 // miss one core. This may occur when called from __kmp_affinity_find_core().
4046 ncores++;
4047 }
4048 return ncores;
4049}
4050
4051 // This function finds the cluster/core to which a given processing unit is bound.
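// (It does this by counting the cores among the first proc+1 table entries
// and subtracting one, which yields the zero-based core index.)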
4052static int __kmp_affinity_find_core(const AddrUnsPair *address2os, int proc,
4053 int bottom_level, int core_level) {
4054 return __kmp_affinity_compute_ncores(address2os, proc + 1, bottom_level,
4055 core_level) -
4056 1;
4057}
4058
4059 // This function finds the maximal number of processing units bound to a
4060 // cluster/core at a given level.
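// (Illustrative: with labels (package, core, thread) and core_level == 1,
// percore below is the proc's thread label + 1, and the maximum over all
// procs is returned.)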
4061static int __kmp_affinity_max_proc_per_core(const AddrUnsPair *address2os,
4062 int nprocs, int bottom_level,
4063 int core_level) {
4064 int maxprocpercore = 0;
4065
4066 if (core_level < bottom_level) {
4067 for (int i = 0; i < nprocs; i++) {
4068 int percore = address2os[i].first.labels[core_level + 1] + 1;
4069
4070 if (percore > maxprocpercore) {
4071 maxprocpercore = percore;
4072 }
4073 }
4074 } else {
4075 maxprocpercore = 1;
4076 }
4077 return maxprocpercore;
4078}
4079
4080static AddrUnsPair *address2os = NULL;
4081static int *procarr = NULL;
4082static int __kmp_aff_depth = 0;
4083
4084#if KMP_USE_HIER_SCHED
4085#define KMP_EXIT_AFF_NONE \
4086 KMP_ASSERT(__kmp_affinity_type == affinity_none); \
4087 KMP_ASSERT(address2os == NULL); \
4088 __kmp_apply_thread_places(NULL, 0); \
4089 __kmp_create_affinity_none_places(); \
4090 __kmp_dispatch_set_hierarchy_values(); \
4091 return;
4092#else
4093#define KMP_EXIT_AFF_NONE \
4094 KMP_ASSERT(__kmp_affinity_type == affinity_none); \
4095 KMP_ASSERT(address2os == NULL); \
4096 __kmp_apply_thread_places(NULL, 0); \
4097 __kmp_create_affinity_none_places(); \
4098 return;
4099#endif
4100
4101 // Create a one-element mask array (set of places) that only contains the
4102 // initial process's affinity mask
4103static void __kmp_create_affinity_none_places() {
4104 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4105 KMP_ASSERT(__kmp_affinity_type == affinity_none);
4106 __kmp_affinity_num_masks = 1;
4107 KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4108 kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
4109 KMP_CPU_COPY(dest, __kmp_affin_fullMask);
4110}
4111
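// Illustrative note on the comparator below: it rotates the
// __kmp_affinity_compact innermost topology levels to the front of the sort
// key. E.g., with depth-3 labels (package, core, thread) and
// __kmp_affinity_compact == 1, addresses sort by (thread, package, core),
// so the first hardware thread of every core is enumerated before any
// second hardware thread.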
4112static int __kmp_affinity_cmp_Address_child_num(const void *a, const void *b) {
4113 const Address *aa = &(((const AddrUnsPair *)a)->first);
4114 const Address *bb = &(((const AddrUnsPair *)b)->first);
4115 unsigned depth = aa->depth;
4116 unsigned i;
4117 KMP_DEBUG_ASSERT(depth == bb->depth);
4118 KMP_DEBUG_ASSERT((unsigned)__kmp_affinity_compact <= depth);
4119 KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
4120 for (i = 0; i < (unsigned)__kmp_affinity_compact; i++) {
4121 int j = depth - i - 1;
4122 if (aa->childNums[j] < bb->childNums[j])
4123 return -1;
4124 if (aa->childNums[j] > bb->childNums[j])
4125 return 1;
4126 }
4127 for (; i < depth; i++) {
4128 int j = i - __kmp_affinity_compact;
4129 if (aa->childNums[j] < bb->childNums[j])
4130 return -1;
4131 if (aa->childNums[j] > bb->childNums[j])
4132 return 1;
4133 }
4134 return 0;
4135}
4136
4137static void __kmp_aux_affinity_initialize(void) {
4138 if (__kmp_affinity_masks != NULL) {
4139 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4140 return;
4141 }
4142
4143 // Create the "full" mask - this defines all of the processors that we
4144 // consider to be in the machine model. If respect is set, then it is the
4145 // initialization thread's affinity mask. Otherwise, it is all processors that
4146 // we know about on the machine.
4147 if (__kmp_affin_fullMask == NULL) {
4148 KMP_CPU_ALLOC(__kmp_affin_fullMask);
4149 }
4150 if (KMP_AFFINITY_CAPABLE()) {
4151 if (__kmp_affinity_respect_mask) {
4152 __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
4153
4154 // Count the number of available processors.
4155 unsigned i;
4156 __kmp_avail_proc = 0;
4157 KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
4158 if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
4159 continue;
4160 }
4161 __kmp_avail_proc++;
4162 }
4163 if (__kmp_avail_proc > __kmp_xproc) {
4164 if (__kmp_affinity_verbose ||
4165 (__kmp_affinity_warnings &&
4166 (__kmp_affinity_type != affinity_none))) {
4167 KMP_WARNING(ErrorInitializeAffinity);
4168 }
4169 __kmp_affinity_type = affinity_none;
4170 KMP_AFFINITY_DISABLE();
4171 return;
4172 }
4173 } else {
4174 __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4175 __kmp_avail_proc = __kmp_xproc;
4176 }
4177 }
4178
4179 if (__kmp_affinity_gran == affinity_gran_tile &&
4180 // check if user's request is valid
4181 __kmp_affinity_dispatch->get_api_type() == KMPAffinity::NATIVE_OS) {
4182 KMP_WARNING(AffTilesNoHWLOC, "KMP_AFFINITY");
4183 __kmp_affinity_gran = affinity_gran_package;
4184 }
4185
4186 int depth = -1;
4187 kmp_i18n_id_t msg_id = kmp_i18n_null;
4188
4189 // For backward compatibility, setting KMP_CPUINFO_FILE =>
4190 // KMP_TOPOLOGY_METHOD=cpuinfo
4191 if ((__kmp_cpuinfo_file != NULL) &&
4192 (__kmp_affinity_top_method == affinity_top_method_all)) {
4193 __kmp_affinity_top_method = affinity_top_method_cpuinfo;
4194 }
4195
4196 if (__kmp_affinity_top_method == affinity_top_method_all) {
4197 // In the default code path, errors are not fatal - we just try using
4198 // another method. We only emit a warning message if affinity is on, or the
4199 // verbose flag is set, and the nowarnings flag was not set.
4200 const char *file_name = NULL;
4201 int line = 0;
4202#if KMP_USE_HWLOC
4203 if (depth < 0 &&
4204 __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
4205 if (__kmp_affinity_verbose) {
4206 KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4207 }
4208 if (!__kmp_hwloc_error) {
4209 depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4210 if (depth == 0) {
4211 KMP_EXIT_AFF_NONE;
4212 } else if (depth < 0 && __kmp_affinity_verbose) {
4213 KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4214 }
4215 } else if (__kmp_affinity_verbose) {
4216 KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4217 }
4218 }
4219#endif
4220
4221#if KMP_ARCH_X86 || KMP_ARCH_X86_64
4222
4223 if (depth < 0) {
4224 if (__kmp_affinity_verbose) {
4225 KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4226 }
4227
4228 file_name = NULL;
4229 depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4230 if (depth == 0) {
4231 KMP_EXIT_AFF_NONE;
4232 }
4233
4234 if (depth < 0) {
4235 if (__kmp_affinity_verbose) {
4236 if (msg_id != kmp_i18n_null) {
4237 KMP_INFORM(AffInfoStrStr, "KMP_AFFINITY",
4238 __kmp_i18n_catgets(msg_id),
4239 KMP_I18N_STR(DecodingLegacyAPIC));
4240 } else {
4241 KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
4242 KMP_I18N_STR(DecodingLegacyAPIC));
4243 }
4244 }
4245
4246 file_name = NULL;
4247 depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4248 if (depth == 0) {
4249 KMP_EXIT_AFF_NONE;
4250 }
4251 }
4252 }
4253
4254#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4255
4256#if KMP_OS_LINUX
4257
4258 if (depth < 0) {
4259 if (__kmp_affinity_verbose) {
4260 if (msg_id != kmp_i18n_null) {
4261 KMP_INFORM(AffStrParseFilename, "KMP_AFFINITY",
4262 __kmp_i18n_catgets(msg_id), "/proc/cpuinfo");
4263 } else {
4264 KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "/proc/cpuinfo");
4265 }
4266 }
4267
4268 FILE *f = fopen("/proc/cpuinfo", "r");
4269 if (f == NULL) {
4270 msg_id = kmp_i18n_str_CantOpenCpuinfo;
4271 } else {
4272 file_name = "/proc/cpuinfo";
4273 depth =
4274 __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4275 fclose(f);
4276 if (depth == 0) {
4277 KMP_EXIT_AFF_NONE;
4278 }
4279 }
4280 }
4281
4282#endif /* KMP_OS_LINUX */
4283
4284#if KMP_GROUP_AFFINITY
4285
4286 if ((depth < 0) && (__kmp_num_proc_groups > 1)) {
4287 if (__kmp_affinity_verbose) {
4288 KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4289 }
4290
4291 depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4292 KMP_ASSERT(depth != 0);
4293 }
4294
4295#endif /* KMP_GROUP_AFFINITY */
4296
4297 if (depth < 0) {
4298 if (__kmp_affinity_verbose && (msg_id != kmp_i18n_null)) {
4299 if (file_name == NULL) {
4300 KMP_INFORM(UsingFlatOS, __kmp_i18n_catgets(msg_id));
4301 } else if (line == 0) {
4302 KMP_INFORM(UsingFlatOSFile, file_name, __kmp_i18n_catgets(msg_id));
4303 } else {
4304 KMP_INFORM(UsingFlatOSFileLine, file_name, line,
4305 __kmp_i18n_catgets(msg_id));
4306 }
4307 }
4308 // FIXME - print msg if msg_id = kmp_i18n_null ???
4309
4310 file_name = "";
4311 depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4312 if (depth == 0) {
4313 KMP_EXIT_AFF_NONE;
4314 }
4315 KMP_ASSERT(depth > 0);
4316 KMP_ASSERT(address2os != NULL);
4317 }
4318 }
4319
4320#if KMP_USE_HWLOC
4321 else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
4322 KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
4323 if (__kmp_affinity_verbose) {
4324 KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4325 }
4326 depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4327 if (depth == 0) {
4328 KMP_EXIT_AFF_NONE;
4329 }
4330 }
4331#endif // KMP_USE_HWLOC
4332
4333// If the user has specified that a particular topology discovery method is to be
4334// used, then we abort if that method fails. The exception is group affinity,
4335// which might have been implicitly set.
4336
4337#if KMP_ARCH_X86 || KMP_ARCH_X86_64
4338
4339 else if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
4340 if (__kmp_affinity_verbose) {
4341 KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4342 }
4343
4344 depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4345 if (depth == 0) {
4346 KMP_EXIT_AFF_NONE;
4347 }
4348 if (depth < 0) {
4349 KMP_ASSERT(msg_id != kmp_i18n_null);
4350 KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4351 }
4352 } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
4353 if (__kmp_affinity_verbose) {
4354 KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
4355 }
4356
4357 depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4358 if (depth == 0) {
4359 KMP_EXIT_AFF_NONE;
4360 }
4361 if (depth < 0) {
4362 KMP_ASSERT(msg_id != kmp_i18n_null);
4363 KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4364 }
4365 }
4366
4367#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4368
4369 else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
4370 const char *filename;
4371 if (__kmp_cpuinfo_file != NULL) {
4372 filename = __kmp_cpuinfo_file;
4373 } else {
4374 filename = "/proc/cpuinfo";
4375 }
4376
4377 if (__kmp_affinity_verbose) {
4378 KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
4379 }
4380
4381 FILE *f = fopen(filename, "r");
4382 if (f == NULL) {
4383 int code = errno;
4384 if (__kmp_cpuinfo_file != NULL) {
4385 __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
4386 KMP_HNT(NameComesFrom_CPUINFO_FILE), __kmp_msg_null);
4387 } else {
4388 __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
4389 __kmp_msg_null);
4390 }
4391 }
4392 int line = 0;
4393 depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4394 fclose(f);
4395 if (depth < 0) {
4396 KMP_ASSERT(msg_id != kmp_i18n_null);
4397 if (line > 0) {
4398 KMP_FATAL(FileLineMsgExiting, filename, line,
4399 __kmp_i18n_catgets(msg_id));
4400 } else {
4401 KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
4402 }
4403 }
4404 if (__kmp_affinity_type == affinity_none) {
4405 KMP_ASSERT(depth == 0);
4406 KMP_EXIT_AFF_NONE;
4407 }
4408 }
4409
4410#if KMP_GROUP_AFFINITY
4411
4412 else if (__kmp_affinity_top_method == affinity_top_method_group) {
4413 if (__kmp_affinity_verbose) {
4414 KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4415 }
4416
4417 depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4418 KMP_ASSERT(depth != 0);
4419 if (depth < 0) {
4420 KMP_ASSERT(msg_id != kmp_i18n_null);
4421 KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4422 }
4423 }
4424
4425#endif /* KMP_GROUP_AFFINITY */
4426
4427 else if (__kmp_affinity_top_method == affinity_top_method_flat) {
4428 if (__kmp_affinity_verbose) {
4429 KMP_INFORM(AffUsingFlatOS, "KMP_AFFINITY");
4430 }
4431
4432 depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4433 if (depth == 0) {
4434 KMP_EXIT_AFF_NONE;
4435 }
4436 // should not fail
4437 KMP_ASSERT(depth > 0);
4438 KMP_ASSERT(address2os != NULL);
4439 }
4440
4441#if KMP_USE_HIER_SCHED
4442 __kmp_dispatch_set_hierarchy_values();
4443#endif
4444
4445 if (address2os == NULL) {
4446 if (KMP_AFFINITY_CAPABLE() &&
4447 (__kmp_affinity_verbose ||
4448 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none)))) {
4449 KMP_WARNING(ErrorInitializeAffinity);
4450 }
4451 __kmp_affinity_type = affinity_none;
4452 __kmp_create_affinity_none_places();
4453 KMP_AFFINITY_DISABLE();
4454 return;
4455 }
4456
4457 if (__kmp_affinity_gran == affinity_gran_tile
4458#if KMP_USE_HWLOC
4459 && __kmp_tile_depth == 0
4460#endif
4461 ) {
4462 // tiles were requested but not detected; warn the user
4463 KMP_WARNING(AffTilesNoTiles, "KMP_AFFINITY");
4464 }
4465
4466 __kmp_apply_thread_places(&address2os, depth);
4467
4468 // Create the table of masks, indexed by thread Id.
4469 unsigned maxIndex;
4470 unsigned numUnique;
4471 kmp_affin_mask_t *osId2Mask =
4472 __kmp_create_masks(&maxIndex, &numUnique, address2os, __kmp_avail_proc);
4473 if (__kmp_affinity_gran_levels == 0) {
4474 KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
4475 }
4476
4477 // Set the childNums vector in all Address objects. This must be done before
4478 // we can sort using __kmp_affinity_cmp_Address_child_num(), which takes into
4479 // account the setting of __kmp_affinity_compact.
4480 __kmp_affinity_assign_child_nums(address2os, __kmp_avail_proc);
4481
4482 switch (__kmp_affinity_type) {
4483
4484 case affinity_explicit:
4485 KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
4486 if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
4487 __kmp_affinity_process_proclist(
4488 &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4489 __kmp_affinity_proclist, osId2Mask, maxIndex);
4490 } else {
4491 __kmp_affinity_process_placelist(
4492 &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4493 __kmp_affinity_proclist, osId2Mask, maxIndex);
4494 }
4495 if (__kmp_affinity_num_masks == 0) {
4496 if (__kmp_affinity_verbose ||
4497 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
4498 KMP_WARNING(AffNoValidProcID);
4499 }
4500 __kmp_affinity_type = affinity_none;
4501 __kmp_create_affinity_none_places();
4502 return;
4503 }
4504 break;
4505
4506 // The other affinity types rely on sorting the Addresses according to some
4507 // permutation of the machine topology tree. Set __kmp_affinity_compact and
4508 // __kmp_affinity_offset appropriately, then jump to a common code fragment
4509 // to do the sort and create the array of affinity masks.
4510
4511 case affinity_logical:
4512 __kmp_affinity_compact = 0;
4513 if (__kmp_affinity_offset) {
4514 __kmp_affinity_offset =
4515 __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4516 }
4517 goto sortAddresses;
4518
4519 case affinity_physical:
4520 if (__kmp_nThreadsPerCore > 1) {
4521 __kmp_affinity_compact = 1;
4522 if (__kmp_affinity_compact >= depth) {
4523 __kmp_affinity_compact = 0;
4524 }
4525 } else {
4526 __kmp_affinity_compact = 0;
4527 }
4528 if (__kmp_affinity_offset) {
4529 __kmp_affinity_offset =
4530 __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4531 }
4532 goto sortAddresses;
4533
4534 case affinity_scatter:
4535 if (__kmp_affinity_compact >= depth) {
4536 __kmp_affinity_compact = 0;
4537 } else {
4538 __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
4539 }
4540 goto sortAddresses;
4541
4542 case affinity_compact:
4543 if (__kmp_affinity_compact >= depth) {
4544 __kmp_affinity_compact = depth - 1;
4545 }
4546 goto sortAddresses;
4547
4548 case affinity_balanced:
4549 if (depth <= 1) {
4550 if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4551 KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4552 }
4553 __kmp_affinity_type = affinity_none;
4554 __kmp_create_affinity_none_places();
4555 return;
4556 } else if (!__kmp_affinity_uniform_topology()) {
4557 // Save the depth for further usage
4558 __kmp_aff_depth = depth;
4559
4560 int core_level = __kmp_affinity_find_core_level(
4561 address2os, __kmp_avail_proc, depth - 1);
4562 int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
4563 depth - 1, core_level);
4564 int maxprocpercore = __kmp_affinity_max_proc_per_core(
4565 address2os, __kmp_avail_proc, depth - 1, core_level);
4566
4567 int nproc = ncores * maxprocpercore;
4568 if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
4569 if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4570 KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4571 }
4572 __kmp_affinity_type = affinity_none;
4573 return;
4574 }
4575
4576 procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
4577 for (int i = 0; i < nproc; i++) {
4578 procarr[i] = -1;
4579 }
4580
4581 int lastcore = -1;
4582 int inlastcore = 0;
4583 for (int i = 0; i < __kmp_avail_proc; i++) {
4584 int proc = address2os[i].second;
4585 int core =
4586 __kmp_affinity_find_core(address2os, i, depth - 1, core_level);
4587
4588 if (core == lastcore) {
4589 inlastcore++;
4590 } else {
4591 inlastcore = 0;
4592 }
4593 lastcore = core;
4594
4595 procarr[core * maxprocpercore + inlastcore] = proc;
4596 }
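      // procarr now encodes an ncores x maxprocpercore matrix of OS proc
      // ids; slots left at -1 mark hardware contexts missing on the
      // non-uniform machine (illustrative summary).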
4597 }
4598 if (__kmp_affinity_compact >= depth) {
4599 __kmp_affinity_compact = depth - 1;
4600 }
4601
4602 sortAddresses:
4603 // Allocate the gtid->affinity mask table.
4604 if (__kmp_affinity_dups) {
4605 __kmp_affinity_num_masks = __kmp_avail_proc;
4606 } else {
4607 __kmp_affinity_num_masks = numUnique;
4608 }
4609
4610 if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
4611 (__kmp_affinity_num_places > 0) &&
4612 ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
4613 __kmp_affinity_num_masks = __kmp_affinity_num_places;
4614 }
4615
4616 KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4617
4618 // Sort the address2os table according to the current setting of
4619 // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
4620 qsort(address2os, __kmp_avail_proc, sizeof(*address2os),
4621 __kmp_affinity_cmp_Address_child_num);
4622 {
4623 int i;
4624 unsigned j;
4625 for (i = 0, j = 0; i < __kmp_avail_proc; i++) {
4626 if ((!__kmp_affinity_dups) && (!address2os[i].first.leader)) {
4627 continue;
4628 }
4629 unsigned osId = address2os[i].second;
4630 kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
4631 kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
4632 KMP_ASSERT(KMP_CPU_ISSET(osId, src));
4633 KMP_CPU_COPY(dest, src);
4634 if (++j >= __kmp_affinity_num_masks) {
4635 break;
4636 }
4637 }
4638 KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
4639 }
4640 break;
4641
4642 default:
4643 KMP_ASSERT2(0, "Unexpected affinity setting");
4644 }
4645
4646 KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
4647 machine_hierarchy.init(address2os, __kmp_avail_proc);
4648}
4649#undef KMP_EXIT_AFF_NONE
4650
4651void __kmp_affinity_initialize(void) {
4652 // Much of the code above was written assuming that if a machine was not
4653 // affinity capable, then __kmp_affinity_type == affinity_none. We now
4654 // explicitly represent this as __kmp_affinity_type == affinity_disabled.
4655 // There are too many checks for __kmp_affinity_type == affinity_none
4656 // in this code. Instead of trying to change them all, check if
4657 // __kmp_affinity_type == affinity_disabled, and if so, slam it with
4658 // affinity_none, call the real initialization routine, then restore
4659 // __kmp_affinity_type to affinity_disabled.
4660 int disabled = (__kmp_affinity_type == affinity_disabled);
4661 if (!KMP_AFFINITY_CAPABLE()) {
4662 KMP_ASSERT(disabled);
4663 }
4664 if (disabled) {
4665 __kmp_affinity_type = affinity_none;
4666 }
4667 __kmp_aux_affinity_initialize();
4668 if (disabled) {
4669 __kmp_affinity_type = affinity_disabled;
4670 }
4671}
4672
4673void __kmp_affinity_uninitialize(void) {
4674 if (__kmp_affinity_masks != NULL) {
4675 KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4676 __kmp_affinity_masks = NULL;
4677 }
4678 if (__kmp_affin_fullMask != NULL) {
4679 KMP_CPU_FREE(__kmp_affin_fullMask);
4680 __kmp_affin_fullMask = NULL;
4681 }
4682 __kmp_affinity_num_masks = 0;
4683 __kmp_affinity_type = affinity_default;
4684 __kmp_affinity_num_places = 0;
4685 if (__kmp_affinity_proclist != NULL) {
4686 __kmp_free(__kmp_affinity_proclist);
4687 __kmp_affinity_proclist = NULL;
4688 }
4689 if (address2os != NULL) {
4690 __kmp_free(address2os);
4691 address2os = NULL;
4692 }
4693 if (procarr != NULL) {
4694 __kmp_free(procarr);
4695 procarr = NULL;
4696 }
4697#if KMP_USE_HWLOC
4698 if (__kmp_hwloc_topology != NULL) {
4699 hwloc_topology_destroy(__kmp_hwloc_topology);
4700 __kmp_hwloc_topology = NULL;
4701 }
4702#endif
4703 KMPAffinity::destroy_api();
4704}
4705
4706void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
4707 if (!KMP_AFFINITY_CAPABLE()) {
4708 return;
4709 }
4710
4711 kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4712 if (th->th.th_affin_mask == NULL) {
4713 KMP_CPU_ALLOC(th->th.th_affin_mask);
4714 } else {
4715 KMP_CPU_ZERO(th->th.th_affin_mask);
4716 }
4717
4718 // Copy the thread mask to the kmp_info_t structure. If
4719 // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one that
4720 // has all of the OS proc ids set; if __kmp_affinity_respect_mask is set,
4721 // the full mask is the same as the mask of the initialization thread.
4722 kmp_affin_mask_t *mask;
4723 int i;
4724
4725 if (KMP_AFFINITY_NON_PROC_BIND) {
4726 if ((__kmp_affinity_type == affinity_none) ||
4727 (__kmp_affinity_type == affinity_balanced)) {
4728#if KMP_GROUP_AFFINITY
4729 if (__kmp_num_proc_groups > 1) {
4730 return;
4731 }
4732#endif
4733 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4734 i = 0;
4735 mask = __kmp_affin_fullMask;
4736 } else {
4737 KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
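      // Round-robin placement: e.g., with 4 affinity masks and zero offset,
      // gtids 0..7 land on places 0,1,2,3,0,1,2,3 (illustrative).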
4738 i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4739 mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4740 }
4741 } else {
4742 if ((!isa_root) ||
4743 (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
4744#if KMP_GROUP_AFFINITY
4745 if (__kmp_num_proc_groups > 1) {
4746 return;
4747 }
4748#endif
4749 KMP_ASSERT(__kmp_affin_fullMask != NULL);
4750 i = KMP_PLACE_ALL;
4751 mask = __kmp_affin_fullMask;
4752 } else {
4753 // int i = some hash function or just a counter that doesn't
4754 // always start at 0. Use gtid for now.
4755 KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4756 i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4757 mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4758 }
4759 }
4760
4761 th->th.th_current_place = i;
4762 if (isa_root) {
4763 th->th.th_new_place = i;
4764 th->th.th_first_place = 0;
4765 th->th.th_last_place = __kmp_affinity_num_masks - 1;
4766 } else if (KMP_AFFINITY_NON_PROC_BIND) {
4767 // When using a Non-OMP_PROC_BIND affinity method,
4768 // set all threads' place-partition-var to the entire place list
4769 th->th.th_first_place = 0;
4770 th->th.th_last_place = __kmp_affinity_num_masks - 1;
4771 }
4772
4773 if (i == KMP_PLACE_ALL) {
4774 KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
4775 gtid));
4776 } else {
4777 KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
4778 gtid, i));
4779 }
4780
4781 KMP_CPU_COPY(th->th.th_affin_mask, mask);
4782
4783 if (__kmp_affinity_verbose
4784 /* to avoid duplicate printing (will be correctly printed on barrier) */
4785 && (__kmp_affinity_type == affinity_none ||
4786 (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
4787 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4788 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4789 th->th.th_affin_mask);
4790 KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
4791 __kmp_gettid(), gtid, buf);
4792 }
4793
4794#if KMP_OS_WINDOWS
4795 // On Windows* OS, the process affinity mask might have changed. If the user
4796 // didn't request affinity and this call fails, just continue silently.
4797 // See CQ171393.
4798 if (__kmp_affinity_type == affinity_none) {
4799 __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
4800 } else
4801#endif
4802 __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4803}
4804
4805void __kmp_affinity_set_place(int gtid) {
4806 if (!KMP_AFFINITY_CAPABLE()) {
4807 return;
4808 }
4809
4810 kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4811
4812 KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
4813 "place = %d)\n",
4814 gtid, th->th.th_new_place, th->th.th_current_place));
4815
4816 // Check that the new place is within this thread's partition.
4817 KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4818 KMP_ASSERT(th->th.th_new_place >= 0);
4819 KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
4820 if (th->th.th_first_place <= th->th.th_last_place) {
4821 KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
4822 (th->th.th_new_place <= th->th.th_last_place));
4823 } else {
4824 KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
4825 (th->th.th_new_place >= th->th.th_last_place));
4826 }
4827
4828 // Copy the thread mask to the kmp_info_t structure,
4829 // and set this thread's affinity.
4830 kmp_affin_mask_t *mask =
4831 KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
4832 KMP_CPU_COPY(th->th.th_affin_mask, mask);
4833 th->th.th_current_place = th->th.th_new_place;
4834
4835 if (__kmp_affinity_verbose) {
4836 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4837 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4838 th->th.th_affin_mask);
4839 KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
4840 __kmp_gettid(), gtid, buf);
4841 }
4842 __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4843}
4844
4845int __kmp_aux_set_affinity(void **mask) {
4846 int gtid;
4847 kmp_info_t *th;
4848 int retval;
4849
4850 if (!KMP_AFFINITY_CAPABLE()) {
4851 return -1;
4852 }
4853
4854 gtid = __kmp_entry_gtid();
4855 KA_TRACE(1000, (""); {
4856 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4857 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4858 (kmp_affin_mask_t *)(*mask));
4859 __kmp_debug_printf(
4860 "kmp_set_affinity: setting affinity mask for thread %d = %s\n", gtid,
4861 buf);
4862 });
4863
4864 if (__kmp_env_consistency_check) {
4865 if ((mask == NULL) || (*mask == NULL)) {
4866 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4867 } else {
4868 unsigned proc;
4869 int num_procs = 0;
4870
4871 KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
4872 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4873 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4874 }
4875 if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
4876 continue;
4877 }
4878 num_procs++;
4879 }
4880 if (num_procs == 0) {
4881 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4882 }
4883
4884#if KMP_GROUP_AFFINITY
4885 if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
4886 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4887 }
4888#endif /* KMP_GROUP_AFFINITY */
4889 }
4890 }
4891
4892 th = __kmp_threads[gtid];
4893 KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4894 retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4895 if (retval == 0) {
4896 KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
4897 }
4898
4899 th->th.th_current_place = KMP_PLACE_UNDEFINED;
4900 th->th.th_new_place = KMP_PLACE_UNDEFINED;
4901 th->th.th_first_place = 0;
4902 th->th.th_last_place = __kmp_affinity_num_masks - 1;
4903
4904 // Turn off 4.0 affinity for the current thread at this parallel level.
4905 th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
4906
4907 return retval;
4908}
4909
4910int __kmp_aux_get_affinity(void **mask) {
4911 int gtid;
4912 int retval;
4913 kmp_info_t *th;
4914
4915 if (!KMP_AFFINITY_CAPABLE()) {
4916 return -1;
4917 }
4918
4919 gtid = __kmp_entry_gtid();
4920 th = __kmp_threads[gtid];
4921 KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4922
4923 KA_TRACE(1000, (""); {
4924 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4925 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4926 th->th.th_affin_mask);
4927 __kmp_printf("kmp_get_affinity: stored affinity mask for thread %d = %s\n",
4928 gtid, buf);
4929 });
4930
4931 if (__kmp_env_consistency_check) {
4932 if ((mask == NULL) || (*mask == NULL)) {
4933 KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
4934 }
4935 }
4936
4937#if !KMP_OS_WINDOWS
4938
4939 retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4940 KA_TRACE(1000, (""); {
4941 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4942 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4943 (kmp_affin_mask_t *)(*mask));
4944 __kmp_printf("kmp_get_affinity: system affinity mask for thread %d = %s\n",
4945 gtid, buf);
4946 });
4947 return retval;
4948
4949#else
4950
4951 KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
4952 return 0;
4953
4954#endif /* KMP_OS_WINDOWS */
4955}
4956
4957int __kmp_aux_get_affinity_max_proc() {
4958 if (!KMP_AFFINITY_CAPABLE()) {
4959 return 0;
4960 }
4961#if KMP_GROUP_AFFINITY
4962 if (__kmp_num_proc_groups > 1) {
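    // e.g., 2 processor groups on a 64-bit build: 2 * sizeof(DWORD_PTR) *
    // CHAR_BIT = 2 * 8 * 8 = 128 addressable logical processors
    // (illustrative).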
4963 return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
4964 }
4965#endif
4966 return __kmp_xproc;
4967}
4968
4969int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
4970 if (!KMP_AFFINITY_CAPABLE()) {
4971 return -1;
4972 }
4973
4974 KA_TRACE(1000, (""); {
4975 int gtid = __kmp_entry_gtid();
4976 char buf[KMP_AFFIN_MASK_PRINT_LEN];
4977 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4978 (kmp_affin_mask_t *)(*mask));
4979 __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
4980 "affinity mask for thread %d = %s\n",
4981 proc, gtid, buf);
4982 });
4983
4984 if (__kmp_env_consistency_check) {
4985 if ((mask == NULL) || (*mask == NULL)) {
4986 KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
4987 }
4988 }
4989
4990 if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4991 return -1;
4992 }
4993 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4994 return -2;
4995 }
4996
4997 KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
4998 return 0;
4999}
5000
5001int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
5002 if (!KMP_AFFINITY_CAPABLE()) {
5003 return -1;
5004 }
5005
5006 KA_TRACE(1000, (""); {
5007 int gtid = __kmp_entry_gtid();
5008 char buf[KMP_AFFIN_MASK_PRINT_LEN];
5009 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5010 (kmp_affin_mask_t *)(*mask));
5011 __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
5012 "affinity mask for thread %d = %s\n",
5013 proc, gtid, buf);
5014 });
5015
5016 if (__kmp_env_consistency_check) {
5017 if ((mask == NULL) || (*mask == NULL)) {
5018 KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
5019 }
5020 }
5021
5022 if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5023 return -1;
5024 }
5025 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5026 return -2;
5027 }
5028
5029 KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
5030 return 0;
5031}
5032
5033int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
5034 if (!KMP_AFFINITY_CAPABLE()) {
5035 return -1;
5036 }
5037
5038 KA_TRACE(1000, (""); {
5039 int gtid = __kmp_entry_gtid();
5040 char buf[KMP_AFFIN_MASK_PRINT_LEN];
5041 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5042 (kmp_affin_mask_t *)(*mask));
5043 __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
5044 "affinity mask for thread %d = %s\n",
5045 proc, gtid, buf);
5046 });
5047
5048 if (__kmp_env_consistency_check) {
5049 if ((mask == NULL) || (*mask == NULL)) {
5050 KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
5051 }
5052 }
5053
5054 if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5055 return -1;
5056 }
5057 if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5058 return 0;
5059 }
5060
5061 return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
5062}
5063
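// A minimal sketch of how the __kmp_aux_* entry points above are typically
// driven through the public kmp_* affinity extension API; illustrative only,
// assuming the extension is exposed by the compiler's omp.h:
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);                 // start with an empty mask
//   if (kmp_set_affinity_mask_proc(0, &mask) != 0)   // add OS proc 0
//     { /* proc out of range or not in the full mask */ }
//   if (kmp_set_affinity(&mask) != 0)                // bind the calling thread
//     { /* handle failure */ }
//   kmp_destroy_affinity_mask(&mask);
//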
5064// Dynamic affinity settings - balanced affinity
5065void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
5066 KMP_DEBUG_ASSERT(th);
5067 bool fine_gran = true;
5068 int tid = th->th.th_info.ds.ds_tid;
5069
5070 switch (__kmp_affinity_gran) {
5071 case affinity_gran_fine:
5072 case affinity_gran_thread:
5073 break;
5074 case affinity_gran_core:
5075 if (__kmp_nThreadsPerCore > 1) {
5076 fine_gran = false;
5077 }
5078 break;
5079 case affinity_gran_package:
5080 if (nCoresPerPkg > 1) {
5081 fine_gran = false;
5082 }
5083 break;
5084 default:
5085 fine_gran = false;
5086 }
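// fine_gran now records whether each thread can be pinned to a single
// hardware context: always for "fine"/"thread" granularity, for "core"
// granularity only when each core has one context, and for "package"
// granularity only when each package has one core.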
5087
5088 if (__kmp_affinity_uniform_topology()) {
5089 int coreID;
5090 int threadID;
5091 // Number of hardware threads per core on a hyper-threaded (HT) machine
5092 int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
5093 // Number of cores
5094 int ncores = __kmp_ncores;
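// On a multi-package machine with one context per core, balance across
// packages instead, treating each package as a "core" whose contexts are
// its available processors.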
5095 if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
5096 __kmp_nth_per_core = __kmp_avail_proc / nPackages;
5097 ncores = nPackages;
5098 }
5099 // How many threads will be bound to each core
5100 int chunk = nthreads / ncores;
5101 // How many cores will have an additional thread bound to them - the "big" cores
5102 int big_cores = nthreads % ncores;
5103 // Number of threads on the big cores
5104 int big_nth = (chunk + 1) * big_cores;
5105 if (tid < big_nth) {
5106 coreID = tid / (chunk + 1);
5107 threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
5108 } else { // tid >= big_nth
5109 coreID = (tid - big_cores) / chunk;
5110 threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
5111 }
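// Worked example (illustrative numbers only): with nthreads = 10 and
// ncores = 4, chunk = 2, big_cores = 2, and big_nth = (2 + 1) * 2 = 6, so
// threads 0-5 land three per core on cores 0 and 1 (the "big" cores) and
// threads 6-9 land two per core on cores 2 and 3.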
5112
5113 KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
5114 "Illegal set affinity operation when not capable");
5115
5116 kmp_affin_mask_t *mask = th->th.th_affin_mask;
5117 KMP_CPU_ZERO(mask);
5118
5119 if (fine_gran) {
5120 int osID = address2os[coreID * __kmp_nth_per_core + threadID].second;
5121 KMP_CPU_SET(osID, mask);
5122 } else {
5123 for (int i = 0; i < __kmp_nth_per_core; i++) {
5124 int osID;
5125 osID = address2os[coreID * __kmp_nth_per_core + i].second;
5126 KMP_CPU_SET(osID, mask);
5127 }
5128 }
5129 if (__kmp_affinity_verbose) {
5130 char buf[KMP_AFFIN_MASK_PRINT_LEN];
5131 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5132 KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5133 __kmp_gettid(), tid, buf);
5134 }
5135 __kmp_set_system_affinity(mask, TRUE);
5136 } else { // Non-uniform topology
5137
5138 kmp_affin_mask_t *mask = th->th.th_affin_mask;
5139 KMP_CPU_ZERO(mask);
5140
5141 int core_level = __kmp_affinity_find_core_level(
5142 address2os, __kmp_avail_proc, __kmp_aff_depth - 1);
5143 int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
5144 __kmp_aff_depth - 1, core_level);
5145 int nth_per_core = __kmp_affinity_max_proc_per_core(
5146 address2os, __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
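// core_level is the deepest topology level that identifies cores; ncores
// and nth_per_core give the core count and the maximum number of contexts
// on any single core of this (possibly non-uniform) machine.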
5147
5148 // As a performance optimization, handle the special case nthreads ==
5149 // __kmp_avail_proc separately
5150 if (nthreads == __kmp_avail_proc) {
5151 if (fine_gran) {
5152 int osID = address2os[tid].second;
5153 KMP_CPU_SET(osID, mask);
5154 } else {
5155 int core = __kmp_affinity_find_core(address2os, tid,
5156 __kmp_aff_depth - 1, core_level);
5157 for (int i = 0; i < __kmp_avail_proc; i++) {
5158 int osID = address2os[i].second;
5159 if (__kmp_affinity_find_core(address2os, i, __kmp_aff_depth - 1,
5160 core_level) == core) {
5161 KMP_CPU_SET(osID, mask);
5162 }
5163 }
5164 }
5165 } else if (nthreads <= ncores) {
5166
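// With at most one thread per core, walk the cores that still have an
// available context and give the tid-th such core to this thread.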
5167 int core = 0;
5168 for (int i = 0; i < ncores; i++) {
5169 // Check whether this core has any available processor in procarr[]
5170 int in_mask = 0;
5171 for (int j = 0; j < nth_per_core; j++) {
5172 if (procarr[i * nth_per_core + j] != -1) {
5173 in_mask = 1;
5174 break;
5175 }
5176 }
5177 if (in_mask) {
5178 if (tid == core) {
5179 for (int j = 0; j < nth_per_core; j++) {
5180 int osID = procarr[i * nth_per_core + j];
5181 if (osID != -1) {
5182 KMP_CPU_SET(osID, mask);
5183 // For fine granularity it is enough to set the first available
5184 // osID for this core
5185 if (fine_gran) {
5186 break;
5187 }
5188 }
5189 }
5190 break;
5191 } else {
5192 core++;
5193 }
5194 }
5195 }
5196 } else { // nthreads > ncores
5197 // Array to save the number of available processors on each core
5198 int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
5199 // Array to save the number of cores with exactly "x" available processors
5200 int *ncores_with_x_procs =
5201 (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5202 // Array to save the number of cores with between "x" and nth_per_core available processors
5203 int *ncores_with_x_to_max_procs =
5204 (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5205
5206 for (int i = 0; i <= nth_per_core; i++) {
5207 ncores_with_x_procs[i] = 0;
5208 ncores_with_x_to_max_procs[i] = 0;
5209 }
5210
5211 for (int i = 0; i < ncores; i++) {
5212 int cnt = 0;
5213 for (int j = 0; j < nth_per_core; j++) {
5214 if (procarr[i * nth_per_core + j] != -1) {
5215 cnt++;
5216 }
5217 }
5218 nproc_at_core[i] = cnt;
5219 ncores_with_x_procs[cnt]++;
5220 }
5221
5222 for (int i = 0; i <= nth_per_core; i++) {
5223 for (int j = i; j <= nth_per_core; j++) {
5224 ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
5225 }
5226 }
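// ncores_with_x_to_max_procs[i] now holds the number of cores with at
// least i available contexts; the distribution loop below uses it to bound
// how many placements each pass at width j can make.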
5227
5228 // Maximum possible number of processor contexts
5229 int nproc = nth_per_core * ncores;
5230 // Array tracking the number of threads assigned to each context
5231 int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
5232 for (int i = 0; i < nproc; i++) {
5233 newarr[i] = 0;
5234 }
5235
5236 int nth = nthreads;
5237 int flag = 0;
5238 while (nth > 0) {
5239 for (int j = 1; j <= nth_per_core; j++) {
5240 int cnt = ncores_with_x_to_max_procs[j];
5241 for (int i = 0; i < ncores; i++) {
5242 // Skip cores with no available processors
5243 if (nproc_at_core[i] == 0) {
5244 continue;
5245 }
5246 for (int k = 0; k < nth_per_core; k++) {
5247 if (procarr[i * nth_per_core + k] != -1) {
5248 if (newarr[i * nth_per_core + k] == 0) {
5249 newarr[i * nth_per_core + k] = 1;
5250 cnt--;
5251 nth--;
5252 break;
5253 } else {
5254 if (flag != 0) {
5255 newarr[i * nth_per_core + k]++;
5256 cnt--;
5257 nth--;
5258 break;
5259 }
5260 }
5261 }
5262 }
5263 if (cnt == 0 || nth == 0) {
5264 break;
5265 }
5266 }
5267 if (nth == 0) {
5268 break;
5269 }
5270 }
5271 flag = 1;
5272 }
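// The loop above distributes the nthreads threads over the contexts as
// evenly as the topology allows: the first pass (flag == 0) places at most
// one thread per available context, and later passes stack additional
// threads, so newarr[i] holds the number of threads on context i. The
// prefix-sum walk below then finds the context (or, for coarse
// granularity, the core) that the tid-th thread falls into.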
5273 int sum = 0;
5274 for (int i = 0; i < nproc; i++) {
5275 sum += newarr[i];
5276 if (sum > tid) {
5277 if (fine_gran) {
5278 int osID = procarr[i];
5279 KMP_CPU_SET(osID, mask);
5280 } else {
5281 int coreID = i / nth_per_core;
5282 for (int ii = 0; ii < nth_per_core; ii++) {
5283 int osID = procarr[coreID * nth_per_core + ii];
5284 if (osID != -1) {
5285 KMP_CPU_SET(osID, mask);
5286 }
5287 }
5288 }
5289 break;
5290 }
5291 }
5292 __kmp_free(newarr);
5293 }
5294
5295 if (__kmp_affinity_verbose) {
5296 char buf[KMP_AFFIN_MASK_PRINT_LEN];
5297 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5298 KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5299 __kmp_gettid(), tid, buf);
5300 }
5301 __kmp_set_system_affinity(mask, TRUE);
5302 }
5303}
5304
5305#if KMP_OS_LINUX || KMP_OS_FREEBSD
5306// We don't need this entry point on Windows because the
5307// GetProcessAffinityMask() API is available there
5308//
5309// The intended usage, sketched after this function, is:
5310// 1) The user gets the current affinity mask,
5311// 2) then sets the affinity by calling this function,
5312// 3) error-checks the return value,
5313// 4) runs the non-OpenMP parallelized code, and
5314// 5) resets the affinity to the mask saved in step 1)
5315#ifdef __cplusplus
5316extern "C"
5317#endif
5318 int
5319 kmp_set_thread_affinity_mask_initial()
5320// The function returns 0 on success,
5321// -1 if the thread cannot be bound,
5322// >0 (an errno value) if an error occurred during binding
5323{
5324 int gtid = __kmp_get_gtid();
5325 if (gtid < 0) {
5326 // Do not touch non-omp threads
5327 KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5328 "non-omp thread, returning\n"));
5329 return -1;
5330 }
5331 if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
5332 KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5333 "affinity not initialized, returning\n"));
5334 return -1;
5335 }
5336 KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5337 "set full mask for thread %d\n",
5338 gtid));
5339 KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
5340 return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
5341}
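
// A minimal sketch of the five usage steps above (the POSIX calls are
// standard; run_non_omp_work() is a hypothetical placeholder):
//
//   cpu_set_t saved;
//   pthread_getaffinity_np(pthread_self(), sizeof(saved), &saved);  // step 1
//   if (kmp_set_thread_affinity_mask_initial() != 0)                // steps 2-3
//     { /* could not bind; fall back */ }
//   run_non_omp_work();                                             // step 4
//   pthread_setaffinity_np(pthread_self(), sizeof(saved), &saved);  // step 5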
5342#endif
5343
5344#endif // KMP_AFFINITY_SUPPORTED