LLVM OpenMP* Runtime Library
kmp_affinity.cpp
/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
#include <ctype.h>

// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need
  // to init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case the number of threads exceeds the original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
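// Illustrative note (a hedged sketch, not part of the runtime logic):
// numPerLevel[0] is the branching factor at the leaf level of the (real or
// imagined) hierarchy, so base_leaf_kids above is numPerLevel[0] - 1. For
// example, if numPerLevel began {4, 2, ...} (hypothetical values; they depend
// on the detected machine), each leaf barrier would have 3 children.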

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif

const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  }
  return KMP_I18N_STR(Unknown);
}

const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  }
  return ((plural) ? "unknowns" : "unknown");
}
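// Usage note (hedged): these keywords are the spellings users provide in
// environment variables such as KMP_HW_SUBSET. For example, a hypothetical
// setting KMP_HW_SUBSET=2s,4c,2t selects 2 sockets, 4 cores per socket, and
// 2 threads per core via the abbreviated forms of the keywords above.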

const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  return "unknown";
}

// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
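// Usage sketch (hedged): both comparators here follow the qsort() contract,
// so sorting the flat hardware-thread array by topology ids looks like:
//
//   qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
//         kmp_hw_thread_t::compare_ids);
//
// where hw_threads and num_hw_threads are the kmp_topology_t members used
// throughout this file.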
159 
160 #if KMP_AFFINITY_SUPPORTED
161 int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
162  int i;
163  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
164  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
165  int depth = __kmp_topology->get_depth();
166  KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
167  KMP_DEBUG_ASSERT(__kmp_affinity_compact <= depth);
168  for (i = 0; i < __kmp_affinity_compact; i++) {
169  int j = depth - i - 1;
170  if (aa->sub_ids[j] < bb->sub_ids[j])
171  return -1;
172  if (aa->sub_ids[j] > bb->sub_ids[j])
173  return 1;
174  }
175  for (; i < depth; i++) {
176  int j = i - __kmp_affinity_compact;
177  if (aa->sub_ids[j] < bb->sub_ids[j])
178  return -1;
179  if (aa->sub_ids[j] > bb->sub_ids[j])
180  return 1;
181  }
182  return 0;
183 }
184 #endif
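// Worked example (hypothetical): with depth == 3 (socket, core, thread) and
// __kmp_affinity_compact == 1, the first loop compares sub_ids[2] (thread)
// and the second loop then compares sub_ids[0] (socket) and sub_ids[1]
// (core). Hardware threads sharing a core therefore sort adjacent to each
// other, which is the ordering a compact affinity placement relies on.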

void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  printf("\n");
}

// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., no object has more than one parent)
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find the target layer.
  // If the new layer is equal to another layer then put the new layer above.
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer, and put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
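// Worked example (hypothetical ids): suppose the topology is currently
// {socket, core, thread} and _insert_layer() is called with
// type == KMP_HW_NUMA and per-thread ids that change exactly when the socket
// id changes. The scan above then finds NUMA "equal" to the socket layer at
// target_layer 0, so the new layer is placed above it, giving
// {numa, socket, core, thread} with every hw_threads[k].ids[] array shifted
// down by one slot.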

#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);
}
#endif

// Remove layers that don't add information to the topology.
// This is done by having the layer take on the id = UNKNOWN_ID (-1)
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads)
    // to be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the ids for the second (deeper) layer are the same
      // (e.g., all zero), then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove radix one type by setting the equivalence, removing the id
      // from the hw threads and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
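// Worked example (hypothetical machine): if every socket contains exactly one
// NUMA domain, then for the adjacent pair {socket, numa} the scan above finds
// radix1 == true. Since preference[KMP_HW_SOCKET] (110) beats
// preference[KMP_HW_NUMA] (85), the NUMA layer is removed and
// equivalent[KMP_HW_NUMA] is pointed at KMP_HW_SOCKET, so later queries for
// NUMA granularity transparently resolve to the socket layer.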

void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}

// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
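// Worked example (hypothetical machine): for a uniform topology of
// 2 sockets x 4 cores/socket x 2 threads/core, the pass above yields
//   count = {2, 8, 16}  (total objects at each layer)
//   ratio = {2, 4, 2}   (maximum children per parent at each layer)
// On a hybrid CPU the same pass also tallies num_core_types and
// num_core_efficiencies from the per-thread attrs.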

int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}

// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
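// Example: with ratio = {2, 4, 2} the product is 16; if count[depth - 1]
// (the total number of hardware threads) is also 16, the topology is
// uniform. A machine where one socket has fewer cores or threads than the
// per-layer maximum would predict ends up non-uniform.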

// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
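// Worked example (hypothetical ids): for four sorted hardware threads with
//   ids     = {0,0,0}, {0,0,1}, {0,1,0}, {0,1,1}
// the pass above assigns
//   sub_ids = {0,0,0}, {0,0,1}, {0,1,0}, {0,1,1}
// Sub ids always count 0, 1, 2, ... within the parent object, even when the
// detected ids are sparse or non-zero-based: ids {0,3,5} followed by {0,3,6}
// become sub_ids {0,0,0} and {0,0,1}.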

void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}

kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
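// Layout note: allocate() carves one contiguous block into
//   [ kmp_topology_t | hw_threads[nproc] | 3 x KMP_HW_LAST ints ]
// where the trailing int arrays serve as types, ratio, and count. Keeping
// everything in a single allocation is what lets deallocate() below release
// the whole topology with one __kmp_free() call.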

void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}

bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}

void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}

void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/socket
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
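// Example diagnostic (hypothetical machine): for 2 sockets x 4 cores/socket
// x 2 threads/core the quick-topology line built above reads roughly
//   KMP_AFFINITY: 2 sockets x 4 cores/socket x 2 threads/core (8 total cores)
// with the message prefix and exact wording supplied by the i18n catalog.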

void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }

#if KMP_AFFINITY_SUPPORTED
  // Set the number of affinity granularity levels
  if (__kmp_affinity_gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(__kmp_affinity_gran);
    // Check if the user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (__kmp_topology->get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn user what granularity setting will be used instead
      KMP_WARNING(AffGranularityBad, "KMP_AFFINITY",
                  __kmp_hw_get_catalog_string(__kmp_affinity_gran),
                  __kmp_hw_get_catalog_string(gran_type));
      __kmp_affinity_gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of
    // granularity specified by the user is too coarse, then the
    // granularity must be adjusted "down" to processor group affinity
    // because threads can only exist within one processor group.
    // For example, if a user sets granularity=socket and there are two
    // processor groups that cover a socket, then the runtime must
    // restrict the granularity down to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = __kmp_topology->get_level(gran_type);
      int proc_group_depth = __kmp_topology->get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_WARNING(AffGranTooCoarseProcGroup, "KMP_AFFINITY",
                    __kmp_hw_get_catalog_string(__kmp_affinity_gran));
        __kmp_affinity_gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    __kmp_affinity_gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      __kmp_affinity_gran_levels++;
  }
#endif // KMP_AFFINITY_SUPPORTED
}

// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  //__kmp_avail_proc = __kmp_xproc;
  _discover_uniformity();
}

// Represents running sub IDs for a single core attribute where
// attribute values have SIZE possibilities.
template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
  int last_level; // last level in topology to consider for sub_ids
  int sub_id[SIZE]; // The sub ID for a given attribute value
  int prev_sub_id[KMP_HW_LAST];
  IndexFunc indexer;

public:
  kmp_sub_ids_t(int last_level) : last_level(last_level) {
    KMP_ASSERT(last_level < KMP_HW_LAST);
    for (size_t i = 0; i < SIZE; ++i)
      sub_id[i] = -1;
    for (size_t i = 0; i < KMP_HW_LAST; ++i)
      prev_sub_id[i] = -1;
  }
  void update(const kmp_hw_thread_t &hw_thread) {
    int idx = indexer(hw_thread);
    KMP_ASSERT(idx < (int)SIZE);
    for (int level = 0; level <= last_level; ++level) {
      if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
        if (level < last_level)
          sub_id[idx] = -1;
        sub_id[idx]++;
        break;
      }
    }
    for (int level = 0; level <= last_level; ++level)
      prev_sub_id[level] = hw_thread.sub_ids[level];
  }
  int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
    return sub_id[indexer(hw_thread)];
  }
};
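// Usage sketch (mirrors filter_hw_subset() below): instantiate with an
// indexer functor that maps a hardware thread to a small attribute index,
// then stream the sorted hardware threads through it:
//
//   kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> ids(
//       core_level);
//   for (int i = 0; i < num_hw_threads; ++i) {
//     ids.update(hw_threads[i]);
//     int sub_id = ids.get_sub_id(hw_threads[i]);
//   }
//
// sub_id is then the running index of this thread's core among cores with
// the same attribute value.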

static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}

// Apply the KMP_HW_SUBSET environment variable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int topology_levels[hw_subset_depth];
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_WARNING(AffHWSubsetNotExistGeneric,
                  __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been
    // specified either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_WARNING(AffHWSubsetEqvLayers, __kmp_hw_get_catalog_string(type),
                  __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_WARNING(AffHWSubsetManyGeneric,
                  __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on non-hybrid arch.
      // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      //
      // Check if using multiple core attributes on non-hybrid arch.
      // Ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_WARNING(AffHWSubsetIgnoringAttr, "efficiency");
          } else {
            KMP_WARNING(AffHWSubsetIgnoringAttr, "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_WARNING(AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_WARNING(AffHWSubsetIncompat, "core_type", "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if (using_core_types || using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_WARNING(AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_WARNING(AffHWSubsetIncompat,
                        __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_WARNING(AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }

  struct core_type_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      switch (t.attrs.get_core_type()) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
      case KMP_HW_CORE_TYPE_ATOM:
        return 1;
      case KMP_HW_CORE_TYPE_CORE:
        return 2;
#endif
      case KMP_HW_CORE_TYPE_UNKNOWN:
        return 0;
      }
      KMP_ASSERT(0);
      return 0;
    }
  };
  struct core_eff_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      return t.attrs.get_core_eff();
    }
  };

  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer>
      core_type_sub_ids(core_level);
  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS, core_eff_indexer>
      core_eff_sub_ids(core_level);

  // Determine which hardware threads should be filtered.
  int num_filtered = 0;
  bool *filtered = (bool *)__kmp_allocate(sizeof(bool) * num_hw_threads);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Update type_sub_id
    if (using_core_types)
      core_type_sub_ids.update(hw_thread);
    if (using_core_effs)
      core_eff_sub_ids.update(hw_thread);

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds
        // to this hardware thread's core attribute. Use this num,offset plus
        // the running sub_id for the particular core attribute of this
        // hardware thread to determine if the hardware thread should be
        // filtered or not.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it.
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids.get_sub_id(hw_thread);
        else
          sub_id = core_eff_sub_ids.get_sub_id(hw_thread);
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (hw_thread.sub_ids[level] < offset ||
            (num != kmp_hw_subset_t::USE_ALL &&
             hw_thread.sub_ids[level] >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    filtered[i] = should_be_filtered;
    if (should_be_filtered)
      num_filtered++;
  }

  // One last check that we shouldn't allow filtering entire machine
  if (num_filtered == num_hw_threads) {
    KMP_WARNING(AffHWSubsetAllFiltered);
    __kmp_free(filtered);
    return false;
  }

  // Apply the filter
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    if (!filtered[i]) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
#if KMP_AFFINITY_SUPPORTED
      KMP_CPU_CLR(hw_threads[i].os_id, __kmp_affin_fullMask);
#endif
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  _gather_enumeration_information();
  _discover_uniformity();
  _set_globals();
  _set_last_level_cache();
  __kmp_free(filtered);
  return true;
}
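// Example (hypothetical setting): on a 2-socket, 4-core/socket,
// 2-thread/core machine, KMP_HW_SUBSET=1s,2c,1t keeps socket 0, its first
// two cores, and one thread per core, so filter_hw_subset() returns true
// with num_hw_threads reduced from 16 to 2 and __kmp_avail_proc adjusted to
// match.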

bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const {
  if (hw_level >= depth)
    return true;
  bool retval = true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return retval;
}

#if KMP_AFFINITY_SUPPORTED
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  bool restored;

public:
  kmp_affinity_raii_t() : restored(false) {
    KMP_CPU_ALLOC(mask);
    KMP_ASSERT(mask != NULL);
    __kmp_get_system_affinity(mask, TRUE);
  }
  void restore() {
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE(mask);
    restored = true;
  }
  ~kmp_affinity_raii_t() {
    if (!restored) {
      __kmp_set_system_affinity(mask, TRUE);
      KMP_CPU_FREE(mask);
    }
  }
};
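// Usage sketch (hedged): topology-detection code can save the caller's
// affinity on entry, pin itself while probing, and rely on scope exit to
// put things back:
//
//   {
//     kmp_affinity_raii_t previous_affinity; // saves the current mask
//     // ... bind to each OS proc and read topology information ...
//     previous_affinity.restore(); // optional early restore
//   } // destructor restores if restore() was never called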

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }

void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity_type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}

#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
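// Example: a mask with bits {0,1,2,3,8} set prints as "0-3,8"; a two-bit run
// such as {5,6} prints as "5,6" (a comma rather than a dash, per the
// previous - start > 1 test above); an empty mask prints as "{<empty>}".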

// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}

// Return (possibly empty) affinity mask representing the offline CPUs
// Caller must free the mask
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  kmp_affin_mask_t *offline;
  KMP_CPU_ALLOC(offline);
  KMP_CPU_ZERO(offline);
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t offline_file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the offline CPUs
  // e.g., 1,2,4-7,9,11-15
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
    } else {
      // Syntax problem
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into offline mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, offline);
    }
  }
#endif
  return offline;
}
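// Example input (Linux, hypothetical): if /sys/devices/system/cpu/offline
// contains "2,4-5", the parser above sets bits 2, 4, and 5 in the returned
// mask. An empty or missing file yields an empty mask, which the caller
// treats as "no offline CPUs".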

// Return the number of available procs
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }

  return avail_proc;
}

// All of the __kmp_affinity_create_*_map() routines should allocate the
// internal topology object and set the layer ids for it. Each routine
// returns a boolean on whether it was successful at doing so.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;

#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}

// Returns KMP_HW_* type derived from HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}

// Returns the number of objects of type 'type' below 'obj' within the
// topology tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and
// type is HWLOC_OBJ_PU, then this will return the number of PUs under the
// SOCKET object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type,
                                                       first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}

// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // Store sub_id + 1 so that 0 is distinguished from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
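// Note on the userdata cache (sketch): the first object under a parent
// computes its sub_id by walking backwards through previous siblings; each
// object then stores sub_id + 1 in obj->userdata so the next sibling's walk
// stops after one step. Storing sub_id + 1 keeps a legitimate sub_id of 0
// from looking like an unset (NULL) userdata pointer.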
1641 
1642 static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
1643  kmp_hw_t type;
1644  int hw_thread_index, sub_id;
1645  int depth;
1646  hwloc_obj_t pu, obj, root, prev;
1647  kmp_hw_t types[KMP_HW_LAST];
1648  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];
1649 
1650  hwloc_topology_t tp = __kmp_hwloc_topology;
1651  *msg_id = kmp_i18n_null;
1652  if (__kmp_affinity_verbose) {
1653  KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
1654  }
1655 
1656  if (!KMP_AFFINITY_CAPABLE()) {
1657  // Hack to try and infer the machine topology using only the data
1658  // available from hwloc on the current thread, and __kmp_xproc.
1659  KMP_ASSERT(__kmp_affinity_type == affinity_none);
1660  // hwloc only guarantees existance of PU object, so check PACKAGE and CORE
1661  hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
1662  if (o != NULL)
1663  nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
1664  else
1665  nCoresPerPkg = 1; // no PACKAGE found
1666  o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
1667  if (o != NULL)
1668  __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
1669  else
1670  __kmp_nThreadsPerCore = 1; // no CORE found
1671  __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1672  if (nCoresPerPkg == 0)
1673  nCoresPerPkg = 1; // to prevent possible division by 0
1674  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
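  // e.g., (illustrative numbers) with __kmp_xproc = 16, 2 threads/core and
  // 8 cores/package detected above: __kmp_ncores = 16 / 2 = 8 and
  // nPackages = (16 + 8 - 1) / 8 = 2 (a rounded-up integer division).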
1675  return true;
1676  }
1677 
1678  // Handle multiple types of cores if they exist on the system
1679  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);
1680 
1681  typedef struct kmp_hwloc_cpukinds_info_t {
1682  int efficiency;
1683  kmp_hw_core_type_t core_type;
1684  hwloc_bitmap_t mask;
1685  } kmp_hwloc_cpukinds_info_t;
1686  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;
1687 
1688  if (nr_cpu_kinds > 0) {
1689  unsigned nr_infos;
1690  struct hwloc_info_s *infos;
1691  cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
1692  sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
1693  for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
1694  cpukinds[idx].efficiency = -1;
1695  cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
1696  cpukinds[idx].mask = hwloc_bitmap_alloc();
1697  if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
1698  &cpukinds[idx].efficiency, &nr_infos, &infos,
1699  0) == 0) {
1700  for (unsigned i = 0; i < nr_infos; ++i) {
1701  if (__kmp_str_match("CoreType", 8, infos[i].name)) {
1702 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1703  if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
1704  cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
1705  break;
1706  } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
1707  cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
1708  break;
1709  }
1710 #endif
1711  }
1712  }
1713  }
1714  }
1715  }
1716 
1717  root = hwloc_get_root_obj(tp);
1718 
1719  // Figure out the depth and types in the topology
1720  depth = 0;
1721  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
1722  KMP_ASSERT(pu);
1723  obj = pu;
1724  types[depth] = KMP_HW_THREAD;
1725  hwloc_types[depth] = obj->type;
1726  depth++;
1727  while (obj != root && obj != NULL) {
1728  obj = obj->parent;
1729 #if HWLOC_API_VERSION >= 0x00020000
1730  if (obj->memory_arity) {
1731  hwloc_obj_t memory;
1732  for (memory = obj->memory_first_child; memory;
1733  memory = hwloc_get_next_child(tp, obj, memory)) {
1734  if (memory->type == HWLOC_OBJ_NUMANODE)
1735  break;
1736  }
1737  if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
1738  types[depth] = KMP_HW_NUMA;
1739  hwloc_types[depth] = memory->type;
1740  depth++;
1741  }
1742  }
1743 #endif
1744  type = __kmp_hwloc_type_2_topology_type(obj);
1745  if (type != KMP_HW_UNKNOWN) {
1746  types[depth] = type;
1747  hwloc_types[depth] = obj->type;
1748  depth++;
1749  }
1750  }
1751  KMP_ASSERT(depth > 0);
1752 
1753  // Get the order for the types correct
1754  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
1755  hwloc_obj_type_t hwloc_temp = hwloc_types[i];
1756  kmp_hw_t temp = types[i];
1757  types[i] = types[j];
1758  types[j] = temp;
1759  hwloc_types[i] = hwloc_types[j];
1760  hwloc_types[j] = hwloc_temp;
1761  }
1762 
1763  // Allocate the data structure to be returned.
1764  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1765 
1766  hw_thread_index = 0;
1767  pu = NULL;
1768  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
1769  int index = depth - 1;
1770  bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
1771  kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
1772  if (included) {
1773  hw_thread.clear();
1774  hw_thread.ids[index] = pu->logical_index;
1775  hw_thread.os_id = pu->os_index;
1776  // If multiple core types, then set that attribute for the hardware thread
1777  if (cpukinds) {
1778  int cpukind_index = -1;
1779  for (int i = 0; i < nr_cpu_kinds; ++i) {
1780  if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
1781  cpukind_index = i;
1782  break;
1783  }
1784  }
1785  if (cpukind_index >= 0) {
1786  hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
1787  hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
1788  }
1789  }
1790  index--;
1791  }
1792  obj = pu;
1793  prev = obj;
1794  while (obj != root && obj != NULL) {
1795  obj = obj->parent;
1796 #if HWLOC_API_VERSION >= 0x00020000
1797  // NUMA Nodes are handled differently since they are not within the
1798  // parent/child structure anymore. They are separate children
1799  // of obj (memory_first_child points to first memory child)
1800  if (obj->memory_arity) {
1801  hwloc_obj_t memory;
1802  for (memory = obj->memory_first_child; memory;
1803  memory = hwloc_get_next_child(tp, obj, memory)) {
1804  if (memory->type == HWLOC_OBJ_NUMANODE)
1805  break;
1806  }
1807  if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
1808  sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
1809  if (included) {
1810  hw_thread.ids[index] = memory->logical_index;
1811  hw_thread.ids[index + 1] = sub_id;
1812  index--;
1813  }
1814  prev = memory;
1815  }
1816  prev = obj;
1817  }
1818 #endif
1819  type = __kmp_hwloc_type_2_topology_type(obj);
1820  if (type != KMP_HW_UNKNOWN) {
1821  sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
1822  if (included) {
1823  hw_thread.ids[index] = obj->logical_index;
1824  hw_thread.ids[index + 1] = sub_id;
1825  index--;
1826  }
1827  prev = obj;
1828  }
1829  }
1830  if (included)
1831  hw_thread_index++;
1832  }
1833 
1834  // Free the core types information
1835  if (cpukinds) {
1836  for (int idx = 0; idx < nr_cpu_kinds; ++idx)
1837  hwloc_bitmap_free(cpukinds[idx].mask);
1838  __kmp_free(cpukinds);
1839  }
1840  __kmp_topology->sort_ids();
1841  return true;
1842 }
1843 #endif // KMP_USE_HWLOC
1844 
1845 // If we don't know how to retrieve the machine's processor topology, or
1846 // encounter an error in doing so, this routine is called to form a "flat"
1847 // mapping of os thread id's <-> processor id's.
1848 static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
1849  *msg_id = kmp_i18n_null;
1850  int depth = 3;
1851  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};
1852 
1853  if (__kmp_affinity_verbose) {
1854  KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
1855  }
1856 
1857  // Even if __kmp_affinity_type == affinity_none, this routine might still be
1858  // called to set __kmp_ncores, as well as
1859  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1860  if (!KMP_AFFINITY_CAPABLE()) {
1861  KMP_ASSERT(__kmp_affinity_type == affinity_none);
1862  __kmp_ncores = nPackages = __kmp_xproc;
1863  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1864  return true;
1865  }
1866 
1867  // When affinity is off, this routine will still be called to set
1868  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1869  // Make sure all these vars are set correctly.
1871  __kmp_ncores = nPackages = __kmp_avail_proc;
1872  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1873 
1874  // Construct the data structure to be returned.
1875  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1876  int avail_ct = 0;
1877  int i;
1878  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1879  // Skip this proc if it is not included in the machine model.
1880  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1881  continue;
1882  }
1883  kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
1884  hw_thread.clear();
1885  hw_thread.os_id = i;
1886  hw_thread.ids[0] = i;
1887  hw_thread.ids[1] = 0;
1888  hw_thread.ids[2] = 0;
1889  avail_ct++;
1890  }
1891  if (__kmp_affinity_verbose) {
1892  KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
1893  }
1894  return true;
1895 }
1896 
1897 #if KMP_GROUP_AFFINITY
1898 // If multiple Windows* OS processor groups exist, we can create a 2-level
1899 // topology map with the groups at level 0 and the individual procs at level 1.
1900 // This facilitates letting the threads float among all procs in a group,
1901 // if granularity=group (the default when there are multiple groups).
1902 static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
1903  *msg_id = kmp_i18n_null;
1904  int depth = 3;
1905  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
1906  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);
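  // Illustrative mapping (hypothetical 64-bit system, BITS_PER_GROUP == 64):
  // OS proc 70 is placed at ids[0] = 70 / 64 = 1 (processor group 1) and
  // ids[1] = ids[2] = 70 % 64 = 6 (its position within that group).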
1907 
1908  if (__kmp_affinity_verbose) {
1909  KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
1910  }
1911 
1912  // If we aren't affinity capable, then use flat topology
1913  if (!KMP_AFFINITY_CAPABLE()) {
1914  KMP_ASSERT(__kmp_affinity_type == affinity_none);
1915  nPackages = __kmp_num_proc_groups;
1916  __kmp_nThreadsPerCore = 1;
1917  __kmp_ncores = __kmp_xproc;
1918  nCoresPerPkg = __kmp_ncores / nPackages; // cores per package, not vice versa
1919  return true;
1920  }
1921 
1922  // Construct the data structure to be returned.
1923  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
1924  int avail_ct = 0;
1925  int i;
1926  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1927  // Skip this proc if it is not included in the machine model.
1928  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1929  continue;
1930  }
1931  kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
1932  hw_thread.clear();
1933  hw_thread.os_id = i;
1934  hw_thread.ids[0] = i / BITS_PER_GROUP;
1935  hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
1936  }
1937  return true;
1938 }
1939 #endif /* KMP_GROUP_AFFINITY */
1940 
1941 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1942 
1943 template <kmp_uint32 LSB, kmp_uint32 MSB>
1944 static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
1945  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
1946  const kmp_uint32 SHIFT_RIGHT = LSB;
1947  kmp_uint32 retval = v;
1948  retval <<= SHIFT_LEFT;
1949  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
1950  return retval;
1951 }
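// Worked example (illustrative only; these calls appear nowhere else):
//   unsigned v = 0x12345678;
//   __kmp_extract_bits<24, 31>(v); // SHIFT_LEFT = 0, SHIFT_RIGHT = 24 -> 0x12
//   __kmp_extract_bits<8, 15>(v);  // SHIFT_LEFT = 16, SHIFT_RIGHT = 8 -> 0x56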
1952 
1953 static int __kmp_cpuid_mask_width(int count) {
1954  int r = 0;
1955 
1956  while ((1 << r) < count)
1957  ++r;
1958  return r;
1959 }
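// Worked example (illustrative): __kmp_cpuid_mask_width(6) == 3, since
// 1 << 3 == 8 is the smallest power of two >= 6, and
// __kmp_cpuid_mask_width(1) == 0. The result is the number of APIC id bits
// needed to encode 'count' distinct values.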
1960 
1961 class apicThreadInfo {
1962 public:
1963  unsigned osId; // param to __kmp_affinity_bind_thread
1964  unsigned apicId; // from cpuid after binding
1965  unsigned maxCoresPerPkg; // ""
1966  unsigned maxThreadsPerPkg; // ""
1967  unsigned pkgId; // inferred from above values
1968  unsigned coreId; // ""
1969  unsigned threadId; // ""
1970 };
1971 
1972 static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
1973  const void *b) {
1974  const apicThreadInfo *aa = (const apicThreadInfo *)a;
1975  const apicThreadInfo *bb = (const apicThreadInfo *)b;
1976  if (aa->pkgId < bb->pkgId)
1977  return -1;
1978  if (aa->pkgId > bb->pkgId)
1979  return 1;
1980  if (aa->coreId < bb->coreId)
1981  return -1;
1982  if (aa->coreId > bb->coreId)
1983  return 1;
1984  if (aa->threadId < bb->threadId)
1985  return -1;
1986  if (aa->threadId > bb->threadId)
1987  return 1;
1988  return 0;
1989 }
1990 
1991 class kmp_cache_info_t {
1992 public:
1993  struct info_t {
1994  unsigned level, mask;
1995  };
1996  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
1997  size_t get_depth() const { return depth; }
1998  info_t &operator[](size_t index) { return table[index]; }
1999  const info_t &operator[](size_t index) const { return table[index]; }
2000 
2001  static kmp_hw_t get_topology_type(unsigned level) {
2002  KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
2003  switch (level) {
2004  case 1:
2005  return KMP_HW_L1;
2006  case 2:
2007  return KMP_HW_L2;
2008  case 3:
2009  return KMP_HW_L3;
2010  }
2011  return KMP_HW_UNKNOWN;
2012  }
2013 
2014 private:
2015  static const int MAX_CACHE_LEVEL = 3;
2016 
2017  size_t depth;
2018  info_t table[MAX_CACHE_LEVEL];
2019 
2020  void get_leaf4_levels() {
2021  unsigned level = 0;
2022  while (depth < MAX_CACHE_LEVEL) {
2023  unsigned cache_type, max_threads_sharing;
2024  unsigned cache_level, cache_mask_width;
2025  kmp_cpuid buf2;
2026  __kmp_x86_cpuid(4, level, &buf2);
2027  cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
2028  if (!cache_type)
2029  break;
2030  // Skip instruction caches
2031  if (cache_type == 2) {
2032  level++;
2033  continue;
2034  }
2035  max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
2036  cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
2037  cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
2038  table[depth].level = cache_level;
2039  table[depth].mask = ((-1) << cache_mask_width);
2040  depth++;
2041  level++;
2042  }
2043  }
2044 };
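// Illustrative decode of one cpuid(4) subleaf (hypothetical values): if EAX
// reports cache_type == 1 (data), cache_level == 1, and a max_threads_sharing
// field of 1 (so 1 + 1 == 2 threads share the cache), then cache_mask_width
// == __kmp_cpuid_mask_width(2) == 1 and table[depth].mask == (-1) << 1 ==
// 0xFFFFFFFE. Two APIC ids that are equal after ANDing with this mask
// (e.g., 2k and 2k + 1) share that L1 data cache.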
2045 
2046 // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
2047 // an algorithm which cycles through the available os threads, setting
2048 // the current thread's affinity mask to that thread, and then retrieves
2049 // the Apic Id for each thread context using the cpuid instruction.
2050 static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
2051  kmp_cpuid buf;
2052  *msg_id = kmp_i18n_null;
2053 
2054  if (__kmp_affinity_verbose) {
2055  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
2056  }
2057 
2058  // Check if cpuid leaf 4 is supported.
2059  __kmp_x86_cpuid(0, 0, &buf);
2060  if (buf.eax < 4) {
2061  *msg_id = kmp_i18n_str_NoLeaf4Support;
2062  return false;
2063  }
2064 
2065  // The algorithm used starts by setting the affinity to each available thread
2066  // and retrieving info from the cpuid instruction, so if we are not capable of
2067  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
2068  // need to do something else - use the defaults that we calculated from
2069  // issuing cpuid without binding to each proc.
2070  if (!KMP_AFFINITY_CAPABLE()) {
2071  // Hack to try and infer the machine topology using only the data
2072  // available from cpuid on the current thread, and __kmp_xproc.
2073  KMP_ASSERT(__kmp_affinity_type == affinity_none);
2074 
2075  // Get an upper bound on the number of threads per package using cpuid(1).
2076  // On some OS/chip combinations where HT is supported by the chip but is
2077  // disabled, this value will be 2 on a single core chip. Usually, it will be
2078  // 2 if HT is enabled and 1 if HT is disabled.
2079  __kmp_x86_cpuid(1, 0, &buf);
2080  int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
2081  if (maxThreadsPerPkg == 0) {
2082  maxThreadsPerPkg = 1;
2083  }
2084 
2085  // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded
2086  // value.
2087  //
2088  // The author of cpu_count.cpp treated this as only an upper bound on the
2089  // number of cores, but I haven't seen any cases where it was greater than
2090  // the actual number of cores, so we will treat it as exact in this block of
2091  // code.
2092  //
2093  // First, we need to check if cpuid(4) is supported on this chip. To see if
2094  // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
2095  // greater.
2096  __kmp_x86_cpuid(0, 0, &buf);
2097  if (buf.eax >= 4) {
2098  __kmp_x86_cpuid(4, 0, &buf);
2099  nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
2100  } else {
2101  nCoresPerPkg = 1;
2102  }
2103 
2104  // There is no way to reliably tell if HT is enabled without issuing the
2105  // cpuid instruction from every thread and correlating the cpuid info, so
2106  // if the machine is not affinity capable, we assume that HT is off. We have
2107  // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine
2108  // does not support HT.
2109  //
2110  // - Older OSes are usually found on machines with older chips, which do not
2111  // support HT.
2112  // - The performance penalty for mistakenly identifying a machine as HT when
2113  // it isn't (which results in blocktime being incorrectly set to 0) is
2114  // greater than the penalty for mistakenly identifying a machine as
2115  // being 1 thread/core when it is really HT enabled (which results in
2116  // blocktime being incorrectly set to a positive value).
2117  __kmp_ncores = __kmp_xproc;
2118  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
2119  __kmp_nThreadsPerCore = 1;
2120  return true;
2121  }
2122 
2123  // From here on, we can assume that it is safe to call
2124  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
2125  // __kmp_affinity_type = affinity_none.
2126 
2127  // Save the affinity mask for the current thread.
2128  kmp_affinity_raii_t previous_affinity;
2129 
2130  // Run through each of the available contexts, binding the current thread
2131  // to it, and obtaining the pertinent information using the cpuid instr.
2132  //
2133  // The relevant information is:
2134  // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
2135  // has a unique Apic Id, which is of the form pkg# : core# : thread#.
2136  // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
2137  // of this field determines the width of the core# + thread# fields in the
2138  // Apic Id. It is also an upper bound on the number of threads per
2139  // package, but it has been verified that situations happen where it is not
2140  // exact. In particular, on certain OS/chip combinations where Intel(R)
2141  // Hyper-Threading Technology is supported by the chip but has been
2142  // disabled, the value of this field will be 2 (for a single core chip).
2143  // On other OS/chip combinations supporting Intel(R) Hyper-Threading
2144  // Technology, the value of this field will be 1 when Intel(R)
2145  // Hyper-Threading Technology is disabled and 2 when it is enabled.
2146  // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
2147  // of this field (+1) determines the width of the core# field in the Apic
2148  // Id. The comments in "cpucount.cpp" say that this value is an upper
2149  // bound, but the IA-32 architecture manual says that it is exactly the
2150  // number of cores per package, and I haven't seen any case where it
2151  // wasn't.
2152  //
2153  // From this information, deduce the package Id, core Id, and thread Id,
2154  // and set the corresponding fields in the apicThreadInfo struct.
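  // Worked example (hypothetical values): with maxThreadsPerPkg = 4 and
  // maxCoresPerPkg = 2, the code below computes widthCT = 2, widthC = 1 and
  // widthT = 1. An apicId of 0b1101 then decomposes as
  //   pkgId = 0b1101 >> 2 = 3
  //   coreId = (0b1101 >> 1) & 0b1 = 0
  //   threadId = 0b1101 & 0b1 = 1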
2155  unsigned i;
2156  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
2157  __kmp_avail_proc * sizeof(apicThreadInfo));
2158  unsigned nApics = 0;
2159  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
2160  // Skip this proc if it is not included in the machine model.
2161  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
2162  continue;
2163  }
2164  KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);
2165 
2166  __kmp_affinity_dispatch->bind_thread(i);
2167  threadInfo[nApics].osId = i;
2168 
2169  // The apic id and max threads per pkg come from cpuid(1).
2170  __kmp_x86_cpuid(1, 0, &buf);
2171  if (((buf.edx >> 9) & 1) == 0) {
2172  __kmp_free(threadInfo);
2173  *msg_id = kmp_i18n_str_ApicNotPresent;
2174  return false;
2175  }
2176  threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
2177  threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
2178  if (threadInfo[nApics].maxThreadsPerPkg == 0) {
2179  threadInfo[nApics].maxThreadsPerPkg = 1;
2180  }
2181 
2182  // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
2183  // value.
2184  //
2185  // First, we need to check if cpuid(4) is supported on this chip. To see if
2186  // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
2187  // or greater.
2188  __kmp_x86_cpuid(0, 0, &buf);
2189  if (buf.eax >= 4) {
2190  __kmp_x86_cpuid(4, 0, &buf);
2191  threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
2192  } else {
2193  threadInfo[nApics].maxCoresPerPkg = 1;
2194  }
2195 
2196  // Infer the pkgId / coreId / threadId using only the info obtained locally.
2197  int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
2198  threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
2199 
2200  int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
2201  int widthT = widthCT - widthC;
2202  if (widthT < 0) {
2203  // I've never seen this one happen, but I suppose it could, if the cpuid
2204  // instruction on a chip was really screwed up. Make sure to restore the
2205  // affinity mask before the tail call.
2206  __kmp_free(threadInfo);
2207  *msg_id = kmp_i18n_str_InvalidCpuidInfo;
2208  return false;
2209  }
2210 
2211  int maskC = (1 << widthC) - 1;
2212  threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;
2213 
2214  int maskT = (1 << widthT) - 1;
2215  threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
2216 
2217  nApics++;
2218  }
2219 
2220  // We've collected all the info we need.
2221  // Restore the old affinity mask for this thread.
2222  previous_affinity.restore();
2223 
2224  // Sort the threadInfo table by physical Id.
2225  qsort(threadInfo, nApics, sizeof(*threadInfo),
2226  __kmp_affinity_cmp_apicThreadInfo_phys_id);
2227 
2228  // The table is now sorted by pkgId / coreId / threadId, but we really don't
2229  // know the radix of any of the fields. pkgId's may be sparsely assigned among
2230  // the chips on a system. Although coreId's are usually assigned
2231  // [0 .. coresPerPkg-1] and threadId's are usually assigned
2232  // [0..threadsPerCore-1], we don't want to make any such assumptions.
2233  //
2234  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
2235  // total # packages) are at this point - we want to determine that now. We
2236  // only have an upper bound on the first two figures.
2237  //
2238  // We also perform a consistency check at this point: the values returned by
2239  // the cpuid instruction for any thread bound to a given package had better
2240  // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
2241  nPackages = 1;
2242  nCoresPerPkg = 1;
2243  __kmp_nThreadsPerCore = 1;
2244  unsigned nCores = 1;
2245 
2246  unsigned pkgCt = 1; // to determine radii
2247  unsigned lastPkgId = threadInfo[0].pkgId;
2248  unsigned coreCt = 1;
2249  unsigned lastCoreId = threadInfo[0].coreId;
2250  unsigned threadCt = 1;
2251  unsigned lastThreadId = threadInfo[0].threadId;
2252 
2253  // intra-pkg consistency checks
2254  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
2255  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
2256 
2257  for (i = 1; i < nApics; i++) {
2258  if (threadInfo[i].pkgId != lastPkgId) {
2259  nCores++;
2260  pkgCt++;
2261  lastPkgId = threadInfo[i].pkgId;
2262  if ((int)coreCt > nCoresPerPkg)
2263  nCoresPerPkg = coreCt;
2264  coreCt = 1;
2265  lastCoreId = threadInfo[i].coreId;
2266  if ((int)threadCt > __kmp_nThreadsPerCore)
2267  __kmp_nThreadsPerCore = threadCt;
2268  threadCt = 1;
2269  lastThreadId = threadInfo[i].threadId;
2270 
2271  // This is a different package, so go on to the next iteration without
2272  // doing any consistency checks. Reset the consistency check vars, though.
2273  prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
2274  prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
2275  continue;
2276  }
2277 
2278  if (threadInfo[i].coreId != lastCoreId) {
2279  nCores++;
2280  coreCt++;
2281  lastCoreId = threadInfo[i].coreId;
2282  if ((int)threadCt > __kmp_nThreadsPerCore)
2283  __kmp_nThreadsPerCore = threadCt;
2284  threadCt = 1;
2285  lastThreadId = threadInfo[i].threadId;
2286  } else if (threadInfo[i].threadId != lastThreadId) {
2287  threadCt++;
2288  lastThreadId = threadInfo[i].threadId;
2289  } else {
2290  __kmp_free(threadInfo);
2291  *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
2292  return false;
2293  }
2294 
2295  // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
2296  // fields agree for all the threads bound to a given package.
2297  if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
2298  (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
2299  __kmp_free(threadInfo);
2300  *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
2301  return false;
2302  }
2303  }
2304  // When affinity is off, this routine will still be called to set
2305  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
2306  // Make sure all these vars are set correctly
2307  nPackages = pkgCt;
2308  if ((int)coreCt > nCoresPerPkg)
2309  nCoresPerPkg = coreCt;
2310  if ((int)threadCt > __kmp_nThreadsPerCore)
2311  __kmp_nThreadsPerCore = threadCt;
2312  __kmp_ncores = nCores;
2313  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
2314 
2315  // Now that we've determined the number of packages, the number of cores per
2316  // package, and the number of threads per core, we can construct the data
2317  // structure that is to be returned.
2318  int idx = 0;
2319  int pkgLevel = 0;
2320  int coreLevel = 1;
2321  int threadLevel = 2;
2323  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
2324  kmp_hw_t types[3];
2325  if (pkgLevel >= 0)
2326  types[idx++] = KMP_HW_SOCKET;
2327  if (coreLevel >= 0)
2328  types[idx++] = KMP_HW_CORE;
2329  if (threadLevel >= 0)
2330  types[idx++] = KMP_HW_THREAD;
2331 
2332  KMP_ASSERT(depth > 0);
2333  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);
2334 
2335  for (i = 0; i < nApics; ++i) {
2336  idx = 0;
2337  unsigned os = threadInfo[i].osId;
2338  kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
2339  hw_thread.clear();
2340 
2341  if (pkgLevel >= 0) {
2342  hw_thread.ids[idx++] = threadInfo[i].pkgId;
2343  }
2344  if (coreLevel >= 0) {
2345  hw_thread.ids[idx++] = threadInfo[i].coreId;
2346  }
2347  if (threadLevel >= 0) {
2348  hw_thread.ids[idx++] = threadInfo[i].threadId;
2349  }
2350  hw_thread.os_id = os;
2351  }
2352 
2353  __kmp_free(threadInfo);
2354  __kmp_topology->sort_ids();
2355  if (!__kmp_topology->check_ids()) {
2356  kmp_topology_t::deallocate(__kmp_topology);
2357  __kmp_topology = nullptr;
2358  *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
2359  return false;
2360  }
2361  return true;
2362 }
2363 
2364 // Hybrid cpu detection using CPUID.1A
2365 // Thread should be pinned to processor already
2366 static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
2367  unsigned *native_model_id) {
2368  kmp_cpuid buf;
2369  __kmp_x86_cpuid(0x1a, 0, &buf);
2370  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
2371  switch (*type) {
2372  case KMP_HW_CORE_TYPE_ATOM:
2373  *efficiency = 0;
2374  break;
2375  case KMP_HW_CORE_TYPE_CORE:
2376  *efficiency = 1;
2377  break;
2378  default:
2379  *efficiency = 0;
2380  }
2381  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
2382 }
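// Illustrative: CPUID.1A places the core type in EAX bits 31:24 and the
// native model id in bits 23:0. On current hybrid parts the type values are
// believed to be 0x20 (Intel Atom) and 0x40 (Intel Core), which is what
// kmp_hw_core_type_t mirrors, so the cast above is a straight copy of the
// hardware encoding.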
2383 
2384 // Intel(R) microarchitecture code name Nehalem, Dunnington and later
2385 // architectures support a newer interface for specifying the x2APIC Ids,
2386 // based on CPUID.B or CPUID.1F
2387 /*
2388  * CPUID.B or 1F, Input ECX (sub leaf # aka level number)
2389     Bits            Bits            Bits           Bits
2390     31-16           15-8            7-5            4-0
2391 ---+-----------+--------------+-------------+-----------------+
2392 EAX| reserved  |   reserved   |   reserved  |  Bits to Shift  |
2393 ---+-----------+--------------+-------------+-----------------+
2394 EBX| reserved  |  Num logical processors at level (16 bits)   |
2395 ---+-----------+--------------+-------------+-----------------+
2396 ECX| reserved  |  Level Type  |     Level Number (8 bits)     |
2397 ---+-----------+--------------+-------------+-----------------+
2398 EDX|                   X2APIC ID (32 bits)                    |
2399 ---+-----------+--------------+-------------+-----------------+
2400 */
2401 
2402 enum {
2403  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
2404  INTEL_LEVEL_TYPE_SMT = 1,
2405  INTEL_LEVEL_TYPE_CORE = 2,
2406  INTEL_LEVEL_TYPE_TILE = 3,
2407  INTEL_LEVEL_TYPE_MODULE = 4,
2408  INTEL_LEVEL_TYPE_DIE = 5,
2409  INTEL_LEVEL_TYPE_LAST = 6,
2410 };
2411 
2412 struct cpuid_level_info_t {
2413  unsigned level_type, mask, mask_width, nitems, cache_mask;
2414 };
2415 
2416 static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
2417  switch (intel_type) {
2418  case INTEL_LEVEL_TYPE_INVALID:
2419  return KMP_HW_SOCKET;
2420  case INTEL_LEVEL_TYPE_SMT:
2421  return KMP_HW_THREAD;
2422  case INTEL_LEVEL_TYPE_CORE:
2423  return KMP_HW_CORE;
2424  case INTEL_LEVEL_TYPE_TILE:
2425  return KMP_HW_TILE;
2426  case INTEL_LEVEL_TYPE_MODULE:
2427  return KMP_HW_MODULE;
2428  case INTEL_LEVEL_TYPE_DIE:
2429  return KMP_HW_DIE;
2430  }
2431  return KMP_HW_UNKNOWN;
2432 }
2433 
2434 // This function takes the topology leaf, a levels array to store the levels
2435 // detected, and a bitmap of the known levels.
2436 // Returns the number of levels in the topology.
2437 static unsigned
2438 __kmp_x2apicid_get_levels(int leaf,
2439  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
2440  kmp_uint64 known_levels) {
2441  unsigned level, levels_index;
2442  unsigned level_type, mask_width, nitems;
2443  kmp_cpuid buf;
2444 
2445  // The algorithm has each known topology layer act as the highest of any
2446  // unknown topology layers sitting directly above it.
2447  // e.g., suppose the layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X>, <Y>,
2448  // <Z> are unknown topology layers. Then SMT will take on the characteristics
2449  // of (SMT x <X>) and CORE will take on those of (CORE x <Y> x <Z>).
2450  // This eliminates unknown portions of the topology while still keeping the
2451  // correct structure.
2452  level = levels_index = 0;
2453  do {
2454  __kmp_x86_cpuid(leaf, level, &buf);
2455  level_type = __kmp_extract_bits<8, 15>(buf.ecx);
2456  mask_width = __kmp_extract_bits<0, 4>(buf.eax);
2457  nitems = __kmp_extract_bits<0, 15>(buf.ebx);
2458  if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
2459  return 0;
2460 
2461  if (known_levels & (1ull << level_type)) {
2462  // Add a new level to the topology
2463  KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
2464  levels[levels_index].level_type = level_type;
2465  levels[levels_index].mask_width = mask_width;
2466  levels[levels_index].nitems = nitems;
2467  levels_index++;
2468  } else {
2469  // If it is an unknown level, then logically move the previous layer up
2470  if (levels_index > 0) {
2471  levels[levels_index - 1].mask_width = mask_width;
2472  levels[levels_index - 1].nitems = nitems;
2473  }
2474  }
2475  level++;
2476  } while (level_type != INTEL_LEVEL_TYPE_INVALID);
2477 
2478  // Set the masks to & with apicid
2479  for (unsigned i = 0; i < levels_index; ++i) {
2480  if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
2481  levels[i].mask = ~((-1) << levels[i].mask_width);
2482  levels[i].cache_mask = (-1) << levels[i].mask_width;
2483  for (unsigned j = 0; j < i; ++j)
2484  levels[i].mask ^= levels[j].mask;
2485  } else {
2486  KMP_DEBUG_ASSERT(levels_index > 0);
2487  levels[i].mask = (-1) << levels[i - 1].mask_width;
2488  levels[i].cache_mask = 0;
2489  }
2490  }
2491  return levels_index;
2492 }
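// Worked example (hypothetical machine): suppose the leaf reports an SMT
// level with mask_width = 1 and a CORE level with mask_width = 5 before the
// terminating INVALID (package) subleaf. Then:
//   levels[0].mask = ~((-1) << 1) = 0x01            (thread bits)
//   levels[1].mask = (~((-1) << 5)) ^ 0x01 = 0x1E   (core bits)
//   levels[2].mask = (-1) << 5                      (package bits)
// so an x2APIC id of 0x2B decodes to thread 0x2B & 0x01 = 1,
// core (0x2B & 0x1E) >> 1 = 5, and package (0x2B & levels[2].mask) >> 5 = 1.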
2493 
2494 static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) {
2495 
2496  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
2497  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
2498  unsigned levels_index;
2499  kmp_cpuid buf;
2500  kmp_uint64 known_levels;
2501  int topology_leaf, highest_leaf, apic_id;
2502  int num_leaves;
2503  static int leaves[] = {0, 0};
2504 
2505  kmp_i18n_id_t leaf_message_id;
2506 
2507  KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);
2508 
2509  *msg_id = kmp_i18n_null;
2510  if (__kmp_affinity_verbose) {
2511  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
2512  }
2513 
2514  // Figure out the known topology levels
2515  known_levels = 0ull;
2516  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
2517  if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
2518  known_levels |= (1ull << i);
2519  }
2520  }
2521 
2522  // Get the highest cpuid leaf supported
2523  __kmp_x86_cpuid(0, 0, &buf);
2524  highest_leaf = buf.eax;
2525 
2526  // If a specific topology method was requested, only allow that specific leaf;
2527  // otherwise, try both leaves 31 and 11 in that order.
2528  num_leaves = 0;
2529  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
2530  num_leaves = 1;
2531  leaves[0] = 11;
2532  leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2533  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
2534  num_leaves = 1;
2535  leaves[0] = 31;
2536  leaf_message_id = kmp_i18n_str_NoLeaf31Support;
2537  } else {
2538  num_leaves = 2;
2539  leaves[0] = 31;
2540  leaves[1] = 11;
2541  leaf_message_id = kmp_i18n_str_NoLeaf11Support;
2542  }
2543 
2544  // Check to see if cpuid leaf 31 or 11 is supported.
2545  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2546  topology_leaf = -1;
2547  for (int i = 0; i < num_leaves; ++i) {
2548  int leaf = leaves[i];
2549  if (highest_leaf < leaf)
2550  continue;
2551  __kmp_x86_cpuid(leaf, 0, &buf);
2552  if (buf.ebx == 0)
2553  continue;
2554  topology_leaf = leaf;
2555  levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
2556  if (levels_index == 0)
2557  continue;
2558  break;
2559  }
2560  if (topology_leaf == -1 || levels_index == 0) {
2561  *msg_id = leaf_message_id;
2562  return false;
2563  }
2564  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
2565 
2566  // The algorithm used starts by setting the affinity to each available thread
2567  // and retrieving info from the cpuid instruction, so if we are not capable of
2568  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then
2569  // we need to do something else - use the defaults that we calculated from
2570  // issuing cpuid without binding to each proc.
2571  if (!KMP_AFFINITY_CAPABLE()) {
2572  // Hack to try and infer the machine topology using only the data
2573  // available from cpuid on the current thread, and __kmp_xproc.
2574  KMP_ASSERT(__kmp_affinity_type == affinity_none);
2575  for (unsigned i = 0; i < levels_index; ++i) {
2576  if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
2577  __kmp_nThreadsPerCore = levels[i].nitems;
2578  } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
2579  nCoresPerPkg = levels[i].nitems;
2580  }
2581  }
2582  __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
2583  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
2584  return true;
2585  }
2586 
2587  // Allocate the data structure to be returned.
2588  int depth = levels_index;
2589  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
2590  types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
2591  __kmp_topology =
2592  kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);
2593 
2594  // Insert equivalent cache types if they exist
2595  kmp_cache_info_t cache_info;
2596  for (size_t i = 0; i < cache_info.get_depth(); ++i) {
2597  const kmp_cache_info_t::info_t &info = cache_info[i];
2598  unsigned cache_mask = info.mask;
2599  unsigned cache_level = info.level;
2600  for (unsigned j = 0; j < levels_index; ++j) {
2601  unsigned hw_cache_mask = levels[j].cache_mask;
2602  kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
2603  if (hw_cache_mask == cache_mask && j < levels_index - 1) {
2604  kmp_hw_t type =
2605  __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
2606  __kmp_topology->set_equivalent_type(cache_type, type);
2607  }
2608  }
2609  }
2610 
2611  // From here on, we can assume that it is safe to call
2612  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
2613  // __kmp_affinity_type = affinity_none.
2614 
2615  // Save the affinity mask for the current thread.
2616  kmp_affinity_raii_t previous_affinity;
2617 
2618  // Run through each of the available contexts, binding the current thread
2619  // to it, and obtaining the pertinent information using the cpuid instr.
2620  unsigned int proc;
2621  int hw_thread_index = 0;
2622  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
2623  cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
2624  unsigned my_levels_index;
2625 
2626  // Skip this proc if it is not included in the machine model.
2627  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
2628  continue;
2629  }
2630  KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);
2631 
2632  __kmp_affinity_dispatch->bind_thread(proc);
2633 
2634  // New algorithm
2635  __kmp_x86_cpuid(topology_leaf, 0, &buf);
2636  apic_id = buf.edx;
2637  kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
2638  my_levels_index =
2639  __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
2640  if (my_levels_index == 0 || my_levels_index != levels_index) {
2641  *msg_id = kmp_i18n_str_InvalidCpuidInfo;
2642  return false;
2643  }
2644  hw_thread.clear();
2645  hw_thread.os_id = proc;
2646  // Put in topology information
2647  for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
2648  hw_thread.ids[idx] = apic_id & my_levels[j].mask;
2649  if (j > 0) {
2650  hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
2651  }
2652  }
2653  // Hybrid information
2654  if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
2655  kmp_hw_core_type_t type;
2656  unsigned native_model_id;
2657  int efficiency;
2658  __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
2659  hw_thread.attrs.set_core_type(type);
2660  hw_thread.attrs.set_core_eff(efficiency);
2661  }
2662  hw_thread_index++;
2663  }
2664  KMP_ASSERT(hw_thread_index > 0);
2665  __kmp_topology->sort_ids();
2666  if (!__kmp_topology->check_ids()) {
2667  kmp_topology_t::deallocate(__kmp_topology);
2668  __kmp_topology = nullptr;
2669  *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
2670  return false;
2671  }
2672  return true;
2673 }
2674 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2675 
2676 #define osIdIndex 0
2677 #define threadIdIndex 1
2678 #define coreIdIndex 2
2679 #define pkgIdIndex 3
2680 #define nodeIdIndex 4
2681 
2682 typedef unsigned *ProcCpuInfo;
2683 static unsigned maxIndex = pkgIdIndex;
2684 
2685 static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
2686  const void *b) {
2687  unsigned i;
2688  const unsigned *aa = *(unsigned *const *)a;
2689  const unsigned *bb = *(unsigned *const *)b;
2690  for (i = maxIndex;; i--) {
2691  if (aa[i] < bb[i])
2692  return -1;
2693  if (aa[i] > bb[i])
2694  return 1;
2695  if (i == osIdIndex)
2696  break;
2697  }
2698  return 0;
2699 }
2700 
2701 #if KMP_USE_HIER_SCHED
2702 // Set the array sizes for the hierarchy layers
2703 static void __kmp_dispatch_set_hierarchy_values() {
2704  // Set the maximum number of L1's to number of cores
2705  // Set the maximum number of L2's to either the number of cores / 2 for
2706  // Intel(R) Xeon Phi(TM) coprocessors formerly codenamed Knights Landing,
2707  // or the number of cores for Intel(R) Xeon(R) processors
2708  // Set the maximum number of NUMA nodes and L3's to number of packages
2709  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
2710  nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2711  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
2712 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
2713  KMP_MIC_SUPPORTED
2714  if (__kmp_mic_type >= mic3)
2715  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
2716  else
2717 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2718  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
2719  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
2720  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
2721  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
2722  // Set the number of threads per unit
2723  // Number of hardware threads per L1/L2/L3/NUMA/LOOP
2724  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
2725  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
2726  __kmp_nThreadsPerCore;
2727 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
2728  KMP_MIC_SUPPORTED
2729  if (__kmp_mic_type >= mic3)
2730  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2731  2 * __kmp_nThreadsPerCore;
2732  else
2733 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2734  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2735  __kmp_nThreadsPerCore;
2736  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
2737  nCoresPerPkg * __kmp_nThreadsPerCore;
2738  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
2739  nCoresPerPkg * __kmp_nThreadsPerCore;
2740  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
2741  nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2742 }
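// Illustrative values (hypothetical non-MIC machine with nPackages = 2,
// nCoresPerPkg = 8, __kmp_nThreadsPerCore = 2, __kmp_ncores = 16):
//   max units:       THREAD 32, L1 16, L2 16, L3 2, NUMA 2, LOOP 1
//   threads per unit: THREAD 1, L1 2,  L2 2,  L3 16, NUMA 16, LOOP 32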
2743 
2744 // Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
2745 // i.e., this thread's L1 or this thread's L2, etc.
2746 int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
2747  int index = type + 1;
2748  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
2749  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
2750  if (type == kmp_hier_layer_e::LAYER_THREAD)
2751  return tid;
2752  else if (type == kmp_hier_layer_e::LAYER_LOOP)
2753  return 0;
2754  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
2755  if (tid >= num_hw_threads)
2756  tid = tid % num_hw_threads;
2757  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
2758 }
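// Illustrative: on the hypothetical machine sketched above (2 threads per
// core, 16 cores), __kmp_dispatch_get_index(21, kmp_hier_layer_e::LAYER_L1)
// returns (21 / 2) % 16 == 10, i.e., thread 21 belongs to the L1 of core 10.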
2759 
2760 // Return the number of t1's per t2
2761 int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
2762  int i1 = t1 + 1;
2763  int i2 = t2 + 1;
2764  KMP_DEBUG_ASSERT(i1 <= i2);
2765  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
2766  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
2767  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
2768  // (threads per t2 unit) / (threads per t1 unit) = number of t1 units per t2
2769  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
2770 }
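// Illustrative: with the same hypothetical machine,
// __kmp_dispatch_get_t1_per_t2(LAYER_L1, LAYER_L3) == 16 / 2 == 8,
// i.e., eight L1 caches underneath each L3.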
2771 #endif // KMP_USE_HIER_SCHED
2772 
2773 static inline const char *__kmp_cpuinfo_get_filename() {
2774  const char *filename;
2775  if (__kmp_cpuinfo_file != nullptr)
2776  filename = __kmp_cpuinfo_file;
2777  else
2778  filename = "/proc/cpuinfo";
2779  return filename;
2780 }
2781 
2782 static inline const char *__kmp_cpuinfo_get_envvar() {
2783  const char *envvar = nullptr;
2784  if (__kmp_cpuinfo_file != nullptr)
2785  envvar = "KMP_CPUINFO_FILE";
2786  return envvar;
2787 }
2788 
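// For reference, a typical record in /proc/cpuinfo (or a KMP_CPUINFO_FILE in
// the same format) looks like this abridged, hypothetical example:
//   processor   : 0
//   physical id : 0
//   core id     : 0
//   thread id   : 0
//   ...
// terminated by a blank line; the parser below keys off the "processor",
// "physical id", "core id", "thread id" and "node_<n> id" tokens and ignores
// everything else.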
2789 // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
2790 // affinity map.
2791 static bool __kmp_affinity_create_cpuinfo_map(int *line,
2792  kmp_i18n_id_t *const msg_id) {
2793  const char *filename = __kmp_cpuinfo_get_filename();
2794  const char *envvar = __kmp_cpuinfo_get_envvar();
2795  *msg_id = kmp_i18n_null;
2796 
2797  if (__kmp_affinity_verbose) {
2798  KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
2799  }
2800 
2801  kmp_safe_raii_file_t f(filename, "r", envvar);
2802 
2803  // Scan the file once: count the number of "processor" (osId) fields, and
2804  // find the highest value of <n> for a node_<n> field.
2805  char buf[256];
2806  unsigned num_records = 0;
2807  while (!feof(f)) {
2808  buf[sizeof(buf) - 1] = 1;
2809  if (!fgets(buf, sizeof(buf), f)) {
2810  // Read failed, presumably because of EOF
2811  break;
2812  }
2813 
2814  char s1[] = "processor";
2815  if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2816  num_records++;
2817  continue;
2818  }
2819 
2820  // FIXME - this will match "node_<n> <garbage>"
2821  unsigned level;
2822  if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2823  // validate the input first:
2824  if (level > (unsigned)__kmp_xproc) { // level is too big
2825  level = __kmp_xproc;
2826  }
2827  if (nodeIdIndex + level >= maxIndex) {
2828  maxIndex = nodeIdIndex + level;
2829  }
2830  continue;
2831  }
2832  }
2833 
2834  // Check for empty file / no valid processor records, or too many. The number
2835  // of records can't exceed the number of valid bits in the affinity mask.
2836  if (num_records == 0) {
2837  *msg_id = kmp_i18n_str_NoProcRecords;
2838  return false;
2839  }
2840  if (num_records > (unsigned)__kmp_xproc) {
2841  *msg_id = kmp_i18n_str_TooManyProcRecords;
2842  return false;
2843  }
2844 
2845  // Set the file pointer back to the beginning, so that we can scan the file
2846  // again, this time performing a full parse of the data. Allocate a vector of
2847  // ProcCpuInfo objects, where we will place the data. Adding an extra element
2848  // at the end allows us to remove a lot of extra checks for termination
2849  // conditions.
2850  if (fseek(f, 0, SEEK_SET) != 0) {
2851  *msg_id = kmp_i18n_str_CantRewindCpuinfo;
2852  return false;
2853  }
2854 
2855  // Allocate the array of records to store the proc info in. The dummy
2856  // element at the end makes the logic in filling them out easier to code.
2857  unsigned **threadInfo =
2858  (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
2859  unsigned i;
2860  for (i = 0; i <= num_records; i++) {
2861  threadInfo[i] =
2862  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2863  }
2864 
2865 #define CLEANUP_THREAD_INFO \
2866  for (i = 0; i <= num_records; i++) { \
2867  __kmp_free(threadInfo[i]); \
2868  } \
2869  __kmp_free(threadInfo);
2870 
2871  // A value of UINT_MAX means that we didn't find the field
2872  unsigned __index;
2873 
2874 #define INIT_PROC_INFO(p) \
2875  for (__index = 0; __index <= maxIndex; __index++) { \
2876  (p)[__index] = UINT_MAX; \
2877  }
2878 
2879  for (i = 0; i <= num_records; i++) {
2880  INIT_PROC_INFO(threadInfo[i]);
2881  }
2882 
2883  unsigned num_avail = 0;
2884  *line = 0;
2885  while (!feof(f)) {
2886  // Create an inner scoping level, so that all the goto targets at the end of
2887  // the loop appear in an outer scoping level. This avoids warnings about
2888  // jumping past an initialization to a target in the same block.
2889  {
2890  buf[sizeof(buf) - 1] = 1;
2891  bool long_line = false;
2892  if (!fgets(buf, sizeof(buf), f)) {
2893  // Read failed, presumably because of EOF.
2894  // If there is valid data in threadInfo[num_avail], then fake
2895  // a blank line to ensure that the last record gets parsed.
2896  bool valid = false;
2897  for (i = 0; i <= maxIndex; i++) {
2898  if (threadInfo[num_avail][i] != UINT_MAX) {
2899  valid = true;
2900  }
2901  }
2902  if (!valid) {
2903  break;
2904  }
2905  buf[0] = 0;
2906  } else if (!buf[sizeof(buf) - 1]) {
2907  // The line is longer than the buffer. Set a flag and don't
2908  // emit an error if we were going to ignore the line, anyway.
2909  long_line = true;
2910 
2911 #define CHECK_LINE \
2912  if (long_line) { \
2913  CLEANUP_THREAD_INFO; \
2914  *msg_id = kmp_i18n_str_LongLineCpuinfo; \
2915  return false; \
2916  }
2917  }
2918  (*line)++;
2919 
2920  char s1[] = "processor";
2921  if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2922  CHECK_LINE;
2923  char *p = strchr(buf + sizeof(s1) - 1, ':');
2924  unsigned val;
2925  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2926  goto no_val;
2927  if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
2928 #if KMP_ARCH_AARCH64
2929  // Handle the old AArch64 /proc/cpuinfo layout differently,
2930  // it contains all of the 'processor' entries listed in a
2931  // single 'Processor' section, so the normal check for
2932  // duplicates in that section will always fail.
2933  num_avail++;
2934 #else
2935  goto dup_field;
2936 #endif
2937  threadInfo[num_avail][osIdIndex] = val;
2938 #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2939  char path[256];
2940  KMP_SNPRINTF(
2941  path, sizeof(path),
2942  "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2943  threadInfo[num_avail][osIdIndex]);
2944  __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2945 
2946  KMP_SNPRINTF(path, sizeof(path),
2947  "/sys/devices/system/cpu/cpu%u/topology/core_id",
2948  threadInfo[num_avail][osIdIndex]);
2949  __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
2950  continue;
2951 #else
2952  }
2953  char s2[] = "physical id";
2954  if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2955  CHECK_LINE;
2956  char *p = strchr(buf + sizeof(s2) - 1, ':');
2957  unsigned val;
2958  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2959  goto no_val;
2960  if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
2961  goto dup_field;
2962  threadInfo[num_avail][pkgIdIndex] = val;
2963  continue;
2964  }
2965  char s3[] = "core id";
2966  if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2967  CHECK_LINE;
2968  char *p = strchr(buf + sizeof(s3) - 1, ':');
2969  unsigned val;
2970  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2971  goto no_val;
2972  if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
2973  goto dup_field;
2974  threadInfo[num_avail][coreIdIndex] = val;
2975  continue;
2976 #endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2977  }
2978  char s4[] = "thread id";
2979  if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
2980  CHECK_LINE;
2981  char *p = strchr(buf + sizeof(s4) - 1, ':');
2982  unsigned val;
2983  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2984  goto no_val;
2985  if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
2986  goto dup_field;
2987  threadInfo[num_avail][threadIdIndex] = val;
2988  continue;
2989  }
2990  unsigned level;
2991  if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2992  CHECK_LINE;
2993  char *p = strchr(buf + sizeof(s4) - 1, ':');
2994  unsigned val;
2995  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2996  goto no_val;
2997  // validate the input before using level:
2998  if (level > (unsigned)__kmp_xproc) { // level is too big
2999  level = __kmp_xproc;
3000  }
3001  if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
3002  goto dup_field;
3003  threadInfo[num_avail][nodeIdIndex + level] = val;
3004  continue;
3005  }
3006 
3007  // We didn't recognize the leading token on the line. There are lots of
3008  // leading tokens that we don't recognize - if the line isn't empty, go on
3009  // to the next line.
3010  if ((*buf != 0) && (*buf != '\n')) {
3011  // If the line is longer than the buffer, read characters
3012  // until we find a newline.
3013  if (long_line) {
3014  int ch;
3015  while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
3016  ;
3017  }
3018  continue;
3019  }
3020 
3021  // A newline has signalled the end of the processor record.
3022  // Check that there aren't too many procs specified.
3023  if ((int)num_avail == __kmp_xproc) {
3024  CLEANUP_THREAD_INFO;
3025  *msg_id = kmp_i18n_str_TooManyEntries;
3026  return false;
3027  }
3028 
3029  // Check for missing fields. The osId field must be there, and we
3030  // currently require that the physical id field is specified, also.
3031  if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
3032  CLEANUP_THREAD_INFO;
3033  *msg_id = kmp_i18n_str_MissingProcField;
3034  return false;
3035  }
3036  if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
3037  CLEANUP_THREAD_INFO;
3038  *msg_id = kmp_i18n_str_MissingPhysicalIDField;
3039  return false;
3040  }
3041 
3042  // Skip this proc if it is not included in the machine model.
3043  if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
3044  __kmp_affin_fullMask)) {
3045  INIT_PROC_INFO(threadInfo[num_avail]);
3046  continue;
3047  }
3048 
3049  // We have a successful parse of this proc's info.
3050  // Increment the counter, and prepare for the next proc.
3051  num_avail++;
3052  KMP_ASSERT(num_avail <= num_records);
3053  INIT_PROC_INFO(threadInfo[num_avail]);
3054  }
3055  continue;
3056 
3057  no_val:
3058  CLEANUP_THREAD_INFO;
3059  *msg_id = kmp_i18n_str_MissingValCpuinfo;
3060  return false;
3061 
3062  dup_field:
3063  CLEANUP_THREAD_INFO;
3064  *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
3065  return false;
3066  }
3067  *line = 0;
3068 
3069 #if KMP_MIC && REDUCE_TEAM_SIZE
3070  unsigned teamSize = 0;
3071 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3072 
3073  // check for num_records == __kmp_xproc ???
3074 
3075  // If it is configured to omit the package level when there is only a single
3076  // package, the logic at the end of this routine won't work if there is only a
3077  // single thread
3078  KMP_ASSERT(num_avail > 0);
3079  KMP_ASSERT(num_avail <= num_records);
3080 
3081  // Sort the threadInfo table by physical Id.
3082  qsort(threadInfo, num_avail, sizeof(*threadInfo),
3083  __kmp_affinity_cmp_ProcCpuInfo_phys_id);
3084 
3085  // The table is now sorted by pkgId / coreId / threadId, but we really don't
3086  // know the radix of any of the fields. pkgId's may be sparsely assigned among
3087  // the chips on a system. Although coreId's are usually assigned
3088  // [0 .. coresPerPkg-1] and threadId's are usually assigned
3089  // [0..threadsPerCore-1], we don't want to make any such assumptions.
3090  //
3091  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
3092  // total # packages) are at this point - we want to determine that now. We
3093  // only have an upper bound on the first two figures.
3094  unsigned *counts =
3095  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3096  unsigned *maxCt =
3097  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3098  unsigned *totals =
3099  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3100  unsigned *lastId =
3101  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
3102 
3103  bool assign_thread_ids = false;
3104  unsigned threadIdCt;
3105  unsigned index;
3106 
3107 restart_radix_check:
3108  threadIdCt = 0;
3109 
3110  // Initialize the counter arrays with data from threadInfo[0].
3111  if (assign_thread_ids) {
3112  if (threadInfo[0][threadIdIndex] == UINT_MAX) {
3113  threadInfo[0][threadIdIndex] = threadIdCt++;
3114  } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
3115  threadIdCt = threadInfo[0][threadIdIndex] + 1;
3116  }
3117  }
3118  for (index = 0; index <= maxIndex; index++) {
3119  counts[index] = 1;
3120  maxCt[index] = 1;
3121  totals[index] = 1;
3122  lastId[index] = threadInfo[0][index];
3124  }
3125 
3126  // Run through the rest of the OS procs.
3127  for (i = 1; i < num_avail; i++) {
3128  // Find the most significant index whose id differs from the id for the
3129  // previous OS proc.
3130  for (index = maxIndex; index >= threadIdIndex; index--) {
3131  if (assign_thread_ids && (index == threadIdIndex)) {
3132  // Auto-assign the thread id field if it wasn't specified.
3133  if (threadInfo[i][threadIdIndex] == UINT_MAX) {
3134  threadInfo[i][threadIdIndex] = threadIdCt++;
3135  }
3136  // Apparently the thread id field was specified for some entries and not
3137  // others. Start the thread id counter off at the next higher thread id.
3138  else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
3139  threadIdCt = threadInfo[i][threadIdIndex] + 1;
3140  }
3141  }
3142  if (threadInfo[i][index] != lastId[index]) {
3143  // Run through all indices which are less significant, and reset the
3144  // counts to 1. At all levels up to and including index, we need to
3145  // increment the totals and record the last id.
3146  unsigned index2;
3147  for (index2 = threadIdIndex; index2 < index; index2++) {
3148  totals[index2]++;
3149  if (counts[index2] > maxCt[index2]) {
3150  maxCt[index2] = counts[index2];
3151  }
3152  counts[index2] = 1;
3153  lastId[index2] = threadInfo[i][index2];
3154  }
3155  counts[index]++;
3156  totals[index]++;
3157  lastId[index] = threadInfo[i][index];
3158 
3159  if (assign_thread_ids && (index > threadIdIndex)) {
3160 
3161 #if KMP_MIC && REDUCE_TEAM_SIZE
3162  // The default team size is the total #threads in the machine
3163  // minus 1 thread for every core that has 3 or more threads.
3164  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
3165 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3166 
3167  // Restart the thread counter, as we are on a new core.
3168  threadIdCt = 0;
3169 
3170  // Auto-assign the thread id field if it wasn't specified.
3171  if (threadInfo[i][threadIdIndex] == UINT_MAX) {
3172  threadInfo[i][threadIdIndex] = threadIdCt++;
3173  }
3174 
3175  // Apparently the thread id field was specified for some entries and
3176  // not others. Start the thread id counter off at the next higher
3177  // thread id.
3178  else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
3179  threadIdCt = threadInfo[i][threadIdIndex] + 1;
3180  }
3181  }
3182  break;
3183  }
3184  }
3185  if (index < threadIdIndex) {
3186  // If thread ids were specified, it is an error if they are not unique.
3187  // Also, check that we haven't already restarted the loop (to be safe -
3188  // shouldn't need to).
3189  if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
3190  __kmp_free(lastId);
3191  __kmp_free(totals);
3192  __kmp_free(maxCt);
3193  __kmp_free(counts);
3194  CLEANUP_THREAD_INFO;
3195  *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
3196  return false;
3197  }
3198 
3199  // If the thread ids were not specified and we see entries that are
3200  // duplicates, start the loop over and assign the thread ids manually.
3201  assign_thread_ids = true;
3202  goto restart_radix_check;
3203  }
3204  }
3205 
3206 #if KMP_MIC && REDUCE_TEAM_SIZE
3207  // The default team size is the total #threads in the machine
3208  // minus 1 thread for every core that has 3 or more threads.
3209  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
3210 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3211 
3212  for (index = threadIdIndex; index <= maxIndex; index++) {
3213  if (counts[index] > maxCt[index]) {
3214  maxCt[index] = counts[index];
3215  }
3216  }
3217 
3218  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
3219  nCoresPerPkg = maxCt[coreIdIndex];
3220  nPackages = totals[pkgIdIndex];
3221 
3222  // When affinity is off, this routine will still be called to set
3223  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
3224  // Make sure all these vars are set correctly, and return now if affinity is
3225  // not enabled.
3226  __kmp_ncores = totals[coreIdIndex];
3227  if (!KMP_AFFINITY_CAPABLE()) {
3228  KMP_ASSERT(__kmp_affinity_type == affinity_none);
3229  return true;
3230  }
3231 
3232 #if KMP_MIC && REDUCE_TEAM_SIZE
3233  // Set the default team size.
3234  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
3235  __kmp_dflt_team_nth = teamSize;
3236  KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
3237  "__kmp_dflt_team_nth = %d\n",
3238  __kmp_dflt_team_nth));
3239  }
3240 #endif // KMP_MIC && REDUCE_TEAM_SIZE
3241 
3242  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
3243 
3244  // Count the number of levels which have more nodes at that level than at the
3245  // parent's level (with there being an implicit root node of the top level).
3246  // This is equivalent to saying that there is at least one node at this level
3247  // which has a sibling. These levels are in the map, and the package level is
3248  // always in the map.
3249  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
3250  for (index = threadIdIndex; index < maxIndex; index++) {
3251  KMP_ASSERT(totals[index] >= totals[index + 1]);
3252  inMap[index] = (totals[index] > totals[index + 1]);
3253  }
3254  inMap[maxIndex] = (totals[maxIndex] > 1);
3255  inMap[pkgIdIndex] = true;
3256  inMap[coreIdIndex] = true;
3257  inMap[threadIdIndex] = true;
3258 
3259  int depth = 0;
3260  int idx = 0;
3261  kmp_hw_t types[KMP_HW_LAST];
3262  int pkgLevel = -1;
3263  int coreLevel = -1;
3264  int threadLevel = -1;
3265  for (index = threadIdIndex; index <= maxIndex; index++) {
3266  if (inMap[index]) {
3267  depth++;
3268  }
3269  }
3270  if (inMap[pkgIdIndex]) {
3271  pkgLevel = idx;
3272  types[idx++] = KMP_HW_SOCKET;
3273  }
3274  if (inMap[coreIdIndex]) {
3275  coreLevel = idx;
3276  types[idx++] = KMP_HW_CORE;
3277  }
3278  if (inMap[threadIdIndex]) {
3279  threadLevel = idx;
3280  types[idx++] = KMP_HW_THREAD;
3281  }
3282  KMP_ASSERT(depth > 0);
3283 
3284  // Construct the data structure that is to be returned.
3285  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);
3286 
3287  for (i = 0; i < num_avail; ++i) {
3288  unsigned os = threadInfo[i][osIdIndex];
3289  int src_index;
3290  int dst_index = 0;
3291  kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
3292  hw_thread.clear();
3293  hw_thread.os_id = os;
3294 
3295  idx = 0;
3296  for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
3297  if (!inMap[src_index]) {
3298  continue;
3299  }
3300  if (src_index == pkgIdIndex) {
3301  hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
3302  } else if (src_index == coreIdIndex) {
3303  hw_thread.ids[coreLevel] = threadInfo[i][src_index];
3304  } else if (src_index == threadIdIndex) {
3305  hw_thread.ids[threadLevel] = threadInfo[i][src_index];
3306  }
3307  dst_index++;
3308  }
3309  }
3310 
3311  __kmp_free(inMap);
3312  __kmp_free(lastId);
3313  __kmp_free(totals);
3314  __kmp_free(maxCt);
3315  __kmp_free(counts);
3316  CLEANUP_THREAD_INFO;
3317  __kmp_topology->sort_ids();
3318  if (!__kmp_topology->check_ids()) {
3319  kmp_topology_t::deallocate(__kmp_topology);
3320  __kmp_topology = nullptr;
3321  *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
3322  return false;
3323  }
3324  return true;
3325 }
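// Illustrative example (not part of the runtime): on a hypothetical machine
// with 2 sockets, 4 cores per socket, and 2 hardware threads per core, the
// radix check above leaves maxCt[threadIdIndex] == 2, maxCt[coreIdIndex] == 4,
// and totals[pkgIdIndex] == 2, so the globals come out as
//   __kmp_nThreadsPerCore = 2, nCoresPerPkg = 4, nPackages = 2,
//   __kmp_ncores = 8,
// and the constructed topology has depth 3: socket / core / thread.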
3326 
3327 // Create and return a table of affinity masks, indexed by OS thread ID.
3328 // This routine handles OR'ing together all the affinity masks of threads
3329 // that are sufficiently close, if granularity > fine.
3330 static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
3331  unsigned *numUnique) {
3332  // First form a table of affinity masks in order of OS thread id.
3333  int maxOsId;
3334  int i;
3335  int numAddrs = __kmp_topology->get_num_hw_threads();
3336  int depth = __kmp_topology->get_depth();
3337  KMP_ASSERT(numAddrs);
3338  KMP_ASSERT(depth);
3339 
3340  maxOsId = 0;
3341  for (i = numAddrs - 1;; --i) {
3342  int osId = __kmp_topology->at(i).os_id;
3343  if (osId > maxOsId) {
3344  maxOsId = osId;
3345  }
3346  if (i == 0)
3347  break;
3348  }
3349  kmp_affin_mask_t *osId2Mask;
3350  KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
3351  KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
3352  if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
3353  KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
3354  }
3355  if (__kmp_affinity_gran_levels >= (int)depth) {
3356  if (__kmp_affinity_verbose ||
3357  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
3358  KMP_WARNING(AffThreadsMayMigrate);
3359  }
3360  }
3361 
3362  // Run through the table, forming the masks for all threads on each core.
3363  // Threads on the same core will have identical kmp_hw_thread_t objects, not
3364  // considering the last level, which must be the thread id. All threads on a
3365  // core will appear consecutively.
3366  int unique = 0;
3367  int j = 0; // index of 1st thread on core
3368  int leader = 0;
3369  kmp_affin_mask_t *sum;
3370  KMP_CPU_ALLOC_ON_STACK(sum);
3371  KMP_CPU_ZERO(sum);
3372  KMP_CPU_SET(__kmp_topology->at(0).os_id, sum);
3373  for (i = 1; i < numAddrs; i++) {
3374  // If this thread is sufficiently close to the leader (within the
3375  // granularity setting), then set the bit for this os thread in the
3376  // affinity mask for this group, and go on to the next thread.
3377  if (__kmp_topology->is_close(leader, i, __kmp_affinity_gran_levels)) {
3378  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
3379  continue;
3380  }
3381 
3382  // For every thread in this group, copy the mask to the thread's entry in
3383  // the osId2Mask table. Mark the first address as a leader.
3384  for (; j < i; j++) {
3385  int osId = __kmp_topology->at(j).os_id;
3386  KMP_DEBUG_ASSERT(osId <= maxOsId);
3387  kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
3388  KMP_CPU_COPY(mask, sum);
3389  __kmp_topology->at(j).leader = (j == leader);
3390  }
3391  unique++;
3392 
3393  // Start a new mask.
3394  leader = i;
3395  KMP_CPU_ZERO(sum);
3396  KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
3397  }
3398 
3399  // For every thread in the last group, copy the mask to the thread's
3400  // entry in the osId2Mask table.
3401  for (; j < i; j++) {
3402  int osId = __kmp_topology->at(j).os_id;
3403  KMP_DEBUG_ASSERT(osId <= maxOsId);
3404  kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
3405  KMP_CPU_COPY(mask, sum);
3406  __kmp_topology->at(j).leader = (j == leader);
3407  }
3408  unique++;
3409  KMP_CPU_FREE_FROM_STACK(sum);
3410 
3411  *maxIndex = maxOsId;
3412  *numUnique = unique;
3413  return osId2Mask;
3414 }
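// Illustrative example (not part of the runtime): with granularity=core
// (__kmp_affinity_gran_levels == 1) on a machine with 2 hardware threads per
// core, the two sibling threads of each core are "close", so both of their OS
// ids map to the same two-bit mask in osId2Mask, and *numUnique comes back as
// the number of cores rather than the number of OS procs.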
3415 
3416 // Stuff for the affinity proclist parsers. It's easier to declare these vars
3417 // as file-static than to try and pass them through the calling sequence of
3418 // the recursive-descent OMP_PLACES parser.
3419 static kmp_affin_mask_t *newMasks;
3420 static int numNewMasks;
3421 static int nextNewMask;
3422 
3423 #define ADD_MASK(_mask) \
3424  { \
3425  if (nextNewMask >= numNewMasks) { \
3426  int i; \
3427  numNewMasks *= 2; \
3428  kmp_affin_mask_t *temp; \
3429  KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \
3430  for (i = 0; i < numNewMasks / 2; i++) { \
3431  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); \
3432  kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i); \
3433  KMP_CPU_COPY(dest, src); \
3434  } \
3435  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2); \
3436  newMasks = temp; \
3437  } \
3438  KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \
3439  nextNewMask++; \
3440  }
3441 
3442 #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \
3443  { \
3444  if (((_osId) > _maxOsId) || \
3445  (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \
3446  if (__kmp_affinity_verbose || \
3447  (__kmp_affinity_warnings && \
3448  (__kmp_affinity_type != affinity_none))) { \
3449  KMP_WARNING(AffIgnoreInvalidProcID, _osId); \
3450  } \
3451  } else { \
3452  ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \
3453  } \
3454  }
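// Note on ADD_MASK growth (illustrative): newMasks starts with capacity
// numNewMasks == 2 and doubles on demand, so adding, e.g., 5 masks triggers
// reallocations 2 -> 4 -> 8, copying the already-stored masks each time
// before the old array is freed.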
3455 
3456 // Re-parse the proclist (for the explicit affinity type), and form the list
3457 // of affinity newMasks indexed by gtid.
3458 static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
3459  unsigned int *out_numMasks,
3460  const char *proclist,
3461  kmp_affin_mask_t *osId2Mask,
3462  int maxOsId) {
3463  int i;
3464  const char *scan = proclist;
3465  const char *next = proclist;
3466 
3467  // Allocate a temporary mask vector; the ADD_MASK macro grows it on demand
3468  // by doubling (allocate, copy, free the old array).
3469  numNewMasks = 2;
3470  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3471  nextNewMask = 0;
3472  kmp_affin_mask_t *sumMask;
3473  KMP_CPU_ALLOC(sumMask);
3474  int setSize = 0;
3475 
3476  for (;;) {
3477  int start, end, stride;
3478 
3479  SKIP_WS(scan);
3480  next = scan;
3481  if (*next == '\0') {
3482  break;
3483  }
3484 
3485  if (*next == '{') {
3486  int num;
3487  setSize = 0;
3488  next++; // skip '{'
3489  SKIP_WS(next);
3490  scan = next;
3491 
3492  // Read the first integer in the set.
3493  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3494  SKIP_DIGITS(next);
3495  num = __kmp_str_to_int(scan, *next);
3496  KMP_ASSERT2(num >= 0, "bad explicit proc list");
3497 
3498  // Copy the mask for that osId to the sum (union) mask.
3499  if ((num > maxOsId) ||
3500  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3501  if (__kmp_affinity_verbose ||
3502  (__kmp_affinity_warnings &&
3503  (__kmp_affinity_type != affinity_none))) {
3504  KMP_WARNING(AffIgnoreInvalidProcID, num);
3505  }
3506  KMP_CPU_ZERO(sumMask);
3507  } else {
3508  KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3509  setSize = 1;
3510  }
3511 
3512  for (;;) {
3513  // Check for end of set.
3514  SKIP_WS(next);
3515  if (*next == '}') {
3516  next++; // skip '}'
3517  break;
3518  }
3519 
3520  // Skip optional comma.
3521  if (*next == ',') {
3522  next++;
3523  }
3524  SKIP_WS(next);
3525 
3526  // Read the next integer in the set.
3527  scan = next;
3528  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3529 
3530  SKIP_DIGITS(next);
3531  num = __kmp_str_to_int(scan, *next);
3532  KMP_ASSERT2(num >= 0, "bad explicit proc list");
3533 
3534  // Add the mask for that osId to the sum mask.
3535  if ((num > maxOsId) ||
3536  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3537  if (__kmp_affinity_verbose ||
3538  (__kmp_affinity_warnings &&
3539  (__kmp_affinity_type != affinity_none))) {
3540  KMP_WARNING(AffIgnoreInvalidProcID, num);
3541  }
3542  } else {
3543  KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3544  setSize++;
3545  }
3546  }
3547  if (setSize > 0) {
3548  ADD_MASK(sumMask);
3549  }
3550 
3551  SKIP_WS(next);
3552  if (*next == ',') {
3553  next++;
3554  }
3555  scan = next;
3556  continue;
3557  }
3558 
3559  // Read the first integer.
3560  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3561  SKIP_DIGITS(next);
3562  start = __kmp_str_to_int(scan, *next);
3563  KMP_ASSERT2(start >= 0, "bad explicit proc list");
3564  SKIP_WS(next);
3565 
3566  // If this isn't a range, then add a mask to the list and go on.
3567  if (*next != '-') {
3568  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3569 
3570  // Skip optional comma.
3571  if (*next == ',') {
3572  next++;
3573  }
3574  scan = next;
3575  continue;
3576  }
3577 
3578  // This is a range. Skip over the '-' and read in the 2nd int.
3579  next++; // skip '-'
3580  SKIP_WS(next);
3581  scan = next;
3582  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3583  SKIP_DIGITS(next);
3584  end = __kmp_str_to_int(scan, *next);
3585  KMP_ASSERT2(end >= 0, "bad explicit proc list");
3586 
3587  // Check for a stride parameter
3588  stride = 1;
3589  SKIP_WS(next);
3590  if (*next == ':') {
3591  // A stride is specified. Skip over the ':' and read the 3rd int.
3592  int sign = +1;
3593  next++; // skip ':'
3594  SKIP_WS(next);
3595  scan = next;
3596  if (*next == '-') {
3597  sign = -1;
3598  next++;
3599  SKIP_WS(next);
3600  scan = next;
3601  }
3602  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3603  SKIP_DIGITS(next);
3604  stride = __kmp_str_to_int(scan, *next);
3605  KMP_ASSERT2(stride >= 0, "bad explicit proc list");
3606  stride *= sign;
3607  }
3608 
3609  // Do some range checks.
3610  KMP_ASSERT2(stride != 0, "bad explicit proc list");
3611  if (stride > 0) {
3612  KMP_ASSERT2(start <= end, "bad explicit proc list");
3613  } else {
3614  KMP_ASSERT2(start >= end, "bad explicit proc list");
3615  }
3616  KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
3617 
3618  // Add the mask for each OS proc # to the list.
3619  if (stride > 0) {
3620  do {
3621  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3622  start += stride;
3623  } while (start <= end);
3624  } else {
3625  do {
3626  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3627  start += stride;
3628  } while (start >= end);
3629  }
3630 
3631  // Skip optional comma.
3632  SKIP_WS(next);
3633  if (*next == ',') {
3634  next++;
3635  }
3636  scan = next;
3637  }
3638 
3639  *out_numMasks = nextNewMask;
3640  if (nextNewMask == 0) {
3641  *out_masks = NULL;
3642  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
      KMP_CPU_FREE(sumMask); // don't leak sumMask on the early return
3643  return;
3644  }
3645  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3646  for (i = 0; i < nextNewMask; i++) {
3647  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3648  kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3649  KMP_CPU_COPY(dest, src);
3650  }
3651  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3652  KMP_CPU_FREE(sumMask);
3653 }
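// Illustrative example (not part of the runtime): with
//   KMP_AFFINITY="explicit,proclist=[3,0-2,{4,5}]"
// and fine granularity (each osId maps to a single-bit mask), the parser
// above produces five masks, in order:
//   {3} from the singleton, {0}, {1}, {2} from the range 0-2, and the
//   union mask {4,5} from the braced set.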
3654 
3655 /*-----------------------------------------------------------------------------
3656 Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
3657 places. Again, here is the grammar:
3658 
3659 place_list := place
3660 place_list := place , place_list
3661 place := num
3662 place := place : num
3663 place := place : num : signed
3664 place := { subplace_list }
3665 place := ! place // (lowest priority)
3666 subplace_list := subplace
3667 subplace_list := subplace , subplace_list
3668 subplace := num
3669 subplace := num : num
3670 subplace := num : num : signed
3671 signed := num
3672 signed := + signed
3673 signed := - signed
3674 -----------------------------------------------------------------------------*/
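// Illustrative mappings to the grammar above (not part of the runtime):
//   "{0,1,2,3}"   place := { subplace_list }
//   "{0:4}"       subplace := num : num    (4 procs starting at proc 0)
//   "{0:2}:4:2"   place := place : num : signed
//                 (4 places: {0,1}, {2,3}, {4,5}, {6,7})
//   "!{2}"        place := ! place         (everything except proc 2)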
3675 static void __kmp_process_subplace_list(const char **scan,
3676  kmp_affin_mask_t *osId2Mask,
3677  int maxOsId, kmp_affin_mask_t *tempMask,
3678  int *setSize) {
3679  const char *next;
3680 
3681  for (;;) {
3682  int start, count, stride, i;
3683 
3684  // Read in the starting proc id
3685  SKIP_WS(*scan);
3686  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3687  next = *scan;
3688  SKIP_DIGITS(next);
3689  start = __kmp_str_to_int(*scan, *next);
3690  KMP_ASSERT(start >= 0);
3691  *scan = next;
3692 
3693  // valid follow sets are ',' ':' and '}'
3694  SKIP_WS(*scan);
3695  if (**scan == '}' || **scan == ',') {
3696  if ((start > maxOsId) ||
3697  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3698  if (__kmp_affinity_verbose ||
3699  (__kmp_affinity_warnings &&
3700  (__kmp_affinity_type != affinity_none))) {
3701  KMP_WARNING(AffIgnoreInvalidProcID, start);
3702  }
3703  } else {
3704  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3705  (*setSize)++;
3706  }
3707  if (**scan == '}') {
3708  break;
3709  }
3710  (*scan)++; // skip ','
3711  continue;
3712  }
3713  KMP_ASSERT2(**scan == ':', "bad explicit places list");
3714  (*scan)++; // skip ':'
3715 
3716  // Read count parameter
3717  SKIP_WS(*scan);
3718  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3719  next = *scan;
3720  SKIP_DIGITS(next);
3721  count = __kmp_str_to_int(*scan, *next);
3722  KMP_ASSERT(count >= 0);
3723  *scan = next;
3724 
3725  // valid follow sets are ',' ':' and '}'
3726  SKIP_WS(*scan);
3727  if (**scan == '}' || **scan == ',') {
3728  for (i = 0; i < count; i++) {
3729  if ((start > maxOsId) ||
3730  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3731  if (__kmp_affinity_verbose ||
3732  (__kmp_affinity_warnings &&
3733  (__kmp_affinity_type != affinity_none))) {
3734  KMP_WARNING(AffIgnoreInvalidProcID, start);
3735  }
3736  break; // don't proliferate warnings for large count
3737  } else {
3738  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3739  start++;
3740  (*setSize)++;
3741  }
3742  }
3743  if (**scan == '}') {
3744  break;
3745  }
3746  (*scan)++; // skip ','
3747  continue;
3748  }
3749  KMP_ASSERT2(**scan == ':', "bad explicit places list");
3750  (*scan)++; // skip ':'
3751 
3752  // Read stride parameter
3753  int sign = +1;
3754  for (;;) {
3755  SKIP_WS(*scan);
3756  if (**scan == '+') {
3757  (*scan)++; // skip '+'
3758  continue;
3759  }
3760  if (**scan == '-') {
3761  sign *= -1;
3762  (*scan)++; // skip '-'
3763  continue;
3764  }
3765  break;
3766  }
3767  SKIP_WS(*scan);
3768  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3769  next = *scan;
3770  SKIP_DIGITS(next);
3771  stride = __kmp_str_to_int(*scan, *next);
3772  KMP_ASSERT(stride >= 0);
3773  *scan = next;
3774  stride *= sign;
3775 
3776  // valid follow sets are ',' and '}'
3777  SKIP_WS(*scan);
3778  if (**scan == '}' || **scan == ',') {
3779  for (i = 0; i < count; i++) {
3780  if ((start > maxOsId) ||
3781  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3782  if (__kmp_affinity_verbose ||
3783  (__kmp_affinity_warnings &&
3784  (__kmp_affinity_type != affinity_none))) {
3785  KMP_WARNING(AffIgnoreInvalidProcID, start);
3786  }
3787  break; // don't proliferate warnings for large count
3788  } else {
3789  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3790  start += stride;
3791  (*setSize)++;
3792  }
3793  }
3794  if (**scan == '}') {
3795  break;
3796  }
3797  (*scan)++; // skip ','
3798  continue;
3799  }
3800 
3801  KMP_ASSERT2(0, "bad explicit places list");
3802  }
3803 }
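// Illustrative example (not part of the runtime): inside a braced place, the
// subplace "4:3:2" is parsed as start=4, count=3, stride=2, so the loop above
// unions osId2Mask[4], osId2Mask[6], and osId2Mask[8] into tempMask and bumps
// *setSize by 3.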
3804 
3805 static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
3806  int maxOsId, kmp_affin_mask_t *tempMask,
3807  int *setSize) {
3808  const char *next;
3809 
3810  // valid follow sets are '{' '!' and num
3811  SKIP_WS(*scan);
3812  if (**scan == '{') {
3813  (*scan)++; // skip '{'
3814  __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
3815  KMP_ASSERT2(**scan == '}', "bad explicit places list");
3816  (*scan)++; // skip '}'
3817  } else if (**scan == '!') {
3818  (*scan)++; // skip '!'
3819  __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
3820  KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3821  } else if ((**scan >= '0') && (**scan <= '9')) {
3822  next = *scan;
3823  SKIP_DIGITS(next);
3824  int num = __kmp_str_to_int(*scan, *next);
3825  KMP_ASSERT(num >= 0);
3826  if ((num > maxOsId) ||
3827  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3828  if (__kmp_affinity_verbose ||
3829  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
3830  KMP_WARNING(AffIgnoreInvalidProcID, num);
3831  }
3832  } else {
3833  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3834  (*setSize)++;
3835  }
3836  *scan = next; // skip num
3837  } else {
3838  KMP_ASSERT2(0, "bad explicit places list");
3839  }
3840 }
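// Illustrative example (not part of the runtime): for the place "!{0,1}",
// __kmp_process_place() recurses on "{0,1}" to build tempMask = {0,1}, then
// KMP_CPU_COMPLEMENT() inverts it up to maxOsId, yielding every valid OS proc
// except 0 and 1.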
3841 
3843 void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
3844  unsigned int *out_numMasks,
3845  const char *placelist,
3846  kmp_affin_mask_t *osId2Mask,
3847  int maxOsId) {
3848  int i, j, count, stride, sign;
3849  const char *scan = placelist;
3850  const char *next = placelist;
3851 
3852  numNewMasks = 2;
3853  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3854  nextNewMask = 0;
3855 
3856  // tempMask is modified based on the previous or initial
3857  // place to form the current place
3858  // previousMask contains the previous place
3859  kmp_affin_mask_t *tempMask;
3860  kmp_affin_mask_t *previousMask;
3861  KMP_CPU_ALLOC(tempMask);
3862  KMP_CPU_ZERO(tempMask);
3863  KMP_CPU_ALLOC(previousMask);
3864  KMP_CPU_ZERO(previousMask);
3865  int setSize = 0;
3866 
3867  for (;;) {
3868  __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);
3869 
3870  // valid follow sets are ',' ':' and EOL
3871  SKIP_WS(scan);
3872  if (*scan == '\0' || *scan == ',') {
3873  if (setSize > 0) {
3874  ADD_MASK(tempMask);
3875  }
3876  KMP_CPU_ZERO(tempMask);
3877  setSize = 0;
3878  if (*scan == '\0') {
3879  break;
3880  }
3881  scan++; // skip ','
3882  continue;
3883  }
3884 
3885  KMP_ASSERT2(*scan == ':', "bad explicit places list");
3886  scan++; // skip ':'
3887 
3888  // Read count parameter
3889  SKIP_WS(scan);
3890  KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3891  next = scan;
3892  SKIP_DIGITS(next);
3893  count = __kmp_str_to_int(scan, *next);
3894  KMP_ASSERT(count >= 0);
3895  scan = next;
3896 
3897  // valid follow sets are ',' ':' and EOL
3898  SKIP_WS(scan);
3899  if (*scan == '\0' || *scan == ',') {
3900  stride = +1;
3901  } else {
3902  KMP_ASSERT2(*scan == ':', "bad explicit places list");
3903  scan++; // skip ':'
3904 
3905  // Read stride parameter
3906  sign = +1;
3907  for (;;) {
3908  SKIP_WS(scan);
3909  if (*scan == '+') {
3910  scan++; // skip '+'
3911  continue;
3912  }
3913  if (*scan == '-') {
3914  sign *= -1;
3915  scan++; // skip '-'
3916  continue;
3917  }
3918  break;
3919  }
3920  SKIP_WS(scan);
3921  KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3922  next = scan;
3923  SKIP_DIGITS(next);
3924  stride = __kmp_str_to_int(scan, *next);
3925  KMP_DEBUG_ASSERT(stride >= 0);
3926  scan = next;
3927  stride *= sign;
3928  }
3929 
3930  // Add places determined by initial_place : count : stride
3931  for (i = 0; i < count; i++) {
3932  if (setSize == 0) {
3933  break;
3934  }
3935  // Add the current place, then build the next place (tempMask) from that
3936  KMP_CPU_COPY(previousMask, tempMask);
3937  ADD_MASK(previousMask);
3938  KMP_CPU_ZERO(tempMask);
3939  setSize = 0;
3940  KMP_CPU_SET_ITERATE(j, previousMask) {
3941  if (!KMP_CPU_ISSET(j, previousMask)) {
3942  continue;
3943  }
3944  if ((j + stride > maxOsId) || (j + stride < 0) ||
3945  (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
3946  (!KMP_CPU_ISSET(j + stride,
3947  KMP_CPU_INDEX(osId2Mask, j + stride)))) {
3948  if ((__kmp_affinity_verbose ||
3949  (__kmp_affinity_warnings &&
3950  (__kmp_affinity_type != affinity_none))) &&
3951  i < count - 1) {
3952  KMP_WARNING(AffIgnoreInvalidProcID, j + stride);
3953  }
3954  continue;
3955  }
3956  KMP_CPU_SET(j + stride, tempMask);
3957  setSize++;
3958  }
3959  }
3960  KMP_CPU_ZERO(tempMask);
3961  setSize = 0;
3962 
3963  // valid follow sets are ',' and EOL
3964  SKIP_WS(scan);
3965  if (*scan == '\0') {
3966  break;
3967  }
3968  if (*scan == ',') {
3969  scan++; // skip ','
3970  continue;
3971  }
3972 
3973  KMP_ASSERT2(0, "bad explicit places list");
3974  }
3975 
3976  *out_numMasks = nextNewMask;
3977  if (nextNewMask == 0) {
3978  *out_masks = NULL;
3979  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
      KMP_CPU_FREE(tempMask); // don't leak the scratch masks on the
      KMP_CPU_FREE(previousMask); // early return
3980  return;
3981  }
3982  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3983  KMP_CPU_FREE(tempMask);
3984  KMP_CPU_FREE(previousMask);
3985  for (i = 0; i < nextNewMask; i++) {
3986  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3987  kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3988  KMP_CPU_COPY(dest, src);
3989  }
3990  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3991 }
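// Illustrative example (not part of the runtime): OMP_PLACES="{0,1}:4:2"
// starts with tempMask = {0,1}; on each of the 4 iterations the current place
// is copied into previousMask and recorded, and tempMask is rebuilt by
// shifting every set bit by the stride, producing the places
// {0,1}, {2,3}, {4,5}, {6,7}.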
3992 
3993 #undef ADD_MASK
3994 #undef ADD_MASK_OSID
3995 
3996 // This function figures out the deepest level at which there is at least one
3997 // cluster/core with more than one processing unit bound to it.
3998 static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
3999  int core_level = 0;
4000 
4001  for (int i = 0; i < nprocs; i++) {
4002  const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
4003  for (int j = bottom_level; j > 0; j--) {
4004  if (hw_thread.ids[j] > 0) {
4005  if (core_level < (j - 1)) {
4006  core_level = j - 1;
4007  }
4008  }
4009  }
4010  }
4011  return core_level;
4012 }
4013 
4014 // This function counts the number of clusters/cores at the given level.
4015 static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
4016  int core_level) {
4017  return __kmp_topology->get_count(core_level);
4018 }
4019 // This function finds the cluster/core to which a given processing unit is bound.
4020 static int __kmp_affinity_find_core(int proc, int bottom_level,
4021  int core_level) {
4022  int core = 0;
4023  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
4024  for (int i = 0; i <= proc; ++i) {
4025  if (i + 1 <= proc) {
4026  for (int j = 0; j <= core_level; ++j) {
4027  if (__kmp_topology->at(i + 1).sub_ids[j] !=
4028  __kmp_topology->at(i).sub_ids[j]) {
4029  core++;
4030  break;
4031  }
4032  }
4033  }
4034  }
4035  return core;
4036 }
4037 
4038 // This function finds the maximal number of processing units bound to a
4039 // single cluster/core at the given level.
4040 static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
4041  int core_level) {
4042  if (core_level >= bottom_level)
4043  return 1;
4044  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
4045  return __kmp_topology->calculate_ratio(thread_level, core_level);
4046 }
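// Illustrative example (not part of the runtime): on a non-uniform depth-3
// topology (socket/core/thread) where at least one core runs 2 hardware
// threads, __kmp_affinity_find_core_level(nprocs, 2) returns 1 (the core
// level); __kmp_affinity_compute_ncores() then counts the entities at that
// level, and __kmp_affinity_max_proc_per_core() reports the thread:core
// ratio, e.g. 2.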
4047 
4048 static int *procarr = NULL;
4049 static int __kmp_aff_depth = 0;
4050 
4051 // Create a one element mask array (set of places) which only contains the
4052 // initial process's affinity mask
4053 static void __kmp_create_affinity_none_places() {
4054  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4055  KMP_ASSERT(__kmp_affinity_type == affinity_none);
4056  __kmp_affinity_num_masks = 1;
4057  KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4058  kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
4059  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
4060 }
4061 
4062 static void __kmp_aux_affinity_initialize(void) {
4063  if (__kmp_affinity_masks != NULL) {
4064  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4065  return;
4066  }
4067 
4068  // Create the "full" mask - this defines all of the processors that we
4069  // consider to be in the machine model. If respect is set, then it is the
4070  // initialization thread's affinity mask. Otherwise, it is all processors that
4071  // we know about on the machine.
4072  if (__kmp_affin_fullMask == NULL) {
4073  KMP_CPU_ALLOC(__kmp_affin_fullMask);
4074  }
4075  if (KMP_AFFINITY_CAPABLE()) {
4076  __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
4077  if (__kmp_affinity_respect_mask) {
4078  // Count the number of available processors.
4079  unsigned i;
4080  __kmp_avail_proc = 0;
4081  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
4082  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
4083  continue;
4084  }
4085  __kmp_avail_proc++;
4086  }
4087  if (__kmp_avail_proc > __kmp_xproc) {
4088  if (__kmp_affinity_verbose ||
4089  (__kmp_affinity_warnings &&
4090  (__kmp_affinity_type != affinity_none))) {
4091  KMP_WARNING(ErrorInitializeAffinity);
4092  }
4093  __kmp_affinity_type = affinity_none;
4094  KMP_AFFINITY_DISABLE();
4095  return;
4096  }
4097 
4098  if (__kmp_affinity_verbose) {
4099  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4100  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4101  __kmp_affin_fullMask);
4102  KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
4103  }
4104  } else {
4105  if (__kmp_affinity_verbose) {
4106  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4107  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4108  __kmp_affin_fullMask);
4109  KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
4110  }
4111  __kmp_avail_proc =
4112  __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4113 #if KMP_OS_WINDOWS
4114  // Set the process affinity mask since threads' affinity
4115  // masks must be subset of process mask in Windows* OS
4116  __kmp_affin_fullMask->set_process_affinity(true);
4117 #endif
4118  }
4119  }
4120 
4121  kmp_i18n_id_t msg_id = kmp_i18n_null;
4122 
4123  // For backward compatibility, setting KMP_CPUINFO_FILE =>
4124  // KMP_TOPOLOGY_METHOD=cpuinfo
4125  if ((__kmp_cpuinfo_file != NULL) &&
4126  (__kmp_affinity_top_method == affinity_top_method_all)) {
4127  __kmp_affinity_top_method = affinity_top_method_cpuinfo;
4128  }
4129 
4130  bool success = false;
4131  if (__kmp_affinity_top_method == affinity_top_method_all) {
4132 // In the default code path, errors are not fatal - we just try using
4133 // another method. We only emit a warning message if affinity is on, or the
4134 // verbose flag is set, and the nowarnings flag was not set.
4135 #if KMP_USE_HWLOC
4136  if (!success &&
4137  __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
4138  if (!__kmp_hwloc_error) {
4139  success = __kmp_affinity_create_hwloc_map(&msg_id);
4140  if (!success && __kmp_affinity_verbose) {
4141  KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4142  }
4143  } else if (__kmp_affinity_verbose) {
4144  KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4145  }
4146  }
4147 #endif
4148 
4149 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4150  if (!success) {
4151  success = __kmp_affinity_create_x2apicid_map(&msg_id);
4152  if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4153  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4154  }
4155  }
4156  if (!success) {
4157  success = __kmp_affinity_create_apicid_map(&msg_id);
4158  if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4159  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4160  }
4161  }
4162 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4163 
4164 #if KMP_OS_LINUX
4165  if (!success) {
4166  int line = 0;
4167  success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
4168  if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4169  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4170  }
4171  }
4172 #endif /* KMP_OS_LINUX */
4173 
4174 #if KMP_GROUP_AFFINITY
4175  if (!success && (__kmp_num_proc_groups > 1)) {
4176  success = __kmp_affinity_create_proc_group_map(&msg_id);
4177  if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4178  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4179  }
4180  }
4181 #endif /* KMP_GROUP_AFFINITY */
4182 
4183  if (!success) {
4184  success = __kmp_affinity_create_flat_map(&msg_id);
4185  if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
4186  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
4187  }
4188  KMP_ASSERT(success);
4189  }
4190  }
4191 
4192 // If the user has specified that a particular topology discovery method is to be
4193 // used, then we abort if that method fails. The exception is group affinity,
4194 // which might have been implicitly set.
4195 #if KMP_USE_HWLOC
4196  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
4197  KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
4198  success = __kmp_affinity_create_hwloc_map(&msg_id);
4199  if (!success) {
4200  KMP_ASSERT(msg_id != kmp_i18n_null);
4201  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4202  }
4203  }
4204 #endif // KMP_USE_HWLOC
4205 
4206 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4207  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
4208  __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
4209  success = __kmp_affinity_create_x2apicid_map(&msg_id);
4210  if (!success) {
4211  KMP_ASSERT(msg_id != kmp_i18n_null);
4212  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4213  }
4214  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
4215  success = __kmp_affinity_create_apicid_map(&msg_id);
4216  if (!success) {
4217  KMP_ASSERT(msg_id != kmp_i18n_null);
4218  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4219  }
4220  }
4221 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4222 
4223  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
4224  int line = 0;
4225  success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
4226  if (!success) {
4227  KMP_ASSERT(msg_id != kmp_i18n_null);
4228  const char *filename = __kmp_cpuinfo_get_filename();
4229  if (line > 0) {
4230  KMP_FATAL(FileLineMsgExiting, filename, line,
4231  __kmp_i18n_catgets(msg_id));
4232  } else {
4233  KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
4234  }
4235  }
4236  }
4237 
4238 #if KMP_GROUP_AFFINITY
4239  else if (__kmp_affinity_top_method == affinity_top_method_group) {
4240  success = __kmp_affinity_create_proc_group_map(&msg_id);
4242  if (!success) {
4243  KMP_ASSERT(msg_id != kmp_i18n_null);
4244  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4245  }
4246  }
4247 #endif /* KMP_GROUP_AFFINITY */
4248 
4249  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
4250  success = __kmp_affinity_create_flat_map(&msg_id);
4251  // should not fail
4252  KMP_ASSERT(success);
4253  }
4254 
4255  // Early exit if topology could not be created
4256  if (!__kmp_topology) {
4257  if (KMP_AFFINITY_CAPABLE() &&
4258  (__kmp_affinity_verbose ||
4259  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none)))) {
4260  KMP_WARNING(ErrorInitializeAffinity);
4261  }
4262  if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
4263  __kmp_ncores > 0) {
4264  __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
4265  __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
4266  __kmp_nThreadsPerCore, __kmp_ncores);
4267  if (__kmp_affinity_verbose) {
4268  __kmp_topology->print("KMP_AFFINITY");
4269  }
4270  }
4271  __kmp_affinity_type = affinity_none;
4272  __kmp_create_affinity_none_places();
4273 #if KMP_USE_HIER_SCHED
4274  __kmp_dispatch_set_hierarchy_values();
4275 #endif
4276  KMP_AFFINITY_DISABLE();
4277  return;
4278  }
4279 
4280  // Canonicalize, print (if requested), apply KMP_HW_SUBSET, and
4281  // initialize other data structures which depend on the topology
4282  __kmp_topology->canonicalize();
4283  if (__kmp_affinity_verbose)
4284  __kmp_topology->print("KMP_AFFINITY");
4285  bool filtered = __kmp_topology->filter_hw_subset();
4286  if (filtered && __kmp_affinity_verbose)
4287  __kmp_topology->print("KMP_HW_SUBSET");
4288  machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
4289  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
4290  // If KMP_AFFINITY=none, then only create the single "none" place, which
4291  // is the process's initial affinity mask or the full set of hardware
4292  // threads, depending on respect/norespect
4293  if (__kmp_affinity_type == affinity_none) {
4294  __kmp_create_affinity_none_places();
4295 #if KMP_USE_HIER_SCHED
4296  __kmp_dispatch_set_hierarchy_values();
4297 #endif
4298  return;
4299  }
4300  int depth = __kmp_topology->get_depth();
4301 
4302  // Create the table of masks, indexed by thread Id.
4303  unsigned maxIndex;
4304  unsigned numUnique;
4305  kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique);
4306  if (__kmp_affinity_gran_levels == 0) {
4307  KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
4308  }
4309 
4310  switch (__kmp_affinity_type) {
4311 
4312  case affinity_explicit:
4313  KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
4314  if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
4315  __kmp_affinity_process_proclist(
4316  &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4317  __kmp_affinity_proclist, osId2Mask, maxIndex);
4318  } else {
4319  __kmp_affinity_process_placelist(
4320  &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4321  __kmp_affinity_proclist, osId2Mask, maxIndex);
4322  }
4323  if (__kmp_affinity_num_masks == 0) {
4324  if (__kmp_affinity_verbose ||
4325  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
4326  KMP_WARNING(AffNoValidProcID);
4327  }
4328  __kmp_affinity_type = affinity_none;
4329  __kmp_create_affinity_none_places();
4330  return;
4331  }
4332  break;
4333 
4334  // The other affinity types rely on sorting the hardware threads according to
4335  // some permutation of the machine topology tree. Set __kmp_affinity_compact
4336  // and __kmp_affinity_offset appropriately, then jump to a common code
4337  // fragment to do the sort and create the array of affinity masks.
4338  case affinity_logical:
4339  __kmp_affinity_compact = 0;
4340  if (__kmp_affinity_offset) {
4341  __kmp_affinity_offset =
4342  __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4343  }
4344  goto sortTopology;
4345 
4346  case affinity_physical:
4347  if (__kmp_nThreadsPerCore > 1) {
4348  __kmp_affinity_compact = 1;
4349  if (__kmp_affinity_compact >= depth) {
4350  __kmp_affinity_compact = 0;
4351  }
4352  } else {
4353  __kmp_affinity_compact = 0;
4354  }
4355  if (__kmp_affinity_offset) {
4356  __kmp_affinity_offset =
4357  __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4358  }
4359  goto sortTopology;
4360 
4361  case affinity_scatter:
4362  if (__kmp_affinity_compact >= depth) {
4363  __kmp_affinity_compact = 0;
4364  } else {
4365  __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
4366  }
4367  goto sortTopology;
4368 
4369  case affinity_compact:
4370  if (__kmp_affinity_compact >= depth) {
4371  __kmp_affinity_compact = depth - 1;
4372  }
4373  goto sortTopology;
4374 
4375  case affinity_balanced:
4376  if (depth <= 1) {
4377  if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4378  KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4379  }
4380  __kmp_affinity_type = affinity_none;
4381  __kmp_create_affinity_none_places();
4382  return;
4383  } else if (!__kmp_topology->is_uniform()) {
4384  // Save the depth for further usage
4385  __kmp_aff_depth = depth;
4386 
4387  int core_level =
4388  __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
4389  int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
4390  core_level);
4391  int maxprocpercore = __kmp_affinity_max_proc_per_core(
4392  __kmp_avail_proc, depth - 1, core_level);
4393 
4394  int nproc = ncores * maxprocpercore;
4395  if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
4396  if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4397  KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4398  }
4399  __kmp_affinity_type = affinity_none;
      __kmp_create_affinity_none_places(); // match the other affinity_none paths
4400  return;
4401  }
4402 
4403  procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
4404  for (int i = 0; i < nproc; i++) {
4405  procarr[i] = -1;
4406  }
4407 
4408  int lastcore = -1;
4409  int inlastcore = 0;
4410  for (int i = 0; i < __kmp_avail_proc; i++) {
4411  int proc = __kmp_topology->at(i).os_id;
4412  int core = __kmp_affinity_find_core(i, depth - 1, core_level);
4413 
4414  if (core == lastcore) {
4415  inlastcore++;
4416  } else {
4417  inlastcore = 0;
4418  }
4419  lastcore = core;
4420 
4421  procarr[core * maxprocpercore + inlastcore] = proc;
4422  }
4423  }
4424  if (__kmp_affinity_compact >= depth) {
4425  __kmp_affinity_compact = depth - 1;
4426  }
4427 
4428  sortTopology:
4429  // Allocate the gtid->affinity mask table.
4430  if (__kmp_affinity_dups) {
4431  __kmp_affinity_num_masks = __kmp_avail_proc;
4432  } else {
4433  __kmp_affinity_num_masks = numUnique;
4434  }
4435 
4436  if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
4437  (__kmp_affinity_num_places > 0) &&
4438  ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
4439  __kmp_affinity_num_masks = __kmp_affinity_num_places;
4440  }
4441 
4442  KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4443 
4444  // Sort the topology table according to the current setting of
4445  // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
4446  __kmp_topology->sort_compact();
4447  {
4448  int i;
4449  unsigned j;
4450  int num_hw_threads = __kmp_topology->get_num_hw_threads();
4451  for (i = 0, j = 0; i < num_hw_threads; i++) {
4452  if ((!__kmp_affinity_dups) && (!__kmp_topology->at(i).leader)) {
4453  continue;
4454  }
4455  int osId = __kmp_topology->at(i).os_id;
4456 
4457  kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
4458  kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
4459  KMP_ASSERT(KMP_CPU_ISSET(osId, src));
4460  KMP_CPU_COPY(dest, src);
4461  if (++j >= __kmp_affinity_num_masks) {
4462  break;
4463  }
4464  }
4465  KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
4466  }
4467  // Sort the topology back using ids
4468  __kmp_topology->sort_ids();
4469  break;
4470 
4471  default:
4472  KMP_ASSERT2(0, "Unexpected affinity setting");
4473  }
4474 
4475  KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
4476 }
4477 
4478 void __kmp_affinity_initialize(void) {
4479  // Much of the code above was written assuming that if a machine was not
4480  // affinity capable, then __kmp_affinity_type == affinity_none. We now
4481  // explicitly represent this as __kmp_affinity_type == affinity_disabled.
4482  // There are too many checks for __kmp_affinity_type == affinity_none
4483  // in this code. Instead of trying to change them all, check if
4484  // __kmp_affinity_type == affinity_disabled, and if so, slam it with
4485  // affinity_none, call the real initialization routine, then restore
4486  // __kmp_affinity_type to affinity_disabled.
4487  int disabled = (__kmp_affinity_type == affinity_disabled);
4488  if (!KMP_AFFINITY_CAPABLE()) {
4489  KMP_ASSERT(disabled);
4490  }
4491  if (disabled) {
4492  __kmp_affinity_type = affinity_none;
4493  }
4494  __kmp_aux_affinity_initialize();
4495  if (disabled) {
4496  __kmp_affinity_type = affinity_disabled;
4497  }
4498 }
4499 
4500 void __kmp_affinity_uninitialize(void) {
4501  if (__kmp_affinity_masks != NULL) {
4502  KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4503  __kmp_affinity_masks = NULL;
4504  }
4505  if (__kmp_affin_fullMask != NULL) {
4506  KMP_CPU_FREE(__kmp_affin_fullMask);
4507  __kmp_affin_fullMask = NULL;
4508  }
4509  __kmp_affinity_num_masks = 0;
4510  __kmp_affinity_type = affinity_default;
4511  __kmp_affinity_num_places = 0;
4512  if (__kmp_affinity_proclist != NULL) {
4513  __kmp_free(__kmp_affinity_proclist);
4514  __kmp_affinity_proclist = NULL;
4515  }
4516  if (procarr != NULL) {
4517  __kmp_free(procarr);
4518  procarr = NULL;
4519  }
4520 #if KMP_USE_HWLOC
4521  if (__kmp_hwloc_topology != NULL) {
4522  hwloc_topology_destroy(__kmp_hwloc_topology);
4523  __kmp_hwloc_topology = NULL;
4524  }
4525 #endif
4526  if (__kmp_hw_subset) {
4527  kmp_hw_subset_t::deallocate(__kmp_hw_subset);
4528  __kmp_hw_subset = nullptr;
4529  }
4530  if (__kmp_topology) {
4531  kmp_topology_t::deallocate(__kmp_topology);
4532  __kmp_topology = nullptr;
4533  }
4534  KMPAffinity::destroy_api();
4535 }
4536 
4537 void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
4538  if (!KMP_AFFINITY_CAPABLE()) {
4539  return;
4540  }
4541 
4542  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4543  if (th->th.th_affin_mask == NULL) {
4544  KMP_CPU_ALLOC(th->th.th_affin_mask);
4545  } else {
4546  KMP_CPU_ZERO(th->th.th_affin_mask);
4547  }
4548 
4549  // Copy the thread mask to the kmp_info_t structure. If
4550  // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one that
4551  // has all of the OS proc ids set; if __kmp_affinity_respect_mask is set,
4552  // the full mask is the same as the initialization thread's mask.
4553  kmp_affin_mask_t *mask;
4554  int i;
4555 
4556  if (KMP_AFFINITY_NON_PROC_BIND) {
4557  if ((__kmp_affinity_type == affinity_none) ||
4558  (__kmp_affinity_type == affinity_balanced) ||
4559  KMP_HIDDEN_HELPER_THREAD(gtid)) {
4560 #if KMP_GROUP_AFFINITY
4561  if (__kmp_num_proc_groups > 1) {
4562  return;
4563  }
4564 #endif
4565  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4566  i = 0;
4567  mask = __kmp_affin_fullMask;
4568  } else {
4569  int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
4570  KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4571  i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4572  mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4573  }
4574  } else {
4575  if ((!isa_root) || KMP_HIDDEN_HELPER_THREAD(gtid) ||
4576  (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
4577 #if KMP_GROUP_AFFINITY
4578  if (__kmp_num_proc_groups > 1) {
4579  return;
4580  }
4581 #endif
4582  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4583  i = KMP_PLACE_ALL;
4584  mask = __kmp_affin_fullMask;
4585  } else {
4586  // int i = some hash function or just a counter that doesn't
4587  // always start at 0. Use adjusted gtid for now.
4588  int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
4589  KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4590  i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4591  mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4592  }
4593  }
4594 
4595  th->th.th_current_place = i;
4596  if (isa_root || KMP_HIDDEN_HELPER_THREAD(gtid)) {
4597  th->th.th_new_place = i;
4598  th->th.th_first_place = 0;
4599  th->th.th_last_place = __kmp_affinity_num_masks - 1;
4600  } else if (KMP_AFFINITY_NON_PROC_BIND) {
4601  // When using a Non-OMP_PROC_BIND affinity method,
4602  // set all threads' place-partition-var to the entire place list
4603  th->th.th_first_place = 0;
4604  th->th.th_last_place = __kmp_affinity_num_masks - 1;
4605  }
4606 
4607  if (i == KMP_PLACE_ALL) {
4608  KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
4609  gtid));
4610  } else {
4611  KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
4612  gtid, i));
4613  }
4614 
4615  KMP_CPU_COPY(th->th.th_affin_mask, mask);
4616 
4617  if (__kmp_affinity_verbose && !KMP_HIDDEN_HELPER_THREAD(gtid)
4618  /* to avoid duplicate printing (will be correctly printed on barrier) */
4619  && (__kmp_affinity_type == affinity_none ||
4620  (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
4621  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4622  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4623  th->th.th_affin_mask);
4624  KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
4625  __kmp_gettid(), gtid, buf);
4626  }
4627 
4628 #if KMP_DEBUG
4629  // Hidden helper thread affinity only printed for debug builds
4630  if (__kmp_affinity_verbose && KMP_HIDDEN_HELPER_THREAD(gtid)) {
4631  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4632  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4633  th->th.th_affin_mask);
4634  KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY (hidden helper thread)",
4635  (kmp_int32)getpid(), __kmp_gettid(), gtid, buf);
4636  }
4637 #endif
4638 
4639 #if KMP_OS_WINDOWS
4640  // On Windows* OS, the process affinity mask might have changed. If the user
4641  // didn't request affinity and this call fails, just continue silently.
4642  // See CQ171393.
4643  if (__kmp_affinity_type == affinity_none) {
4644  __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
4645  } else
4646 #endif
4647  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4648 }
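// Illustrative example (not part of the runtime): with 4 affinity masks and
// KMP_AFFINITY offset 1, threads with adjusted gtids 0..5 are assigned the
// place indices (0+1)%4, (1+1)%4, ... = 1, 2, 3, 0, 1, 2 by the
// (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks computation
// above.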
4649 
4650 void __kmp_affinity_set_place(int gtid) {
4651  if (!KMP_AFFINITY_CAPABLE()) {
4652  return;
4653  }
4654 
4655  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4656 
4657  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
4658  "place = %d)\n",
4659  gtid, th->th.th_new_place, th->th.th_current_place));
4660 
4661  // Check that the new place is within this thread's partition.
4662  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4663  KMP_ASSERT(th->th.th_new_place >= 0);
4664  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
4665  if (th->th.th_first_place <= th->th.th_last_place) {
4666  KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
4667  (th->th.th_new_place <= th->th.th_last_place));
4668  } else {
4669  KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
4670  (th->th.th_new_place >= th->th.th_last_place));
4671  }
4672 
4673  // Copy the thread mask to the kmp_info_t structure,
4674  // and set this thread's affinity.
4675  kmp_affin_mask_t *mask =
4676  KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
4677  KMP_CPU_COPY(th->th.th_affin_mask, mask);
4678  th->th.th_current_place = th->th.th_new_place;
4679 
4680  if (__kmp_affinity_verbose) {
4681  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4682  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4683  th->th.th_affin_mask);
4684  KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
4685  __kmp_gettid(), gtid, buf);
4686  }
4687  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4688 }
4689 
4690 int __kmp_aux_set_affinity(void **mask) {
4691  int gtid;
4692  kmp_info_t *th;
4693  int retval;
4694 
4695  if (!KMP_AFFINITY_CAPABLE()) {
4696  return -1;
4697  }
4698 
4699  gtid = __kmp_entry_gtid();
4700  KA_TRACE(
4701  1000, (""); {
4702  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4703  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4704  (kmp_affin_mask_t *)(*mask));
4705  __kmp_debug_printf(
4706  "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
4707  gtid, buf);
4708  });
4709 
4710  if (__kmp_env_consistency_check) {
4711  if ((mask == NULL) || (*mask == NULL)) {
4712  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4713  } else {
4714  unsigned proc;
4715  int num_procs = 0;
4716 
4717  KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
4718  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4719  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4720  }
4721  if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
4722  continue;
4723  }
4724  num_procs++;
4725  }
4726  if (num_procs == 0) {
4727  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4728  }
4729 
4730 #if KMP_GROUP_AFFINITY
4731  if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
4732  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4733  }
4734 #endif /* KMP_GROUP_AFFINITY */
4735  }
4736  }
4737 
4738  th = __kmp_threads[gtid];
4739  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4740  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4741  if (retval == 0) {
4742  KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
4743  }
4744 
4745  th->th.th_current_place = KMP_PLACE_UNDEFINED;
4746  th->th.th_new_place = KMP_PLACE_UNDEFINED;
4747  th->th.th_first_place = 0;
4748  th->th.th_last_place = __kmp_affinity_num_masks - 1;
4749 
4750  // Turn off 4.0 affinity for the current thread at this parallel level.
4751  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
4752 
4753  return retval;
4754 }
4755 
4756 int __kmp_aux_get_affinity(void **mask) {
4757  int gtid;
4758  int retval;
4759 #if KMP_OS_WINDOWS || KMP_DEBUG
4760  kmp_info_t *th;
4761 #endif
4762  if (!KMP_AFFINITY_CAPABLE()) {
4763  return -1;
4764  }
4765 
4766  gtid = __kmp_entry_gtid();
4767 #if KMP_OS_WINDOWS || KMP_DEBUG
4768  th = __kmp_threads[gtid];
4769 #else
4770  (void)gtid; // unused variable
4771 #endif
4772  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4773 
4774  KA_TRACE(
4775  1000, (""); {
4776  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4777  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4778  th->th.th_affin_mask);
4779  __kmp_printf(
4780  "kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid,
4781  buf);
4782  });
4783 
4784  if (__kmp_env_consistency_check) {
4785  if ((mask == NULL) || (*mask == NULL)) {
4786  KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
4787  }
4788  }
4789 
4790 #if !KMP_OS_WINDOWS
4791 
4792  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
4793  KA_TRACE(
4794  1000, (""); {
4795  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4796  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4797  (kmp_affin_mask_t *)(*mask));
4798  __kmp_printf(
4799  "kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid,
4800  buf);
4801  });
4802  return retval;
4803 
4804 #else
4805  (void)retval;
4806 
4807  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
4808  return 0;
4809 
4810 #endif /* KMP_OS_WINDOWS */
4811 }
4812 
4813 int __kmp_aux_get_affinity_max_proc() {
4814  if (!KMP_AFFINITY_CAPABLE()) {
4815  return 0;
4816  }
4817 #if KMP_GROUP_AFFINITY
4818  if (__kmp_num_proc_groups > 1) {
4819  return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
4820  }
4821 #endif
4822  return __kmp_xproc;
4823 }
4824 
4825 int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
4826  if (!KMP_AFFINITY_CAPABLE()) {
4827  return -1;
4828  }
4829 
4830  KA_TRACE(
4831  1000, (""); {
4832  int gtid = __kmp_entry_gtid();
4833  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4834  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4835  (kmp_affin_mask_t *)(*mask));
4836  __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
4837  "affinity mask for thread %d = %s\n",
4838  proc, gtid, buf);
4839  });
4840 
4841  if (__kmp_env_consistency_check) {
4842  if ((mask == NULL) || (*mask == NULL)) {
4843  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
4844  }
4845  }
4846 
4847  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4848  return -1;
4849  }
4850  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4851  return -2;
4852  }
4853 
4854  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
4855  return 0;
4856 }
4857 
4858 int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
4859  if (!KMP_AFFINITY_CAPABLE()) {
4860  return -1;
4861  }
4862 
4863  KA_TRACE(
4864  1000, (""); {
4865  int gtid = __kmp_entry_gtid();
4866  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4867  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4868  (kmp_affin_mask_t *)(*mask));
4869  __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
4870  "affinity mask for thread %d = %s\n",
4871  proc, gtid, buf);
4872  });
4873 
4874  if (__kmp_env_consistency_check) {
4875  if ((mask == NULL) || (*mask == NULL)) {
4876  KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
4877  }
4878  }
4879 
4880  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4881  return -1;
4882  }
4883  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4884  return -2;
4885  }
4886 
4887  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
4888  return 0;
4889 }
4890 
4891 int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
4892  if (!KMP_AFFINITY_CAPABLE()) {
4893  return -1;
4894  }
4895 
4896  KA_TRACE(
4897  1000, (""); {
4898  int gtid = __kmp_entry_gtid();
4899  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4900  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4901  (kmp_affin_mask_t *)(*mask));
4902  __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
4903  "affinity mask for thread %d = %s\n",
4904  proc, gtid, buf);
4905  });
4906 
4907  if (__kmp_env_consistency_check) {
4908  if ((mask == NULL) || (*mask == NULL)) {
4909  KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
4910  }
4911  }
4912 
4913  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
4914  return -1;
4915  }
4916  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4917  return 0;
4918  }
4919 
4920  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
4921 }
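// Illustrative user-level sketch (compiled out; not part of the runtime):
// the kmp_* affinity extensions declared in omp.h funnel into the
// __kmp_aux_* routines above, with the same 0 / -1 / -2 result conventions.
#if 0
#include <omp.h>
int example_pin_to_proc(int proc) {
  kmp_affinity_mask_t mask;
  kmp_create_affinity_mask(&mask);
  // Fails (-1/-2) if proc is out of range or not in the machine's full mask.
  if (kmp_set_affinity_mask_proc(proc, &mask) != 0) {
    kmp_destroy_affinity_mask(&mask);
    return -1;
  }
  int rc = kmp_set_affinity(&mask); // funnels into __kmp_aux_set_affinity()
  kmp_destroy_affinity_mask(&mask);
  return rc;
}
#endif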

// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;

  // Do not perform balanced affinity for the hidden helper threads
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity_gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }
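// Consequence of the switch above: with thread granularity, fine_gran stays
// true and each worker is later bound to exactly one hardware thread; with
// core or socket granularity on a machine where that level holds more than
// one unit, fine_gran becomes false and the worker is bound to all hardware
// threads of its core (or, with the socket re-derivation below, of its
// package) instead.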

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyperthreads per core on a hyperthreaded machine
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core
    int chunk = nthreads / ncores;
    // How many cores will have an additional thread bound to them - the
    // "big" cores
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores
    int big_nth = (chunk + 1) * big_cores;
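    // Worked example for the mapping below: nthreads = 10, ncores = 4 and
    // __kmp_nth_per_core = 2 give chunk = 2, big_cores = 2, big_nth = 6.
    // Threads 0-5 go to the two "big" cores, three apiece (e.g. tid 5 ->
    // coreID = 5 / 3 = 1), and threads 6-9 go to cores 2 and 3, two apiece
    // (e.g. tid 7 -> coreID = (7 - 2) / 2 = 2).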
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
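    // In the non-uniform case the per-core numbers cannot come from the
    // global counters: core_level identifies the topology depth that plays
    // the role of "core" here, ncores counts the cores at that depth, and
    // nth_per_core is the *maximum* number of procs any such core exposes.
    // The procarr[] table consulted below is laid out as a dense
    // ncores x nth_per_core grid of OS proc ids, with -1 marking contexts a
    // given core does not have.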

    // As a performance optimization, handle the special case nthreads ==
    // __kmp_avail_proc separately
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {

      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check whether this core has any available procs in procarr[]
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (tid == core) {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
                // For fine granularity it is enough to set the first
                // available osID for this core
                if (fine_gran) {
                  break;
                }
              }
            }
            break;
          } else {
            core++;
          }
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with exactly "x" available
      // processors
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with at least "x" available
      // processors (i.e. with # procs from x to nth_per_core)
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }
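      // Worked example: with ncores = 4, nth_per_core = 2 and cores exposing
      // {2, 1, 2, 0} available procs, nproc_at_core = {2, 1, 2, 0},
      // ncores_with_x_procs = {1, 1, 2} (one core with zero procs, one with
      // one, two with two), and the suffix sums give
      // ncores_with_x_to_max_procs = {4, 3, 2}: three cores have at least
      // one proc, two have at least two.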

      // Max number of processors
      int nproc = nth_per_core * ncores;
      // An array to keep the number of threads bound to each hardware context
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip cores with no available processors
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }
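      // On exit from this loop, newarr[] records how many of the nthreads
      // workers were assigned to each (core, context) slot: the first sweep
      // of the j-loop places at most one thread per hardware context, giving
      // every core with at least one available proc a thread before cores
      // receive a second one; if threads remain after that sweep, flag flips
      // to 1 and the remaining threads stack on already occupied contexts.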
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
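      // The linear scan above treats newarr[] as an implicit prefix sum: the
      // current thread (tid) is mapped to the first context i at which the
      // running total of assigned threads exceeds tid, so consecutive tids
      // occupy consecutive occupied contexts. For example, newarr = {2, 1, 1}
      // sends tids 0 and 1 to context 0, tid 2 to context 1, and tid 3 to
      // context 2.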
      __kmp_free(newarr);
    }

    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  }
}

#if KMP_OS_LINUX || KMP_OS_FREEBSD
// We don't need this entry for Windows because the
// GetProcessAffinityMask() API can be used there instead.
//
// The intended usage is indicated by these steps:
// 1) The user gets the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error check the return value
// 4) Use non-OpenMP parallelization
// 5) Reset the affinity to what was stored in step 1)
// (a minimal sketch of these steps follows)
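// A minimal sketch of the five steps, assuming Linux and glibc's
// pthread_{get,set}affinity_np for saving and restoring the mask;
// run_non_openmp_parallel_work() is a hypothetical placeholder for step 4:
//
//   cpu_set_t saved;
//   pthread_getaffinity_np(pthread_self(), sizeof(saved), &saved); // step 1
//   int rc = kmp_set_thread_affinity_mask_initial();               // step 2
//   if (rc == 0) {                                                 // step 3
//     run_non_openmp_parallel_work();                              // step 4
//     pthread_setaffinity_np(pthread_self(), sizeof(saved), &saved); // step 5
//   }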
#ifdef __cplusplus
extern "C"
#endif
    int
    kmp_set_thread_affinity_mask_initial()
// The function returns 0 on success,
// -1 if we cannot bind the thread,
// >0 (errno) if an error happened during binding
{
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-OpenMP threads
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}
#endif

#endif // KMP_AFFINITY_SUPPORTED