/*
 * kmp_gsupport.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

enum {
  KMP_GOMP_TASK_UNTIED_FLAG = 1,
  KMP_GOMP_TASK_FINAL_FLAG = 2,
  KMP_GOMP_TASK_DEPENDS_FLAG = 8
};

// This class helps convert gomp dependency info into
// kmp_depend_info_t structures
class kmp_gomp_depends_info_t {
  void **depend;
  kmp_int32 num_deps;
  size_t num_out, num_mutexinout, num_in;
  size_t offset;

public:
  kmp_gomp_depends_info_t(void **depend) : depend(depend) {
    size_t ndeps = (kmp_intptr_t)depend[0];
    size_t num_doable;
    // GOMP taskdep structure:
    // if depend[0] != 0:
    //   depend = [ ndeps | nout | &out | ... | &out | &in | ... | &in ]
    //
    // if depend[0] == 0:
    //   depend = [ 0 | ndeps | nout | nmtx | nin | &out | ... | &out | &mtx |
    //              ... | &mtx | &in | ... | &in | &depobj | ... | &depobj ]
    if (ndeps) {
      num_out = (kmp_intptr_t)depend[1];
      num_in = ndeps - num_out;
      num_mutexinout = 0;
      num_doable = ndeps;
      offset = 2;
    } else {
      ndeps = (kmp_intptr_t)depend[1];
      num_out = (kmp_intptr_t)depend[2];
      num_mutexinout = (kmp_intptr_t)depend[3];
      num_in = (kmp_intptr_t)depend[4];
      num_doable = num_out + num_mutexinout + num_in;
      offset = 5;
    }
    // TODO: Support gomp depobj
    if (ndeps != num_doable) {
      KMP_FATAL(GompFeatureNotSupported, "depobj");
    }
    num_deps = static_cast<kmp_int32>(ndeps);
  }
  kmp_int32 get_num_deps() const { return num_deps; }
  kmp_depend_info_t get_kmp_depend(size_t index) const {
    kmp_depend_info_t retval;
    memset(&retval, '\0', sizeof(retval));
    KMP_ASSERT(index < (size_t)num_deps);
    retval.base_addr = (kmp_intptr_t)depend[offset + index];
    retval.len = 0;
    // Because inout and out are logically equivalent,
    // use inout and in dependency flags. GOMP does not provide a
    // way to distinguish if the user specified out vs. inout.
    if (index < num_out) {
      retval.flags.in = 1;
      retval.flags.out = 1;
    } else if (index >= num_out && index < (num_out + num_mutexinout)) {
      retval.flags.mtx = 1;
    } else {
      retval.flags.in = 1;
    }
    return retval;
  }
};
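
// Example (illustrative sketch, not part of the runtime): for the
// GCC-generated array
//   void *depend[4] = {(void *)2, (void *)1, &x, &y};
// the constructor sees ndeps == 2 and num_out == 1, so get_kmp_depend(0)
// yields an inout dependence on &x and get_kmp_depend(1) an in dependence
// on &y.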

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define MKLOC(loc, routine) \
  static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};

#include "kmp_ftn_os.h"

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_barrier");
  KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_barrier(&loc, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}

// Mutual exclusion

// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any objects that
// reference it. We can't reference it directly here in C code, as the symbol
// contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
extern kmp_critical_name *__kmp_unnamed_critical_addr;

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_start");
  KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_end");
  KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_name_start");
  KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
  __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_name_end");
  KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
  __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}

// The Gnu codegen tries to use locked operations to perform atomic updates
// inline. If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) {
  int gtid = __kmp_entry_gtid();
  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
  __ompt_thread_assign_wait_id(0);
#endif

  __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
  __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}

int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_start");
  KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
  // workshare when USE_CHECKS is defined. We need to avoid the push,
  // as there is no corresponding GOMP_single_end() call.
  kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  if (ompt_enabled.enabled) {
    if (rc) {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_executor, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    } else {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_end,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    }
  }
#endif

  return rc;
}

void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) {
  void *retval;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_copy_start");
  KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // If this is the first thread to enter, return NULL. The generated code will
  // then call GOMP_single_copy_end() for this thread only, with the
  // copyprivate data pointer as an argument.
  if (__kmp_enter_single(gtid, &loc, FALSE))
    return NULL;

// Wait for the first thread to set the copyprivate data pointer,
// and for all other threads to reach this point.

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

  // Retrieve the value of the copyprivate data pointer, and wait for all
  // threads to do likewise, then return.
  retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
  {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
  }
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
  return retval;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

  // Set the copyprivate data pointer for the team, then hit the barrier so
  // that the other threads will continue on and read it. Hit another barrier
  // before continuing, so that we know that the copyprivate data pointer has
  // been propagated to all threads before trying to reuse the t_copypriv_data
  // field.
  __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
  {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
  }
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_ordered_start");
  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_ordered(&loc, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_ordered_end");
  KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_ordered(&loc, gtid);
}

// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
#else
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

#define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
#define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
#define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u

// The parallel construct

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
                                 void *data) {
#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    // get pointer to thread data structure
    thr = __kmp_threads[*gtid];

    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // restore enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
                                          void (*task)(void *), void *data,
                                          unsigned num_threads, ident_t *loc,
                                          enum sched_type schedule, long start,
                                          long end, long incr,
                                          long chunk_size) {
  // Initialize the loop worksharing construct.

  KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
                    schedule != kmp_sch_static);

#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    thr = __kmp_threads[*gtid];
    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  // Now invoke the microtask.
  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // reset enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

static void __kmp_GOMP_fork_call(ident_t *loc, int gtid, unsigned num_threads,
                                 unsigned flags, void (*unwrapped_task)(void *),
                                 microtask_t wrapper, int argc, ...) {
  int rc;
  kmp_info_t *thr = __kmp_threads[gtid];
  kmp_team_t *team = thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  va_list ap;
  va_start(ap, argc);

  if (num_threads != 0)
    __kmp_push_num_threads(loc, gtid, num_threads);
  if (flags != 0)
    __kmp_push_proc_bind(loc, gtid, (kmp_proc_bind_t)flags);
  rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
                       __kmp_invoke_task_func, kmp_va_addr_of(ap));

  va_end(ap);

  if (rc) {
    __kmp_run_before_invoked_task(gtid, tid, thr, team);
  }

#if OMPT_SUPPORT
  int ompt_team_size;
  if (ompt_enabled.enabled) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);

    // implicit task callback
    if (ompt_enabled.ompt_callback_implicit_task) {
      ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid),
          ompt_task_implicit); // TODO: Can this be ompt_task_initial?
      task_info->thread_num = __kmp_tid_from_gtid(gtid);
    }
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *),
                                                       void *data,
                                                       unsigned num_threads) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame, *frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  MKLOC(loc, "GOMP_parallel_start");
  KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));
  __kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task,
                       (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                       data);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL);
    frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) {
  int gtid = __kmp_get_gtid();
  kmp_info_t *thr;

  thr = __kmp_threads[gtid];

  MKLOC(loc, "GOMP_parallel_end");
  KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));

  if (!thr->th.th_team->t.t_serialized) {
    __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
                                 thr->th.th_team);
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // The implicit task is finished here; in the barrier we might schedule
    // deferred tasks, and these must not see the implicit task on the stack.
    OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = ompt_data_none;
  }
#endif

  __kmp_join_call(&loc, gtid
#if OMPT_SUPPORT
                  ,
                  fork_context_gnu
#endif
  );
}

// Loop worksharing constructs

// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value. We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
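//
// Worked example of the bound conversion described above (illustrative):
// for a GOMP loop with lb == 0, ub == 10, str == 2 (the half-open range
// [0, 10)), KMP_DISPATCH_INIT is passed the inclusive upper bound
// ub - 1 == 9. When KMP_DISPATCH_NEXT later hands back, say, *p_lb == 0 and
// *p_ub == 8 (inclusive), the "+1" adjustment turns *p_ub into the exclusive
// bound 9 that the GCC-generated "for (i = lb; i < ub; i += str)" loop
// expects, covering iterations 0, 2, 4, 6, 8.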

#if OMPT_SUPPORT
#define IF_OMPT_SUPPORT(code) code
#else
#define IF_OMPT_SUPPORT(code)
#endif

#define LOOP_START(func, schedule) \
  int func(long lb, long ub, long str, long chunk_sz, long *p_lb, \
           long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      { \
        IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                          (schedule) != kmp_sch_static); \
      } \
      { \
        IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                   (kmp_int *)p_ub, (kmp_int *)&stride); \
      } \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START(func, schedule) \
  int func(long lb, long ub, long str, long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      { \
        IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                          TRUE); \
      } \
      { \
        IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                   (kmp_int *)p_ub, (kmp_int *)&stride); \
      } \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define KMP_DOACROSS_FINI(status, gtid) \
  if (!status && __kmp_threads[gtid]->th.th_dispatch->th_doacross_flags) { \
    __kmpc_doacross_fini(NULL, gtid); \
  }

#define LOOP_NEXT(func, fini_code) \
  int func(long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
\
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                         (kmp_int *)p_ub, (kmp_int *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
    KMP_DOACROSS_FINI(status, gtid) \
\
    KA_TRACE( \
        20, \
        (KMP_STR(func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
                       "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }
671 
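// Usage sketch (illustrative, assuming GCC's codegen for
// "#pragma omp for schedule(dynamic, chunk)"): the generated code drives
// these entry points roughly as
//   long lb, ub;
//   if (GOMP_loop_dynamic_start(0, n, 1, chunk, &lb, &ub)) {
//     do {
//       for (long i = lb; i < ub; i++)
//         body(i);
//     } while (GOMP_loop_dynamic_next(&lb, &ub));
//   }
//   GOMP_loop_end();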
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START),
                   kmp_sch_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_NEXT(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT), {})

LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START),
           kmp_ord_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START),
           kmp_ord_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START),
           kmp_ord_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })

#define LOOP_DOACROSS_START(func, schedule) \
  bool func(unsigned ncounts, long *counts, long chunk_sz, long *p_lb, \
            long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, ncounts %u, lb 0x%lx, ub 0x%lx, str " \
                                "0x%lx, chunk_sz " \
                                "0x%lx\n", \
                  gtid, ncounts, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START(func, schedule) \
  int func(unsigned ncounts, long *counts, long *p_lb, long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE( \
        20, \
        (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4

#define LOOP_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long chunk_sz, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    long long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
\
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    unsigned long long stride; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
\
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT((long long)stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_NEXT_ULL(func, fini_code) \
  int func(unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
\
    fini_code status = \
        KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                              (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
                   "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START),
               kmp_sch_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START),
               kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START),
               kmp_sch_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(
        KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT),
    {})
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT), {})

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START),
               kmp_ord_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START),
    kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START),
               kmp_ord_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })

#define LOOP_DOACROSS_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long chunk_sz, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long stride, str, lb, ub; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
\
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    unsigned long long stride, str, lb, ub; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).

#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \
            long ub, long str, long chunk_sz) { \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    ompt_pre(); \
\
    __kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task, \
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                         9, task, data, num_threads, &loc, (schedule), lb, \
                         (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \
\
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                      (schedule) != kmp_sch_static); \
\
    ompt_post(); \
\
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }
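
// Usage sketch (illustrative, assuming the older GOMP_parallel_*_start /
// GOMP_parallel_end codegen that predates the combined GOMP_parallel_loop_*
// entry points): the compiler emits roughly
//   GOMP_parallel_loop_dynamic_start(fn, data, nthreads, lb, ub, str, chunk);
//   fn(data); // the calling thread runs its share of the loop here
//   GOMP_parallel_end();
// so these entry points only fork the team and prime the dispatcher; they do
// not invoke the microtask on the calling thread themselves.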

#if OMPT_SUPPORT && OMPT_OPTIONAL

#define OMPT_LOOP_PRE() \
  ompt_frame_t *parent_frame; \
  if (ompt_enabled.enabled) { \
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); \
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); \
    OMPT_STORE_RETURN_ADDRESS(gtid); \
  }

#define OMPT_LOOP_POST() \
  if (ompt_enabled.enabled) { \
    parent_frame->enter_frame = ompt_data_none; \
  }

#else

#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif

PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

// Tasking constructs

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data,
                                             void (*copy_func)(void *, void *),
                                             long arg_size, long arg_align,
                                             bool if_cond, unsigned gomp_flags,
                                             void **depend) {
  MKLOC(loc, "GOMP_task");
  int gtid = __kmp_entry_gtid();
  kmp_int32 flags = 0;
  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;

  KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

  // The low-order bit is the "untied" flag
  if (!(gomp_flags & KMP_GOMP_TASK_UNTIED_FLAG)) {
    input_flags->tiedness = 1;
  }
  // The second low-order bit is the "final" flag
  if (gomp_flags & KMP_GOMP_TASK_FINAL_FLAG) {
    input_flags->final = 1;
  }
  input_flags->native = 1;
  // __kmp_task_alloc() sets up all other flags

  if (!if_cond) {
    arg_size = 0;
  }

  kmp_task_t *task = __kmp_task_alloc(
      &loc, gtid, input_flags, sizeof(kmp_task_t),
      arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func);

  if (arg_size > 0) {
    if (arg_align > 0) {
      task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
                               arg_align * arg_align);
    }
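      // Example of the round-up above (illustrative): with arg_align == 16
      // and task->shareds == 0x1009, (0x1009 + 15) / 16 * 16 == 0x1010, the
      // next 16-byte-aligned address; the extra arg_align - 1 bytes requested
      // from __kmp_task_alloc() guarantee this stays inside the allocation.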
    // else error??

    if (copy_func) {
      (*copy_func)(task->shareds, data);
    } else {
      KMP_MEMCPY(task->shareds, data, arg_size);
    }
  }

#if OMPT_SUPPORT
  kmp_taskdata_t *current_task;
  if (ompt_enabled.enabled) {
    current_task = __kmp_threads[gtid]->th.th_current_task;
    current_task->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  if (if_cond) {
    if (gomp_flags & KMP_GOMP_TASK_DEPENDS_FLAG) {
      KMP_ASSERT(depend);
      kmp_gomp_depends_info_t gomp_depends(depend);
      kmp_int32 ndeps = gomp_depends.get_num_deps();
      kmp_depend_info_t dep_list[ndeps];
      for (kmp_int32 i = 0; i < ndeps; i++)
        dep_list[i] = gomp_depends.get_kmp_depend(i);
      kmp_int32 ndeps_cnv;
      __kmp_type_convert(ndeps, &ndeps_cnv);
      __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps_cnv, dep_list, 0, NULL);
    } else {
      __kmpc_omp_task(&loc, gtid, task);
    }
  } else {
#if OMPT_SUPPORT
    ompt_thread_info_t oldInfo;
    kmp_info_t *thread;
    kmp_taskdata_t *taskdata;
    if (ompt_enabled.enabled) {
      // Store the thread's state and restore it after the task
      thread = __kmp_threads[gtid];
      taskdata = KMP_TASK_TO_TASKDATA(task);
      oldInfo = thread->th.ompt_thread_info;
      thread->th.ompt_thread_info.wait_id = 0;
      thread->th.ompt_thread_info.state = ompt_state_work_parallel;
      taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    }
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    if (gomp_flags & KMP_GOMP_TASK_DEPENDS_FLAG) {
      KMP_ASSERT(depend);
      kmp_gomp_depends_info_t gomp_depends(depend);
      kmp_int32 ndeps = gomp_depends.get_num_deps();
      kmp_depend_info_t dep_list[ndeps];
      for (kmp_int32 i = 0; i < ndeps; i++)
        dep_list[i] = gomp_depends.get_kmp_depend(i);
      __kmpc_omp_wait_deps(&loc, gtid, ndeps, dep_list, 0, NULL);
    }

    __kmpc_omp_task_begin_if0(&loc, gtid, task);
    func(data);
    __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
    if (ompt_enabled.enabled) {
      thread->th.ompt_thread_info = oldInfo;
      taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
    }
#endif
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) {
  MKLOC(loc, "GOMP_taskwait");
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

  __kmpc_omp_taskwait(&loc, gtid);

  KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}

// Sections worksharing constructs
//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
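//
// For example, GOMP_sections_start(3) primes a dynamic loop over 1..3 with
// chunk size 1; each thread's call then claims the next unexecuted section
// and returns its id (1, 2, or 3), or 0 once all sections have been handed
// out.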

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_sections_start");
  KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
                (unsigned)lb));
  return (unsigned)lb;
}

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_sections_next");
  KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(
      20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid, (unsigned)lb));
  return (unsigned)lb;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(
    void (*task)(void *), void *data, unsigned num_threads, unsigned count) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  MKLOC(loc, "GOMP_parallel_sections_start");
  KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

  __kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task,
                       (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                       task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                       (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    parent_frame->enter_frame = ompt_data_none;
  }
#endif

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

#if OMPT_SUPPORT
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) {
  KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *),
                                                 void *data,
                                                 unsigned num_threads,
                                                 unsigned int flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel");
  KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

#if OMPT_SUPPORT
  ompt_task_info_t *parent_task_info, *task_info;
  if (ompt_enabled.enabled) {
    parent_task_info = __ompt_get_task_info_object(0);
    parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task,
                       (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                       data);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info = __ompt_get_task_info_object(0);
    task_info->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
  task(data);
  {
#if OMPT_SUPPORT
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info->frame.exit_frame = ompt_data_none;
    parent_task_info->frame.enter_frame = ompt_data_none;
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *),
                                                          void *data,
                                                          unsigned num_threads,
                                                          unsigned count,
                                                          unsigned flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel_sections");
  KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  __kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task,
                       (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                       task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                       (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);

  {
#if OMPT_SUPPORT
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1,
                      TRUE);
  }
  task(data);
  KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
  KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}

#define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \
            long ub, long str, long chunk_sz, unsigned flags) { \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    ompt_pre(); \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    __kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task, \
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                         9, task, data, num_threads, &loc, (schedule), lb, \
                         (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
\
    { \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
    } \
    task(data); \
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); \
    ompt_post(); \
\
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }

PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC),
              kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC),
              kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED),
              kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME),
              kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
1558 
1559 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) {
1560  int gtid = __kmp_entry_gtid();
1561  MKLOC(loc, "GOMP_taskgroup_start");
1562  KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));
1563 
1564 #if OMPT_SUPPORT
1565  OMPT_STORE_RETURN_ADDRESS(gtid);
1566 #endif
1567 
1568  __kmpc_taskgroup(&loc, gtid);
1569 
1570  return;
1571 }
1572 
1573 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) {
1574  int gtid = __kmp_get_gtid();
1575  MKLOC(loc, "GOMP_taskgroup_end");
1576  KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));
1577 
1578 #if OMPT_SUPPORT
1579  OMPT_STORE_RETURN_ADDRESS(gtid);
1580 #endif
1581 
1582  __kmpc_end_taskgroup(&loc, gtid);
1583 
1584  return;
1585 }
1586 
1587 static kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
1588  kmp_int32 cncl_kind = 0;
1589  switch (gomp_kind) {
1590  case 1:
1591  cncl_kind = cancel_parallel;
1592  break;
1593  case 2:
1594  cncl_kind = cancel_loop;
1595  break;
1596  case 4:
1597  cncl_kind = cancel_sections;
1598  break;
1599  case 8:
1600  cncl_kind = cancel_taskgroup;
1601  break;
1602  }
1603  return cncl_kind;
1604 }
1605 
1606 // Return true if cancellation should take place, false otherwise
1607 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) {
1608  int gtid = __kmp_get_gtid();
1609  MKLOC(loc, "GOMP_cancellation_point");
1610  KA_TRACE(20, ("GOMP_cancellation_point: T#%d which:%d\n", gtid, which));
1611  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1612  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1613 }
1614 
1615 // Return true if cancellation should take place, false otherwise
1616 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) {
1617  int gtid = __kmp_get_gtid();
1618  MKLOC(loc, "GOMP_cancel");
1619  KA_TRACE(20, ("GOMP_cancel: T#%d which:%d do_cancel:%d\n", gtid, which,
1620  (int)do_cancel));
1621  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1622 
1623  if (do_cancel == FALSE) {
1624  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1625  } else {
1626  return __kmpc_cancel(&loc, gtid, cncl_kind);
1627  }
1628 }
1629 
1630 // Return true if cancellation should take place, false otherwise
1631 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) {
1632  int gtid = __kmp_get_gtid();
1633  KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
1634  return __kmp_barrier_gomp_cancel(gtid);
1635 }
1636 
1637 // Return true if cancellation should take place, false otherwise
1638 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) {
1639  int gtid = __kmp_get_gtid();
1640  KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));
1641  return __kmp_barrier_gomp_cancel(gtid);
1642 }
1643 
1644 // Return true if cancellation should take place, false otherwise
1645 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) {
1646  int gtid = __kmp_get_gtid();
1647  KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));
1648  return __kmp_barrier_gomp_cancel(gtid);
1649 }
1650 
1651 // All target functions are empty as of 2014-05-29
1652 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *),
1653  const void *openmp_target,
1654  size_t mapnum, void **hostaddrs,
1655  size_t *sizes,
1656  unsigned char *kinds) {
1657  return;
1658 }
1659 
1660 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)(
1661  int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1662  size_t *sizes, unsigned char *kinds) {
1663  return;
1664 }
1665 
1666 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; }
1667 
1668 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)(
1669  int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1670  size_t *sizes, unsigned char *kinds) {
1671  return;
1672 }
1673 
1674 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams,
1675  unsigned int thread_limit) {
1676  return;
1677 }
1678 
1679 // Task duplication function that copies src to dest (both are
1680 // preallocated task structures)
1681 static void __kmp_gomp_task_dup(kmp_task_t *dest, kmp_task_t *src,
1682  kmp_int32 last_private) {
1683  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(src);
1684  if (taskdata->td_copy_func) {
1685  (taskdata->td_copy_func)(dest->shareds, src->shareds);
1686  }
1687 }
1688 
1689 #ifdef __cplusplus
1690 } // extern "C"
1691 #endif
1692 
1693 template <typename T>
1694 void __GOMP_taskloop(void (*func)(void *), void *data,
1695  void (*copy_func)(void *, void *), long arg_size,
1696  long arg_align, unsigned gomp_flags,
1697  unsigned long num_tasks, int priority, T start, T end,
1698  T step) {
1699  typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
1700  MKLOC(loc, "GOMP_taskloop");
1701  int sched;
1702  T *loop_bounds;
1703  int gtid = __kmp_entry_gtid();
1704  kmp_int32 flags = 0;
1705  int if_val = gomp_flags & (1u << 10);
1706  int nogroup = gomp_flags & (1u << 11);
1707  int up = gomp_flags & (1u << 8);
1708  p_task_dup_t task_dup = NULL;
1709  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1710 #ifdef KMP_DEBUG
1711  {
1712  char *buff;
1713  buff = __kmp_str_format(
1714  "GOMP_taskloop: T#%%d: func:%%p data:%%p copy_func:%%p "
1715  "arg_size:%%ld arg_align:%%ld gomp_flags:0x%%x num_tasks:%%lu "
1716  "priority:%%d start:%%%s end:%%%s step:%%%s\n",
1717  traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec);
1718  KA_TRACE(20, (buff, gtid, func, data, copy_func, arg_size, arg_align,
1719  gomp_flags, num_tasks, priority, start, end, step));
1720  __kmp_str_free(&buff);
1721  }
1722 #endif
1723  KMP_ASSERT((size_t)arg_size >= 2 * sizeof(T));
1724  KMP_ASSERT(arg_align > 0);
1725  // The low-order bit is the "untied" flag
1726  if (!(gomp_flags & 1)) {
1727  input_flags->tiedness = 1;
1728  }
1729  // The second low-order bit is the "final" flag
1730  if (gomp_flags & 2) {
1731  input_flags->final = 1;
1732  }
1733  // Negative step flag
1734  if (!up) {
1735  // If the step is flagged as negative but was not properly sign
1736  // extended, sign extend it manually: the value may be a char, short,
1737  // or int embedded in a long, so no single cast can be assumed.
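  // Worked example (editor's illustration): a 32-bit step of -2 embedded in
  // a 64-bit T arrives as 0x00000000FFFFFFFE, which compares as positive.
  // The loop below scans from the top bit down, setting each zero bit until
  // it reaches the first set bit (bit 31 here), yielding
  // 0xFFFFFFFFFFFFFFFE, i.e. -2 correctly sign-extended to 64 bits.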
1738  if (step > 0) {
1739  for (int i = sizeof(T) * CHAR_BIT - 1; i >= 0L; --i) {
1740  // break at the first 1 bit
1741  if (step & ((T)1 << i))
1742  break;
1743  step |= ((T)1 << i);
1744  }
1745  }
1746  }
1747  input_flags->native = 1;
1748  // Figure out whether a grainsize or num_tasks clause was specified
1749  if (num_tasks > 0) {
1750  if (gomp_flags & (1u << 9))
1751  sched = 1; // grainsize specified
1752  else
1753  sched = 2; // num_tasks specified
1754  } else {
1755  // neither grainsize nor num_tasks specified
1756  sched = 0;
1757  }
1758 
1759  // __kmp_task_alloc() sets up all other flags
1760  kmp_task_t *task =
1761  __kmp_task_alloc(&loc, gtid, input_flags, sizeof(kmp_task_t),
1762  arg_size + arg_align - 1, (kmp_routine_entry_t)func);
1763  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1764  taskdata->td_copy_func = copy_func;
1765  taskdata->td_size_loop_bounds = sizeof(T);
1766 
1767  // Re-align shareds if needed and set up the firstprivate copy
1768  // constructors through the task_dup mechanism.
1769  task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
1770  arg_align * arg_align);
1771  if (copy_func) {
1772  task_dup = __kmp_gomp_task_dup;
1773  }
1774  KMP_MEMCPY(task->shareds, data, arg_size);
1775 
1776  loop_bounds = (T *)task->shareds;
1777  loop_bounds[0] = start;
1778  loop_bounds[1] = end + (up ? -1 : 1);
1779  __kmpc_taskloop(&loc, gtid, task, if_val, (kmp_uint64 *)&(loop_bounds[0]),
1780  (kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, nogroup,
1781  sched, (kmp_uint64)num_tasks, (void *)task_dup);
1782 }
1783 
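// Illustration (editor's sketch, not part of the runtime): for
//
//   #pragma omp taskloop grainsize(4)
//   for (long i = 0; i < n; ++i)
//     work(i);
//
// GCC packs the clauses into gomp_flags (bit 0 untied, bit 1 final, bit 8
// positive step, bit 9 grainsize-vs-num_tasks, bit 10 if, bit 11 nogroup)
// and passes the grainsize value of 4 through the num_tasks argument; the
// template above decodes those bits into kmp_tasking_flags_t and the
// `sched` selector before calling __kmpc_taskloop.
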
1784 // 4-byte version of GOMP_doacross_post
1785 // This version needs to create a temporary array that converts the 4-byte
1786 // integers into 8-byte integers
1787 template <typename T, bool need_conversion = (sizeof(long) == 4)>
1788 void __kmp_GOMP_doacross_post(T *count);
1789 
1790 template <> void __kmp_GOMP_doacross_post<long, true>(long *count) {
1791  int gtid = __kmp_entry_gtid();
1792  kmp_info_t *th = __kmp_threads[gtid];
1793  MKLOC(loc, "GOMP_doacross_post");
1794  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
1795  kmp_int64 *vec = (kmp_int64 *)__kmp_thread_malloc(
1796  th, (size_t)(sizeof(kmp_int64) * num_dims));
1797  for (kmp_int64 i = 0; i < num_dims; ++i) {
1798  vec[i] = (kmp_int64)count[i];
1799  }
1800  __kmpc_doacross_post(&loc, gtid, vec);
1801  __kmp_thread_free(th, vec);
1802 }
1803 
1804 // 8-byte version of GOMP_doacross_post
1805 // This version can pass the count array through directly instead of
1806 // creating a temporary array
1807 template <> void __kmp_GOMP_doacross_post<long, false>(long *count) {
1808  int gtid = __kmp_entry_gtid();
1809  MKLOC(loc, "GOMP_doacross_post");
1810  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
1811 }
1812 
1813 template <typename T> void __kmp_GOMP_doacross_wait(T first, va_list args) {
1814  int gtid = __kmp_entry_gtid();
1815  kmp_info_t *th = __kmp_threads[gtid];
1816  MKLOC(loc, "GOMP_doacross_wait");
1817  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
1818  kmp_int64 *vec = (kmp_int64 *)__kmp_thread_malloc(
1819  th, (size_t)(sizeof(kmp_int64) * num_dims));
1820  vec[0] = (kmp_int64)first;
1821  for (kmp_int64 i = 1; i < num_dims; ++i) {
1822  T item = va_arg(args, T);
1823  vec[i] = (kmp_int64)item;
1824  }
1825  __kmpc_doacross_wait(&loc, gtid, vec);
1826  __kmp_thread_free(th, vec);
1827  return;
1828 }
1829 
1830 #ifdef __cplusplus
1831 extern "C" {
1832 #endif // __cplusplus
1833 
1834 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP)(
1835  void (*func)(void *), void *data, void (*copy_func)(void *, void *),
1836  long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
1837  int priority, long start, long end, long step) {
1838  __GOMP_taskloop<long>(func, data, copy_func, arg_size, arg_align, gomp_flags,
1839  num_tasks, priority, start, end, step);
1840 }
1841 
1842 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP_ULL)(
1843  void (*func)(void *), void *data, void (*copy_func)(void *, void *),
1844  long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
1845  int priority, unsigned long long start, unsigned long long end,
1846  unsigned long long step) {
1847  __GOMP_taskloop<unsigned long long>(func, data, copy_func, arg_size,
1848  arg_align, gomp_flags, num_tasks,
1849  priority, start, end, step);
1850 }
1851 
1852 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_POST)(long *count) {
1853  __kmp_GOMP_doacross_post(count);
1854 }
1855 
1856 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_WAIT)(long first, ...) {
1857  va_list args;
1858  va_start(args, first);
1859  __kmp_GOMP_doacross_wait<long>(first, args);
1860  va_end(args);
1861 }
1862 
1863 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_POST)(
1864  unsigned long long *count) {
1865  int gtid = __kmp_entry_gtid();
1866  MKLOC(loc, "GOMP_doacross_ull_post");
1867  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
1868 }
1869 
1870 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT)(
1871  unsigned long long first, ...) {
1872  va_list args;
1873  va_start(args, first);
1874  __kmp_GOMP_doacross_wait<unsigned long long>(first, args);
1875  va_end(args);
1876 }
1877 
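// Illustration (editor's sketch, not part of the runtime): for a doacross
// loop nest such as
//
//   #pragma omp for ordered(2)
//   for (long i = 0; i < n; ++i)
//     for (long j = 0; j < m; ++j) {
//       #pragma omp ordered depend(sink: i - 1, j)
//       work(i, j);
//       #pragma omp ordered depend(source)
//     }
//
// GCC lowers the sink clause to roughly GOMP_doacross_wait(i - 1, j) and the
// source clause to GOMP_doacross_post(counts) with the current iteration
// vector; the wrappers above forward both to the __kmpc_doacross_* entry
// points, widening the counts to 8 bytes first when long is 32 bits.
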
1878 // fn: the function each master thread of a new team will call
1879 // data: argument to fn
1880 // num_teams, thread_limit: upper bounds on the respective ICVs
1881 // flags: unused
1882 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS_REG)(void (*fn)(void *),
1883  void *data,
1884  unsigned num_teams,
1885  unsigned thread_limit,
1886  unsigned flags) {
1887  MKLOC(loc, "GOMP_teams_reg");
1888  int gtid = __kmp_entry_gtid();
1889  KA_TRACE(20, ("GOMP_teams_reg: T#%d num_teams=%u thread_limit=%u flags=%u\n",
1890  gtid, num_teams, thread_limit, flags));
1891  __kmpc_push_num_teams(&loc, gtid, num_teams, thread_limit);
1892  __kmpc_fork_teams(&loc, 2, (microtask_t)__kmp_GOMP_microtask_wrapper, fn,
1893  data);
1894  KA_TRACE(20, ("GOMP_teams_reg exit: T#%d\n", gtid));
1895 }
1896 
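// Illustration (editor's sketch, not part of the runtime): with newer GCC,
//
//   #pragma omp teams num_teams(4) thread_limit(8)
//
// reaches the host runtime as roughly GOMP_teams_reg(fn, data, 4, 8,
// /*flags=*/0), which the wrapper above turns into __kmpc_push_num_teams
// followed by __kmpc_fork_teams.
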
1897 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT_DEPEND)(void **depend) {
1898  MKLOC(loc, "GOMP_taskwait_depend");
1899  int gtid = __kmp_entry_gtid();
1900  KA_TRACE(20, ("GOMP_taskwait_depend: T#%d\n", gtid));
1901  kmp_gomp_depends_info_t gomp_depends(depend);
1902  kmp_int32 ndeps = gomp_depends.get_num_deps();
1903  kmp_depend_info_t dep_list[ndeps];
1904  for (kmp_int32 i = 0; i < ndeps; i++)
1905  dep_list[i] = gomp_depends.get_kmp_depend(i);
1906 #if OMPT_SUPPORT
1907  OMPT_STORE_RETURN_ADDRESS(gtid);
1908 #endif
1909  __kmpc_omp_wait_deps(&loc, gtid, ndeps, dep_list, 0, NULL);
1910  KA_TRACE(20, ("GOMP_taskwait_depend exit: T#%d\n", gtid));
1911 }
1912 
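// Illustration (editor's sketch, not part of the runtime): for
//
//   #pragma omp taskwait depend(out: a) depend(in: b)
//
// GCC passes a dependence vector of approximately
//
//   void *depend[] = {(void *)2, (void *)1, &a, &b}; // ndeps, nout, addrs
//
// which the wrapper above converts to kmp_depend_info_t entries and hands
// to __kmpc_omp_wait_deps.
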
1913 /* The following sections of code create aliases for the GOMP_* functions,
1914  then create versioned symbols using the assembler directive .symver. This
1915  is only pertinent for ELF .so libraries. The KMP_VERSION_SYMBOL macro is
1916  defined in kmp_os.h. */
1917 
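// Illustration (editor's sketch; the exact macro lives in kmp_os.h):
// KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0") expands to
// approximately
//
//   void GOMP_barrier_10_alias(void) __attribute__((alias("GOMP_barrier")));
//   __asm__(".symver GOMP_barrier_10_alias, GOMP_barrier@GOMP_1.0");
//
// so one implementation can back every versioned GOMP_* symbol the ELF .so
// exports.
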
1918 #ifdef KMP_USE_VERSION_SYMBOLS
1919 // GOMP_1.0 versioned symbols
1920 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
1921 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
1922 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
1923 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
1924 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
1925 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
1926 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
1927 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
1928 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
1929 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
1930 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
1931 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
1932 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
1933 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
1934 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10,
1935  "GOMP_1.0");
1936 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
1937 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
1938 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
1939 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10,
1940  "GOMP_1.0");
1941 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
1942 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
1943 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
1944 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
1945 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
1946 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
1947 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
1948 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
1949 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
1950 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10,
1951  "GOMP_1.0");
1952 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10,
1953  "GOMP_1.0");
1954 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10,
1955  "GOMP_1.0");
1956 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10,
1957  "GOMP_1.0");
1958 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
1959 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
1960 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
1961 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
1962 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
1963 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
1964 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
1965 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
1966 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");
1967 
1968 // GOMP_2.0 versioned symbols
1969 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
1970 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
1971 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
1972 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
1973 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
1974 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
1975 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20,
1976  "GOMP_2.0");
1977 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20,
1978  "GOMP_2.0");
1979 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20,
1980  "GOMP_2.0");
1981 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20,
1982  "GOMP_2.0");
1983 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20,
1984  "GOMP_2.0");
1985 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20,
1986  "GOMP_2.0");
1987 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20,
1988  "GOMP_2.0");
1989 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20,
1990  "GOMP_2.0");
1991 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
1992 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
1993 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
1994 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");
1995 
1996 // GOMP_3.0 versioned symbols
1997 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");
1998 
1999 // GOMP_4.0 versioned symbols
2000 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
2001 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
2002 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
2003 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
2004 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
2005 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
2006 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
2007 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
2008 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
2009 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
2010 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
2011 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
2012 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
2013 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
2014 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
2015 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
2016 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
2017 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
2018 
2019 // GOMP_4.5 versioned symbols
2020 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP, 45, "GOMP_4.5");
2021 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP_ULL, 45, "GOMP_4.5");
2022 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_POST, 45, "GOMP_4.5");
2023 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_WAIT, 45, "GOMP_4.5");
2024 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START, 45,
2025  "GOMP_4.5");
2026 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START, 45,
2027  "GOMP_4.5");
2028 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START, 45,
2029  "GOMP_4.5");
2030 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START, 45,
2031  "GOMP_4.5");
2032 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_POST, 45, "GOMP_4.5");
2033 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT, 45, "GOMP_4.5");
2034 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START, 45,
2035  "GOMP_4.5");
2036 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START, 45,
2037  "GOMP_4.5");
2038 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START, 45,
2039  "GOMP_4.5");
2040 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START, 45,
2041  "GOMP_4.5");
2042 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START, 45,
2043  "GOMP_4.5");
2044 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT, 45,
2045  "GOMP_4.5");
2046 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START, 45,
2047  "GOMP_4.5");
2048 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT, 45,
2049  "GOMP_4.5");
2050 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START, 45,
2051  "GOMP_4.5");
2052 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT, 45,
2053  "GOMP_4.5");
2054 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START, 45,
2055  "GOMP_4.5");
2056 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT, 45,
2057  "GOMP_4.5");
2058 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC, 45,
2059  "GOMP_4.5");
2060 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED, 45,
2061  "GOMP_4.5");
2062 
2063 // GOMP_5.0 versioned symbols
2064 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT, 50,
2065  "GOMP_5.0");
2066 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START, 50,
2067  "GOMP_5.0");
2068 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT, 50,
2069  "GOMP_5.0");
2070 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START, 50,
2071  "GOMP_5.0");
2072 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT,
2073  50, "GOMP_5.0");
2074 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START,
2075  50, "GOMP_5.0");
2076 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT, 50,
2077  "GOMP_5.0");
2078 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START, 50,
2079  "GOMP_5.0");
2080 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME, 50,
2081  "GOMP_5.0");
2082 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME,
2083  50, "GOMP_5.0");
2084 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS_REG, 50, "GOMP_5.0");
2085 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT_DEPEND, 50, "GOMP_5.0");
2086 
2087 #endif // KMP_USE_VERSION_SYMBOLS
2088 
2089 #ifdef __cplusplus
2090 } // extern "C"
2091 #endif // __cplusplus