LLVM OpenMP* Runtime Library
kmp_taskdeps.cpp
/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// TODO: Improve memory allocation? keep a list of pre-allocated structures?
// allocate in blocks? re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternative to atomic refs for heap-allocated nodes?
// TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
// runtime locks
// TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif

static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
                        // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}

enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;

static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
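
// A hedged illustration (not part of the runtime) of what the shift/xor mix
// buys: addresses with small strides differ in bits that survive the >> 2
// term, so neighboring dependence objects usually spread across buckets
// instead of piling onto one chain.
#if 0 // example only; the formula mirrors __kmp_dephash_hash above
#include <cstdint>
#include <cstdio>
static size_t example_hash(std::intptr_t addr, size_t hsize) {
  // same mix as __kmp_dephash_hash
  return (size_t)((addr >> 6) ^ (addr >> 2)) % hsize;
}
int main() {
  double a[4]; // stand-ins for four dependence addresses
  for (int i = 0; i < 4; ++i)
    std::printf("bucket = %zu\n", example_hash((std::intptr_t)&a[i], 97));
}
#endif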

static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  size_t size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;

  // make sure buckets are properly initialized
  for (size_t i = 0; i < new_size; i++) {
    h->buckets[i] = NULL;
  }

  // insert existing elements in the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // Compute the new hash using the new size, and insert the entry in
      // the new bucket.
      size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket) {
        h->nconflicts++;
      }
      h->buckets[new_bucket] = entry;
    }
  }

  // Free old hash table
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}
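
// A hedged note on the growth policy: __kmp_dephash_find() below asks for an
// extension once the recorded conflicts reach the bucket count
// (nconflicts / size >= 1, integer division). Each extension bumps the
// generation and takes the matching entry of sizes[] above, so the first
// resize lands on sizes[1] == 2003 whether the table started at 97 or 997
// buckets; after generation MAX_GEN - 1 the table stops growing and bucket
// chains simply get longer.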

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}
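
// Layout note: each table is a single allocation; the bucket array starts
// right after the kmp_dephash_t header, hence buckets = (h + 1). The larger
// KMP_DEPHASH_MASTER_SIZE for implicit tasks presumably reflects that an
// implicit task's hash tracks the dependences of many explicit child tasks,
// while an explicit task's own hash usually stays small.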

#define ENTRY_LAST_INS 0
#define ENTRY_LAST_MTXS 1

static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  size_t bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->last_mtxs = NULL;
    entry->last_flag = ENTRY_LAST_INS;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}
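
// Hedged usage note: __kmp_process_deps() calls this once per dependence
// address. The returned entry caches, for that address, the last writer
// (last_out), the readers since then (last_ins) and the mutexinoutset tasks
// since then (last_mtxs), so matching a new dependence never rescans older
// tasks. No locking is needed because sibling tasks are created, and their
// dependences processed, by the one thread executing the parent task.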

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}

static inline void __kmp_track_dependence(kmp_int32 gtid,
                                          kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependencies
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
     task a blocks the execution of b through the ompt_new_dependence_callback
  */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    ompt_data_t *sink_data;
    if (sink_task)
      sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
    else
      sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data), sink_data);
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(gtid, dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(gtid, sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
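
// Both overloads use the same double-checked pattern: the predecessor's
// dn.task is read once without the lock as a cheap filter (a NULL task means
// the node is no longer tied to a live task, typically because it already
// completed), then re-read under KMP_ACQUIRE_DEPNODE, since the predecessor
// can finish and clear dn.task between the two reads. Only the locked check
// may add the successor link and count a predecessor.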

template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_ins = info->last_ins;
    kmp_depnode_list_t *last_mtxs = info->last_mtxs;

    if (dep->flags.out) { // out --> clean lists of ins and mtxs if any
      if (last_ins || last_mtxs) {
        if (info->last_flag == ENTRY_LAST_INS) { // INS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        } else { // MTXS were last
          npredecessors += __kmp_depnode_link_successor(gtid, thread, task,
                                                        node, last_mtxs);
        }
        __kmp_depnode_list_free(thread, last_ins);
        __kmp_depnode_list_free(thread, last_mtxs);
        info->last_ins = NULL;
        info->last_mtxs = NULL;
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (dep_barrier) {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      } else {
        info->last_out = __kmp_node_ref(node);
      }
    } else if (dep->flags.in) {
      // in --> link node to either last_out or last_mtxs, clean earlier deps
      if (last_mtxs) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_MTXS && last_ins) { // MTXS were last
          // clean old INS before creating new list
          __kmp_depnode_list_free(thread, last_ins);
          info->last_ins = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_INS;
      info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    } else {
      KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
      // mtx --> link node to either last_out or last_ins, clean earlier deps
      if (last_ins) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_INS && last_mtxs) { // INS were last
          // clean old MTXS before creating new list
          __kmp_depnode_list_free(thread, last_mtxs);
          info->last_mtxs = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_MTXS;
      info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
      if (info->mtx_lock == NULL) {
        info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
        __kmp_init_lock(info->mtx_lock);
      }
      KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
      kmp_int32 m;
      // Save lock in node's array
      for (m = 0; m < MAX_MTX_DEPS; ++m) {
        // sort pointers in decreasing order to avoid potential livelock
        if (node->dn.mtx_locks[m] < info->mtx_lock) {
          KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
          for (int n = node->dn.mtx_num_locks; n > m; --n) {
            // shift right all lesser non-NULL pointers
            KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
            node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
          }
          node->dn.mtx_locks[m] = info->mtx_lock;
          break;
        }
      }
      KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
      node->dn.mtx_num_locks++;
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}
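
// For orientation, a hedged user-level sketch of the three dependence kinds
// this routine resolves (compiled separately with -fopenmp; not part of this
// file):
#if 0
#include <stdio.h>
int main(void) {
  int x = 0;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : x) // writer: becomes last_out for &x
    x = 42;
#pragma omp task depend(in : x) // readers: linked after the writer,
    printf("%d\n", x);          // may run concurrently with each other
#pragma omp task depend(in : x)
    printf("%d\n", x);
#pragma omp task depend(mutexinoutset : x) // ordered after the readers and
    x++; // mutually exclusive with other mutexinoutset tasks on x
  }
  return 0;
}
#endif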

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
                "possibly aliased dependencies, %d non-aliased dependencies : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0) {
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |=
              (dep_list[j].flags.out ||
               (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
               (dep_list[i].flags.mtx && dep_list[j].flags.in));
          dep_list[i].flags.mtx =
              (dep_list[i].flags.mtx | dep_list[j].flags.mtx) &&
              !dep_list[i].flags.out;
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flags.mtx) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flags.in = 1; // downgrade mutexinoutset to inout
          dep_list[i].flags.out = 1;
          dep_list[i].flags.mtx = 0;
        }
      }
    }
  }

  // doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependencies
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0 ? true : false;
}
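
// A hedged summary of the duplicate-address filtering above (element j is
// folded into i, then voided):
//   in  + in  -> in
//   in  + out -> inout (both flag bits end up set)
//   mtx + in  -> inout (the mix is promoted to out, so mtx is dropped)
//   mtx + mtx -> mtx, as long as no out bit is set
// Independently, at most MAX_MTX_DEPS mutexinoutset deps are kept per node;
// any further ones are downgraded to plain inout.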

kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame),
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependencies */
    /* For OMPD we might want to delay the free until end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial &&
           !(task_team && (task_team->tt.tt_found_proxy_tasks ||
                           task_team->tt.tt_hidden_helper_task_encountered));

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependencies have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
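
// A hedged sketch (not actual clang output) of the call the compiler emits
// for "#pragma omp task depend(inout : x)"; loc, gtid and new_task stand in
// for values the real code generator materializes:
#if 0
  kmp_depend_info_t dep;
  dep.base_addr = (kmp_intptr_t)&x;
  dep.len = sizeof(x);
  dep.flags.in = 1; // inout: both bits set
  dep.flags.out = 1;
  dep.flags.mtx = 0;
  __kmpc_omp_task_with_deps(&loc, gtid, new_task, /*ndeps=*/1, &dep,
                            /*ndeps_noalias=*/0, NULL);
#endif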

#if OMPT_SUPPORT
void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
                                ompt_data_t *taskwait_task_data) {
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        &(current_task->ompt_task_info.task_data), ompt_task_switch,
        taskwait_task_data);
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        taskwait_task_data, ompt_task_complete,
        &(current_task->ompt_task_info.task_data));
  }
  current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
  *taskwait_task_data = ompt_data_none;
}
#endif /* OMPT_SUPPORT */

void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  // this function represents a taskwait construct with depend clause
  // We signal 4 events:
  //  - creation of the taskwait task
  //  - dependences of the taskwait task
  //  - schedule and finish of the taskwait task
  ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
  KMP_ASSERT(taskwait_task_data->ptr == NULL);
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame), taskwait_task_data,
          ompt_task_explicit | ompt_task_undeferred | ompt_task_mergeable, 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        taskwait_task_data, ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependencies */
    /* For OMPD we might want to delay the free until end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
    ompt_deps = NULL;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  // We can return immediately as:
  //  - dependences are not computed in serial teams (except with proxy tasks)
  //  - if the dephash is not yet created it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);
  // the stack owns the node
  __kmp_node_ref(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32<false, false> flag(
      (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

#if OMPT_SUPPORT
  __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}
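
// A hedged user-level sketch of the construct this entry point implements
// (per the comment above, a taskwait with a depend clause); compiled
// separately with -fopenmp:
#if 0
#include <stdio.h>
int main(void) {
  int x = 0;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : x)
    x = 42;
#pragma omp taskwait depend(in : x) // blocks only until the writer of x
    printf("%d\n", x);              // finishes, not all outstanding tasks
  }
  return 0;
}
#endif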