Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
task_group_context.cpp
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #include "scheduler.h"
22 
23 #include "itt_notify.h"
24 
25 namespace tbb {
26 
27 #if __TBB_TASK_GROUP_CONTEXT
28 
29 using namespace internal;
30 
31 //------------------------------------------------------------------------
32 // captured_exception
33 //------------------------------------------------------------------------
34 
35 inline char* duplicate_string ( const char* src ) {
36  char* dst = NULL;
37  if ( src ) {
38  size_t len = strlen(src) + 1;
39  dst = (char*)allocate_via_handler_v3(len);
40  strncpy (dst, src, len);
41  }
42  return dst;
43 }
44 
45 captured_exception::~captured_exception () throw() {
46  clear();
47 }
48 
49 void captured_exception::set ( const char* a_name, const char* info ) throw() {
50  my_exception_name = duplicate_string( a_name );
51  my_exception_info = duplicate_string( info );
52 }
53 
54 void captured_exception::clear () throw() {
55  deallocate_via_handler_v3 (const_cast<char*>(my_exception_name));
56  deallocate_via_handler_v3 (const_cast<char*>(my_exception_info));
57 }
58 
59 captured_exception* captured_exception::move () throw() {
60  captured_exception *e = (captured_exception*)allocate_via_handler_v3(sizeof(captured_exception));
61  if ( e ) {
62  ::new (e) captured_exception();
63  e->my_exception_name = my_exception_name;
64  e->my_exception_info = my_exception_info;
65  e->my_dynamic = true;
66  my_exception_name = my_exception_info = NULL;
67  }
68  return e;
69 }
70 
71 void captured_exception::destroy () throw() {
72  __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" );
73  if ( my_dynamic ) {
74  this->captured_exception::~captured_exception();
75  deallocate_via_handler_v3 (this);
76  }
77 }
78 
79 captured_exception* captured_exception::allocate ( const char* a_name, const char* info ) {
80  captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) );
81  if ( e ) {
82  ::new (e) captured_exception(a_name, info);
83  e->my_dynamic = true;
84  }
85  return e;
86 }
87 
88 const char* captured_exception::name() const throw() {
89  return my_exception_name;
90 }
91 
92 const char* captured_exception::what() const throw() {
93  return my_exception_info;
94 }
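The following is an illustrative sketch (not part of this file) of the captured_exception life cycle implemented above; the demo function and its strings are hypothetical, and only the public interface declared in tbb/tbb_exception.h is assumed:

    #include "tbb/tbb_exception.h"
    #include <cstdio>

    void demo_captured_exception() {
        // Construct on the stack; move() then builds a heap copy in
        // handler-managed memory and marks it dynamic, so destroy() may free it.
        tbb::captured_exception local( "std::runtime_error", "worker failed" );
        tbb::captured_exception *e = local.move();
        if ( e ) {
            std::printf( "%s: %s\n", e->name(), e->what() );  // intercepted RTTI name and message
            e->destroy();   // releases the storage via deallocate_via_handler_v3
        }
    }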
95 
96 
97 //------------------------------------------------------------------------
98 // tbb_exception_ptr
99 //------------------------------------------------------------------------
100 
101 #if !TBB_USE_CAPTURED_EXCEPTION
102 
103 namespace internal {
104 
105 template<typename T>
106 tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
107  tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) );
108  if ( eptr )
109  new (eptr) tbb_exception_ptr(src);
110  return eptr;
111 }
112 
113 tbb_exception_ptr* tbb_exception_ptr::allocate () {
114  return AllocateExceptionContainer( std::current_exception() );
115 }
116 
117 tbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) {
118  return AllocateExceptionContainer( std::current_exception() );
119 }
120 
121 tbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) {
122  tbb_exception_ptr *res = AllocateExceptionContainer( src );
123  src.destroy();
124  return res;
125 }
126 
127 void tbb_exception_ptr::destroy () throw() {
128  this->tbb_exception_ptr::~tbb_exception_ptr();
129  deallocate_via_handler_v3 (this);
130 }
131 
132 } // namespace internal
133 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
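An illustrative sketch of how the container above is used: allocate() must run inside a catch block so that std::current_exception() refers to the in-flight exception, and throw_self() (declared in tbb/tbb_exception.h) rethrows it later, typically on the thread waiting for the task tree. The demo function and message are hypothetical:

    #include "tbb/tbb_exception.h"
    #include <stdexcept>
    #include <cstdio>

    void demo_tbb_exception_ptr() {
        tbb::internal::tbb_exception_ptr *eptr = NULL;
        try {
            throw std::runtime_error( "worker failed" );
        } catch ( ... ) {
            eptr = tbb::internal::tbb_exception_ptr::allocate();  // wraps std::current_exception()
        }
        if ( eptr ) {
            try {
                eptr->throw_self();   // rethrows the stored std::exception_ptr
            } catch ( const std::runtime_error &e ) {
                std::printf( "caught again: %s\n", e.what() );
            }
            eptr->destroy();          // releases the handler-allocated container
        }
    }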
134 
135 
136 //------------------------------------------------------------------------
137 // task_group_context
138 //------------------------------------------------------------------------
139 
140 task_group_context::~task_group_context () {
141  if ( __TBB_load_relaxed(my_kind) == binding_completed ) {
142  if ( governor::is_set(my_owner) ) {
143  // Local update of the context list
144  uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
145  my_owner->my_local_ctx_list_update.store<relaxed>(1);
146  // Prevent load of nonlocal update flag from being hoisted before the
147  // store to local update flag.
148  atomic_fence();
149  if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
150  spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
151  my_node.my_prev->my_next = my_node.my_next;
152  my_node.my_next->my_prev = my_node.my_prev;
153  my_owner->my_local_ctx_list_update.store<relaxed>(0);
154  }
155  else {
156  my_node.my_prev->my_next = my_node.my_next;
157  my_node.my_next->my_prev = my_node.my_prev;
158  // A release fence is necessary so that the update of our neighbors in
159  // the context list is committed before a possible concurrent destroyer
160  // proceeds after the local update flag is reset by the following store.
161  my_owner->my_local_ctx_list_update.store<release>(0);
162  if ( local_count_snapshot != the_context_state_propagation_epoch ) {
163  // Another thread was propagating cancellation request when we removed
164  // ourselves from the list. We must ensure that it is not accessing us
165  // when this destructor finishes. We'll be able to acquire the lock
166  // below only after the other thread finishes with us.
167  spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
168  }
169  }
170  }
171  else {
172  // Nonlocal update of the context list
173  // Synchronizes with generic_scheduler::cleanup_local_context_list()
174  // TODO: evaluate and perhaps relax, or add some lock instead
175  if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {
176  my_node.my_prev->my_next = my_node.my_next;
177  my_node.my_next->my_prev = my_node.my_prev;
178  }
179  else {
180  //TODO: evaluate and perhaps relax
181  my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
182  //TODO: evaluate and perhaps remove
183  spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );
184  my_owner->my_context_list_mutex.lock();
185  my_node.my_prev->my_next = my_node.my_next;
186  my_node.my_next->my_prev = my_node.my_prev;
187  my_owner->my_context_list_mutex.unlock();
188  //TODO: evaluate and perhaps relax
189  my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
190  }
191  }
192  }
193 #if __TBB_FP_CONTEXT
194  internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
195 #endif
196  poison_value(my_version_and_traits);
197  if ( my_exception )
198  my_exception->destroy();
199  ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);
200 }
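A self-contained sketch (not TBB code) of the Dekker-like flag protocol the destructor uses: each side raises its own flag, issues a full fence, then checks the other side's flag, so that when a local removal races with a nonlocal one at least one side detects the conflict and falls back to the context-list mutex. All names below are illustrative stand-ins for the members used above:

    #include <atomic>
    #include <mutex>

    std::atomic<int> local_update(0);     // stands in for my_local_ctx_list_update
    std::atomic<int> nonlocal_update(0);  // stands in for my_nonlocal_ctx_list_update
    std::mutex list_mutex;                // stands in for my_context_list_mutex

    void unlink_locally() {
        local_update.store(1, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);  // corresponds to atomic_fence()
        if (nonlocal_update.load(std::memory_order_relaxed)) {
            std::lock_guard<std::mutex> lock(list_mutex);      // conflict: unlink under the lock
            /* unlink the node here */
            local_update.store(0, std::memory_order_relaxed);
        } else {
            /* fast path: unlink the node without the lock */
            local_update.store(0, std::memory_order_release);
        }
    }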
201 
202 void task_group_context::init () {
203 #if DO_ITT_NOTIFY
204  // Check version of task group context to avoid reporting misleading identifier.
205  if( ( my_version_and_traits & version_mask ) < 3 )
206  my_name = internal::CUSTOM_CTX;
207 #endif
208  ITT_TASK_GROUP(this, my_name, NULL);
209  __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" );
210  __TBB_STATIC_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" );
211  __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );
212  __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == isolated || __TBB_load_relaxed(my_kind) == bound, "Context can be created only as isolated or bound" );
213  my_parent = NULL;
214  my_cancellation_requested = 0;
215  my_exception = NULL;
216  my_owner = NULL;
217  my_state = 0;
218  itt_caller = ITT_CALLER_NULL;
219 #if __TBB_TASK_PRIORITY
220  my_priority = normalized_normal_priority;
221 #endif /* __TBB_TASK_PRIORITY */
222 #if __TBB_FP_CONTEXT
223  __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The reserved space for FPU settings are not equal sizeof(uint64_t)" );
224  __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit to uint64_t" );
225  suppress_unused_warning( my_cpu_ctl_env.space );
226 
227  cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
228  new ( &ctl ) cpu_ctl_env;
229  if ( my_version_and_traits & fp_settings )
230  ctl.get_env();
231 #endif
232 }
233 
234 void task_group_context::register_with ( generic_scheduler *local_sched ) {
235  __TBB_ASSERT( local_sched, NULL );
236  my_owner = local_sched;
237  // state propagation logic assumes new contexts are bound to head of the list
238  my_node.my_prev = &local_sched->my_context_list_head;
239  // Notify threads that may be concurrently destroying contexts registered
240  // in this scheduler's list that local list update is underway.
241  local_sched->my_local_ctx_list_update.store<relaxed>(1);
242  // Prevent load of global propagation epoch counter from being hoisted before
243  // speculative stores above, as well as load of nonlocal update flag from
244  // being hoisted before the store to local update flag.
245  atomic_fence();
246  // Finalize local context list update
247  if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {
248  spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
249  local_sched->my_context_list_head.my_next->my_prev = &my_node;
250  my_node.my_next = local_sched->my_context_list_head.my_next;
251  my_owner->my_local_ctx_list_update.store<relaxed>(0);
252  local_sched->my_context_list_head.my_next = &my_node;
253  }
254  else {
255  local_sched->my_context_list_head.my_next->my_prev = &my_node;
256  my_node.my_next = local_sched->my_context_list_head.my_next;
257  my_owner->my_local_ctx_list_update.store<release>(0);
258  // Thread-local list of contexts allows concurrent traversal by another thread
259  // while propagating state change. To ensure visibility of my_node's members
260  // to the concurrently traversing thread, the list's head is updated by means
261  // of store-with-release.
262  __TBB_store_with_release(local_sched->my_context_list_head.my_next, &my_node);
263  }
264 }
265 
266 void task_group_context::bind_to ( generic_scheduler *local_sched ) {
267  __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, "Already bound or isolated?" );
268  __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
269  my_parent = local_sched->my_innermost_running_task->prefix().context;
270 #if __TBB_FP_CONTEXT
271  // Inherit FPU settings only if the context has not captured FPU settings yet.
272  if ( !(my_version_and_traits & fp_settings) )
273  copy_fp_settings(*my_parent);
274 #endif
275 
276  // The condition below prevents unnecessary thrashing of the parent context's cache line
277  if ( !(my_parent->my_state & may_have_children) )
278  my_parent->my_state |= may_have_children; // full fence is below
279  if ( my_parent->my_parent ) {
280  // Even if this context were made accessible for state change propagation
281  // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node)
282  // above), it still could be missed if state propagation from a grand-ancestor
283  // was underway concurrently with binding.
284  // Speculative propagation from the parent, together with epoch counters
285  // that detect the possibility of such a race, allows avoiding locks when
286  // there is no contention.
287 
288  // Acquire fence is necessary to prevent reordering subsequent speculative
289  // loads of parent state data out of the scope where epoch counters comparison
290  // can reliably validate it.
291  uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );
292  // Speculative propagation of parent's state. The speculation will be
293  // validated by the epoch counters check further on.
294  my_cancellation_requested = my_parent->my_cancellation_requested;
295 #if __TBB_TASK_PRIORITY
296  my_priority = my_parent->my_priority;
297 #endif /* __TBB_TASK_PRIORITY */
298  register_with( local_sched ); // Issues full fence
299 
300  // If no state propagation was detected by the following condition, the above
301  // full fence guarantees that the parent had correct state during speculative
302  // propagation before the fence. Otherwise the propagation from parent is
303  // repeated under the lock.
304  if ( local_count_snapshot != the_context_state_propagation_epoch ) {
305  // Another thread may be propagating state change right now. So resort to lock.
306  context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
307  my_cancellation_requested = my_parent->my_cancellation_requested;
308 #if __TBB_TASK_PRIORITY
309  my_priority = my_parent->my_priority;
310 #endif /* __TBB_TASK_PRIORITY */
311  }
312  }
313  else {
314  register_with( local_sched ); // Issues full fence
315  // As we do not have grand-ancestors, concurrent state propagation (if any)
316  // may originate only from the parent context, and thus it is safe to directly
317  // copy the state from it.
318  my_cancellation_requested = my_parent->my_cancellation_requested;
319 #if __TBB_TASK_PRIORITY
320  my_priority = my_parent->my_priority;
321 #endif /* __TBB_TASK_PRIORITY */
322  }
323  __TBB_store_relaxed(my_kind, binding_completed);
324 }
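A self-contained sketch (not TBB code) of the "speculate, then validate against an epoch counter" pattern that bind_to() relies on: snapshot the propagation epoch, copy the parent's state optimistically, and repeat the copy under the propagation lock only if the epoch moved in the meantime. The names below are illustrative:

    #include <atomic>
    #include <mutex>
    #include <cstdint>

    std::atomic<std::uintptr_t> propagation_epoch(0);    // bumped by every propagation pass
    std::atomic<std::uintptr_t> parent_cancellation(0);  // state being inherited
    std::mutex propagation_mutex;

    void speculative_inherit(std::uintptr_t &my_cancellation) {
        std::uintptr_t snapshot = propagation_epoch.load(std::memory_order_acquire);
        my_cancellation = parent_cancellation.load(std::memory_order_relaxed);  // speculative copy
        std::atomic_thread_fence(std::memory_order_seq_cst);  // register_with() supplies this fence in TBB
        if (snapshot != propagation_epoch.load(std::memory_order_relaxed)) {
            // A propagation pass raced with the speculative copy: redo it under the lock.
            std::lock_guard<std::mutex> lock(propagation_mutex);
            my_cancellation = parent_cancellation.load(std::memory_order_relaxed);
        }
    }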
325 
326 template <typename T>
327 void task_group_context::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
328  if (this->*mptr_state == new_state) {
329  // Nothing to do, whether descending from "src" or not, so no need to scan.
330  // Hopefully this happens often thanks to earlier invocations.
331  // This optimization is enabled by LIFO order in the context lists:
332  // - new contexts are bound to the beginning of lists;
333  // - descendants are newer than ancestors;
334  // - earlier invocations are therefore likely to "paint" long chains.
335  }
336  else if (this == &src) {
337  // This clause is disjunct from the traversal below, which skips src entirely.
338  // Note that src.*mptr_state is not necessarily still equal to new_state (another thread may have changed it again).
339  // Such interference is probably not frequent enough to aim for optimisation by writing new_state again (to make the other thread back down).
340  // Letting the other thread prevail may also be fairer.
341  }
342  else {
343  for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) {
344  __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted");
345  if ( ancestor == &src ) {
346  for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent )
347  ctx->*mptr_state = new_state;
348  break;
349  }
350  }
351  }
352 }
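A minimal standalone sketch of the ancestor walk above: climb the parent links from this context, and if src lies on the path, "paint" every context strictly below src with the new state. The ctx_node type is a hypothetical stand-in for task_group_context:

    #include <cstddef>

    struct ctx_node { ctx_node *parent; int state; };

    void propagate_if_descendant( ctx_node *self, ctx_node *src, int new_state ) {
        for ( ctx_node *ancestor = self->parent; ancestor != NULL; ancestor = ancestor->parent ) {
            if ( ancestor == src ) {
                for ( ctx_node *c = self; c != ancestor; c = c->parent )
                    c->state = new_state;   // paint the chain below src
                break;                      // src found on the path; done
            }
        }
    }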
353 
354 template <typename T>
355 void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
356  spin_mutex::scoped_lock lock(my_context_list_mutex);
357  // Acquire fence is necessary to ensure that the subsequent node->my_next load
358  // returned the correct value in case it was just inserted in another thread.
359  // The fence also ensures visibility of the correct my_parent value.
360  context_list_node_t *node = __TBB_load_with_acquire(my_context_list_head.my_next);
361  while ( node != &my_context_list_head ) {
362  task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
363  if ( ctx.*mptr_state != new_state )
364  ctx.propagate_task_group_state( mptr_state, src, new_state );
365  node = node->my_next;
366  __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
367  }
368  // Sync up local propagation epoch with the global one. Release fence prevents
369  // reordering of possible store to *mptr_state after the sync point.
370  __TBB_store_with_release(my_context_state_propagation_epoch, the_context_state_propagation_epoch);
371 }
372 
373 template <typename T>
374 bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
375  if ( !(src.my_state & task_group_context::may_have_children) )
376  return true;
377  // The whole propagation algorithm is under the lock in order to ensure correctness
378  // in case of concurrent state changes at the different levels of the context tree.
379  // See comment at the bottom of scheduler.cpp
380  context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
381  if ( src.*mptr_state != new_state )
382  // Another thread has concurrently changed the state. Back down.
383  return false;
384  // Advance global state propagation epoch
385  __TBB_FetchAndAddWrelease(&the_context_state_propagation_epoch, 1);
386  // Propagate to all workers and masters and sync up their local epochs with the global one
387  unsigned num_workers = my_first_unused_worker_idx;
388  for ( unsigned i = 0; i < num_workers; ++i ) {
389  generic_scheduler *s = my_workers[i];
390  // If the worker is only about to be registered, skip it.
391  if ( s )
392  s->propagate_task_group_state( mptr_state, src, new_state );
393  }
394  // Propagate to all master threads
395  // The whole propagation sequence is locked, thus no contention is expected
396  for( scheduler_list_type::iterator it = my_masters.begin(); it != my_masters.end(); it++ )
397  it->propagate_task_group_state( mptr_state, src, new_state );
398  return true;
399 }
400 
401 bool task_group_context::cancel_group_execution () {
402  __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
403  if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {
404  // This task group and any descendants have already been canceled.
405  // (A newly added descendant would inherit its parent's my_cancellation_requested,
406  // not missing out on any cancellation still being propagated, and a context cannot be uncanceled.)
407  return false;
408  }
409  governor::local_scheduler_weak()->my_market->propagate_task_group_state( &task_group_context::my_cancellation_requested, *this, (uintptr_t)1 );
410  return true;
411 }
412 
413 bool task_group_context::is_group_execution_cancelled () const {
414  return my_cancellation_requested != 0;
415 }
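An illustrative user-level sketch (not part of this file): cancelling the context stops scheduling of the remaining tasks in the group, and the flag can be read back afterwards. The demo function and the should_stop() predicate are hypothetical:

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"
    #include <cstdio>

    bool should_stop( int i ) { return i == 12345; }  // hypothetical stop condition

    void demo_cancellation() {
        tbb::task_group_context ctx;
        tbb::parallel_for( 0, 1000000,
            [&]( int i ) {
                if ( should_stop(i) )
                    ctx.cancel_group_execution();  // invokes the method defined above
            },
            ctx );
        if ( ctx.is_group_execution_cancelled() )
            std::printf( "loop was cancelled before completion\n" );
    }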
416 
417 // IMPORTANT: It is assumed that this method is not used concurrently!
418 void task_group_context::reset () {
420  // No fences are necessary since this context can be accessed from another thread
421  // only after stealing happened (which means necessary fences were used).
422  if ( my_exception ) {
423  my_exception->destroy();
424  my_exception = NULL;
425  }
426  my_cancellation_requested = 0;
427 }
428 
429 #if __TBB_FP_CONTEXT
430 // IMPORTANT: It is assumed that this method is not used concurrently!
431 void task_group_context::capture_fp_settings () {
433  // No fences are necessary since this context can be accessed from another thread
434  // only after stealing happened (which means necessary fences were used).
435  cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
436  if ( !(my_version_and_traits & fp_settings) ) {
437  new ( &ctl ) cpu_ctl_env;
438  my_version_and_traits |= fp_settings;
439  }
440  ctl.get_env();
441 }
442 
443 void task_group_context::copy_fp_settings( const task_group_context &src ) {
444  __TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." );
445  __TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." );
446 
447  cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
448  cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);
449  new (&ctl) cpu_ctl_env( src_ctl );
450  my_version_and_traits |= fp_settings;
451 }
452 #endif /* __TBB_FP_CONTEXT */
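An illustrative sketch, assuming a build with __TBB_FP_CONTEXT and the public fp_settings trait: a context constructed with that trait captures the constructing thread's FPU control state in init() above, and capture_fp_settings() re-captures it later. The demo function is hypothetical:

    #include "tbb/task.h"
    #include <cfenv>

    void demo_fp_settings() {
        std::fesetround( FE_TOWARDZERO );  // establish the desired FP environment first
        // The fp_settings trait makes init() capture the current FPU control state, so
        // tasks run under this context use it regardless of the worker thread's own state.
        tbb::task_group_context ctx( tbb::task_group_context::isolated,
                                     tbb::task_group_context::default_traits
                                     | tbb::task_group_context::fp_settings );
        // ... run parallel algorithms with ctx ...
        ctx.capture_fp_settings();         // re-capture after changing the FP environment
    }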
453 
454 void task_group_context::register_pending_exception () {
455  if ( my_cancellation_requested )
456  return;
457 #if TBB_USE_EXCEPTIONS
458  try {
459  throw;
460  } TbbCatchAll( this );
461 #endif /* TBB_USE_EXCEPTIONS */
462 }
463 
464 #if __TBB_TASK_PRIORITY
465 void task_group_context::set_priority ( priority_t prio ) {
466  __TBB_ASSERT( prio == priority_low || prio == priority_normal || prio == priority_high, "Invalid priority level value" );
467  intptr_t p = normalize_priority(prio);
468  if ( my_priority == p && !(my_state & task_group_context::may_have_children))
469  return;
470  my_priority = p;
471  internal::generic_scheduler* s = governor::local_scheduler_if_initialized();
472  if ( !s || !s->my_arena || !s->my_market->propagate_task_group_state(&task_group_context::my_priority, *this, p) )
473  return;
474 
475  // TODO: the arena of the target task group may be unknown at this point;
476  // need to find out the right arena for priority update.
477  // The executing status check only guarantees being inside some working arena.
478  if ( s->my_innermost_running_task->state() == task::executing )
479  // Updating the arena priority here does not eliminate the necessity of checking each
480  // task's priority and updating the arena priority if necessary before task execution.
481  // These checks will be necessary because:
482  // a) set_priority() may be invoked before any tasks from this task group are spawned;
483  // b) all spawned tasks from this task group are retrieved from the task pools.
484  // These cases create a time window when arena priority may be lowered.
485  s->my_market->update_arena_priority( *s->my_arena, p );
486 }
487 
488 priority_t task_group_context::priority () const {
489  return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);
490 }
491 #endif /* __TBB_TASK_PRIORITY */
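An illustrative sketch, assuming a build with __TBB_TASK_PRIORITY: set_priority() above propagates the level through the context's subtree, and priority() reads it back as the public enumeration value. The demo function is hypothetical:

    #include "tbb/task.h"
    #include <cassert>

    void demo_priority() {
        tbb::task_group_context ctx;
        ctx.set_priority( tbb::priority_high );        // propagated to descendants, see above
        assert( ctx.priority() == tbb::priority_high );
    }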
492 
493 #endif /* __TBB_TASK_GROUP_CONTEXT */
494 
495 } // namespace tbb