39 extern generic_scheduler* (*AllocateSchedulerPtr)( market& );
45 #if __TBB_TASK_GROUP_CONTEXT
46 context_state_propagation_mutex_type the_context_state_propagation_mutex;
48 uintptr_t the_context_state_propagation_epoch = 0;
59 #if __TBB_TASK_GROUP_CONTEXT
63 #if __TBB_TASK_PRIORITY
82 #if _MSC_VER && !defined(__INTEL_COMPILER)
85 #pragma warning(disable:4355)
92 , my_small_task_count(1)
94 , my_cilk_state(cs_none)
101 #if __TBB_PREVIEW_CRITICAL_TASKS
105 #if __TBB_TASK_PRIORITY
106 my_ref_top_priority = &m.my_global_top_priority;
107 my_ref_reload_epoch = &m.my_global_reload_epoch;
109 #if __TBB_TASK_GROUP_CONTEXT
111 my_context_state_propagation_epoch = the_context_state_propagation_epoch;
112 my_context_list_head.my_prev = &my_context_list_head;
113 my_context_list_head.my_next = &my_context_list_head;
114 ITT_SYNC_CREATE(&my_context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList);
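The two assignments at lines 112-113 establish an empty intrusive, circular context list: the head node links to itself in both directions. A minimal standalone sketch of that invariant (the node layout mirrors the context_list_node_t entry in the glossary below; the helper functions are illustrative, not TBB code):

    struct context_list_node_t {
        context_list_node_t* my_prev;
        context_list_node_t* my_next;
    };

    // An empty list is a head node whose links point back at itself.
    inline void init_empty( context_list_node_t& head ) {
        head.my_prev = &head;
        head.my_next = &head;
    }

    inline bool is_empty( const context_list_node_t& head ) {
        return head.my_next == &head;
    }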
120 #if _MSC_VER && !defined(__INTEL_COMPILER)
122 #endif // warning 4355 is back
124 #if TBB_USE_ASSERT > 1
135 for ( size_t i = 0; i < H; ++i )
136 __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
137 for ( size_t i = H; i < T; ++i ) {
141 tp[i]->prefix().extra_state == es_task_proxy, "task in the deque has invalid state" );
145 __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
156 #if defined(_MSC_VER)&&_MSC_VER<1400 && !_WIN64
158 __asm mov eax, fs:[0x18]
161 NT_TIB *pteb = (NT_TIB*)NtCurrentTeb();
163 __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" );
164 __TBB_ASSERT( stack_size >0, "stack_size not initialized?" );
182 void *stack_base = &stack_size;
183 #if __linux__ && !__bg__
187 size_t np_stack_size = 0;
188 void *stack_limit = NULL;
189 pthread_attr_t np_attr_stack;
190 if( 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) {
191 if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) {
193 pthread_attr_t attr_stack;
194 if ( 0 == pthread_attr_init(&attr_stack) ) {
195 if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) {
196 if ( np_stack_size < stack_size ) {
199 rsb_base = stack_limit;
200 stack_size = np_stack_size/2;
202 stack_limit = (char*)stack_limit + stack_size;
208 pthread_attr_destroy(&attr_stack);
211 my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2);
214 stack_size = size_t((char*)stack_base - (char*)stack_limit);
216 pthread_attr_destroy(&np_attr_stack);
219 __TBB_ASSERT( stack_size>0, "stack size must be positive" );
224 #if __TBB_TASK_GROUP_CONTEXT
230 void generic_scheduler::cleanup_local_context_list () {
232 bool wait_for_concurrent_destroyers_to_leave = false;
233 uintptr_t local_count_snapshot = my_context_state_propagation_epoch;
234 my_local_ctx_list_update.store<relaxed>(1);
242 if ( my_nonlocal_ctx_list_update.load<relaxed>() || local_count_snapshot != the_context_state_propagation_epoch )
243 lock.acquire(my_context_list_mutex);
247 while ( node != &my_context_list_head ) {
254 wait_for_concurrent_destroyers_to_leave = true;
257 my_local_ctx_list_update.store<release>(0);
259 if ( wait_for_concurrent_destroyers_to_leave )
266 #if __TBB_PREVIEW_CRITICAL_TASKS
269 #if __TBB_TASK_GROUP_CONTEXT
270 cleanup_local_context_list();
274 #if __TBB_HOARD_NONLOCAL_TASKS
275 while( task* t = my_nonlocal_free_list ) {
277 my_nonlocal_free_list = p.next;
294 #if __TBB_COUNT_TASK_NODES
295 my_market->update_task_node_count( my_task_node_count );
309 #if __TBB_HOARD_NONLOCAL_TASKS
310 if( (t = my_nonlocal_free_list) ) {
313 my_nonlocal_free_list = t->prefix().next;
323 __TBB_ASSERT( t, "another thread emptied the my_return_list" );
329 #if __TBB_COUNT_TASK_NODES
330 ++my_task_node_count;
332 t->prefix().origin = this;
336 #if __TBB_PREFETCHING
339 #if __TBB_HOARD_NONLOCAL_TASKS
355 #if __TBB_COUNT_TASK_NODES
356 ++my_task_node_count;
358 t->prefix().origin = NULL;
361 #if __TBB_TASK_GROUP_CONTEXT
384 task* old = s.my_return_list;
390 if( as_atomic(s.my_return_list).compare_and_swap(&t, old )==old ) {
391 #if __TBB_PREFETCHING
408 if ( T + num_tasks <= my_arena_slot->my_task_pool_size )
426 for ( size_t i = H; i < T; ++i )
435 if ( new_size < 2 * my_arena_slot->my_task_pool_size )
441 for ( size_t i = H; i < T; ++i )
463 bool sync_prepare_done = false;
479 else if( !sync_prepare_done ) {
482 sync_prepare_done = true;
505 task** victim_task_pool;
506 bool sync_prepare_done = false;
508 victim_task_pool = victim_arena_slot->task_pool;
514 if( sync_prepare_done )
525 else if( !sync_prepare_done ) {
528 sync_prepare_done = true;
532 #if __TBB_STEALING_ABORT_ON_CONTENTION
533 if(!backoff.bounded_pause()) {
549 "not really locked victim's task pool?" );
550 return victim_task_pool;
554 task** victim_task_pool ) const {
555 __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" );
568 __TBB_ASSERT( ref_count>=0, "attempt to spawn task whose parent has a ref_count<0" );
569 __TBB_ASSERT( ref_count!=0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
575 "backwards compatibility to TBB 2.0 tasks is broken" );
576 #if __TBB_TASK_ISOLATION
578 t->prefix().isolation = isolation;
588 #if __TBB_TASK_PRIORITY
600 #if __TBB_PREVIEW_CRITICAL_TASKS
601 bool generic_scheduler::handled_as_critical( task& t ) {
604 #if __TBB_TASK_ISOLATION
610 my_arena->my_critical_task_stream.push(
625 if ( &first->prefix().next == &next ) {
634 #if __TBB_PREVIEW_CRITICAL_TASKS
635 if( !handled_as_critical( *first ) )
665 t_next = t->prefix().next;
666 #if __TBB_PREVIEW_CRITICAL_TASKS
667 if( !handled_as_critical( *t ) )
673 if( size_t num_tasks = tasks.size() ) {
693 t->prefix().parent = &dummy;
694 if( &t->prefix().next==&next ) break;
695 #if __TBB_TASK_GROUP_CONTEXT
697 "all the root tasks in list must share the same context");
718 s->my_arena->enqueue_task(t, (intptr_t)prio, s->my_random );
721 #if __TBB_TASK_PRIORITY
722 class auto_indicator : no_copy {
723 volatile bool& my_indicator;
725 auto_indicator ( volatile bool& indicator ) : my_indicator(indicator) { my_indicator = true; }
726 ~auto_indicator () { my_indicator = false; }
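auto_indicator is a small RAII guard: the constructor raises the flag and the destructor lowers it on every exit path. Line 797 below instantiates it around the pool reshuffling; a usage fragment along the same lines (the function name is hypothetical and the class definition above is assumed in scope):

    volatile bool my_pool_reshuffling_pending = false;

    void reshuffle_pool_sketch() {
        auto_indicator indicator( my_pool_reshuffling_pending );
        // ... reshuffle the task pool; the flag stays true for the whole scope ...
    }   // destructor clears my_pool_reshuffling_pending, even on early return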
734 #if __TBB_TASK_ISOLATION
736 bool tasks_omitted = false;
737 while ( !t && T>H0 ) {
738 t = get_task( --T, isolation, tasks_omitted );
739 if ( !tasks_omitted ) {
745 if ( t && tasks_omitted ) {
775 #if __TBB_TASK_ISOLATION
790 __TBB_ASSERT( my_offloaded_tasks, "At least one task is expected to be already offloaded" );
797 auto_indicator indicator( my_pool_reshuffling_pending );
807 for ( size_t src = H0; src<T0; ++src ) {
811 intptr_t p = priority( *t );
812 if ( p<*my_ref_top_priority ) {
813 offload_task( *t, p );
829 #if __TBB_TASK_ISOLATION
840 task **link = &offloaded_tasks;
841 while ( task *t = *link ) {
842 task** next_ptr = &t->prefix().next_offloaded;
844 if ( priority(*t) >= top_priority ) {
845 tasks.push_back( t );
848 task* next = *next_ptr;
849 t->prefix().owner = this;
857 if ( link == &offloaded_tasks ) {
858 offloaded_tasks = NULL;
860 offloaded_task_list_link = NULL;
867 offloaded_task_list_link = link;
870 size_t num_tasks = tasks.size();
883 if ( t ) --num_tasks;
891 uintptr_t reload_epoch = *my_ref_reload_epoch;
894 || my_local_reload_epoch - reload_epoch > uintptr_t(-1)/2, "Reload epoch counter overflow?" );
896 if ( my_local_reload_epoch == reload_epoch )
899 intptr_t top_priority = effective_reference_priority();
901 task *t = reload_tasks( my_offloaded_tasks, my_offloaded_task_list_tail_link, __TBB_ISOLATION_ARG( top_priority, isolation ) );
918 my_local_reload_epoch = reload_epoch;
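The multi-line assertion whose tail appears at line 894 tolerates wraparound of the reload epoch counter: with unsigned arithmetic, a local epoch that appears "ahead" of the global one by more than half the value range really means the global counter has wrapped past zero. A small sketch of that predicate, assuming the uncaptured first operand of the || is a plain local-vs-global comparison:

    #include <cstdint>

    // True if `local` is not genuinely ahead of `global`, allowing for the
    // global counter having wrapped around zero.
    inline bool local_epoch_not_ahead( uintptr_t local, uintptr_t global ) {
        return local <= global
            || local - global > uintptr_t(-1)/2;   // huge difference => wraparound
    }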
923 #if __TBB_TASK_ISOLATION
930 || is_local_task_pool_quiescent(), "Is it safe to get a task at position T?" );
932 task* result = my_arena_slot->task_pool_ptr[T];
933 __TBB_ASSERT( !is_poisoned( result ), "The poisoned task is going to be processed" );
934 #if __TBB_TASK_ISOLATION
939 if ( !omit && !is_proxy( *result ) )
942 tasks_omitted = true;
947 if ( !result || !is_proxy( *result ) )
951 task_proxy& tp = static_cast<task_proxy&>(*result);
955 __TBB_ASSERT( is_version_3_task( *t ), "backwards compatibility with TBB 2.0 broken" );
957 my_innermost_running_task = t;
958 #if __TBB_TASK_ISOLATION
960 if ( !tasks_omitted )
964 t->note_affinity( my_affinity_id );
970 free_task<small_task>( tp );
971 #if __TBB_TASK_ISOLATION
973 my_arena_slot->task_pool_ptr[T] = NULL;
983 size_t H0 = (size_t)-1, T = T0;
985 bool task_pool_empty = false;
994 if ( (intptr_t)H0 > (intptr_t)T ) {
998 && H0 == T + 1, "victim/thief arbitration algorithm failure" );
1001 task_pool_empty = true;
1003 } else if ( H0 == T ) {
1006 task_pool_empty = true;
1015 #if __TBB_TASK_ISOLATION
1016 result = get_task( T, isolation, tasks_omitted );
1020 } else if ( !tasks_omitted ) {
1028 } while ( !result && !task_pool_empty );
1030 #if __TBB_TASK_ISOLATION
1031 if ( tasks_omitted ) {
1032 if ( task_pool_empty ) {
1095 free_task<no_cache_small_task>(tp);
1103 t->prefix().owner = this;
1114 task* result = NULL;
1117 bool tasks_omitted = false;
1129 result = victim_pool[H-1];
1137 task_proxy& tp = *static_cast<task_proxy*>(result);
1145 tasks_omitted = true;
1146 } else if ( !tasks_omitted ) {
1152 } while ( !result );
1156 ITT_NOTIFY( sync_acquired, (void*)((uintptr_t)&victim_slot+sizeof( uintptr_t )) );
1158 if ( tasks_omitted ) {
1160 victim_pool[H-1] = NULL;
1165 #if __TBB_PREFETCHING
1169 if ( tasks_omitted )
1175 #if __TBB_PREVIEW_CRITICAL_TASKS
1183 if( my_arena->my_critical_task_stream.empty(0) )
1185 task* critical_task = NULL;
1188 #if __TBB_TASK_ISOLATION
1190 critical_task = my_arena->my_critical_task_stream.pop_specific( 0, start_lane, isolation );
1196 return critical_task;
1209 free_task<no_cache_small_task>(*tp);
1216 __TBB_ASSERT ( my_arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" );
1220 "entering arena without tasks to share" );
1242 s->my_arena_index = index;
1243 s->my_dummy_task->prefix().ref_count = 2;
1246 s->init_stack_info();
1257 task& t = *s->my_dummy_task;
1259 t.prefix().ref_count = 1;
1260 #if __TBB_TASK_GROUP_CONTEXT
1263 #if __TBB_FP_CONTEXT
1264 s->default_context()->capture_fp_settings();
1267 s->init_stack_info();
1268 context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
1269 s->my_market->my_masters.push_front( *s );
1274 s->attach_arena( a, 0, true );
1275 s->my_arena_slot->my_scheduler = s;
1276 a->my_default_ctx = s->default_context();
1278 __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
1282 s->my_market->register_master( s->master_exec_resource );
1285 #if __TBB_ARENA_OBSERVER
1286 __TBB_ASSERT( !a || a->my_observers.empty(), "Just created arena cannot have any observers associated with it" );
1288 #if __TBB_SCHEDULER_OBSERVER
1289 the_global_observer_list.notify_entry_observers( s->my_last_global_observer, false );
1296 __TBB_ASSERT( !s.my_arena_slot, "cleaning up attached worker" );
1297 #if __TBB_SCHEDULER_OBSERVER
1299 the_global_observer_list.notify_exit_observers( s.my_last_global_observer, true );
1325 #if __TBB_ARENA_OBSERVER
1327 a->my_observers.notify_exit_observers( my_last_local_observer, false );
1329 #if __TBB_SCHEDULER_OBSERVER
1330 the_global_observer_list.notify_exit_observers( my_last_global_observer, false );
1333 m->unregister_master( master_exec_resource );
1337 #if __TBB_STATISTICS
1342 #if __TBB_TASK_GROUP_CONTEXT
1344 default_context()->~task_group_context();
1347 context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
1357 return m->release( a != NULL, blocking_terminate );
static bool is_shared(intptr_t tat)
True if the proxy is stored both in its sender's pool and in the destination mailbox.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
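A hedged usage fragment for the cache-aligned allocator pair documented here and under NFS_Free further down; the hint argument is passed as NULL on the assumption that it may be left unused, and the declarations from the scheduler's internal headers are assumed in scope:

    void cache_aligned_block_example() {
        void* block = NFS_Allocate( 1, 128, NULL );   // one 128-byte element, NULL hint
        // ... block starts on a cache/sector line boundary ...
        NFS_Free( block );
    }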
void publish_task_pool()
Used by workers to enter the task pool.
static const intptr_t mailbox_bit
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
static const kind_type detached
#define __TBB_CONTEXT_ARG(arg1, context)
Memory prefix to a task object.
unsigned short affinity_id
An id as used for specifying affinity.
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
void fill_with_canary_pattern(size_t, size_t)
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
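steal_task() picks its victim with the FastRandom generator (my_random / get() in this glossary). A standalone sketch of the usual "pick any slot except my own" selection, with a trivial stand-in generator and hypothetical names; it assumes num_slots >= 2:

    #include <cstddef>

    struct tiny_random {                       // stand-in for FastRandom
        unsigned x;
        unsigned short get() { x = x*1103515245u + 12345u; return (unsigned short)(x >> 16); }
    };

    // Choose a victim slot index in [0, num_slots) different from my_index.
    size_t choose_victim( tiny_random& r, size_t my_index, size_t num_slots ) {
        size_t k = r.get() % ( num_slots - 1 );
        if ( k >= my_index )
            ++k;                               // skip over our own slot
        return k;
    }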
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Set if ref_count might be changed by another thread. Used for debugging.
#define __TBB_FetchAndDecrementWrelease(P)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
const isolation_tag no_isolation
task **__TBB_atomic task_pool_ptr
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
task object is freshly allocated or recycled.
#define __TBB_ISOLATION_EXPR(isolation)
#define ITT_SYNC_CREATE(obj, type, name)
void push_back(const T &val)
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
task ** lock_task_pool(arena_slot *victim_arena_slot) const
Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
bool is_quiescent_local_task_pool_reset() const
auto first(Container &c) -> decltype(begin(c))
void on_thread_leaving()
Notification that worker or master leaves its arena.
Used to form groups of tasks.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
void allocate_task_pool(size_t n)
bool is_worker() const
True if running on a worker thread, false otherwise.
Base class for types that should not be copied or assigned.
Vector that grows without reallocations, and stores items in the reverse order.
void pause()
Pause for a while.
__TBB_atomic size_t head
Index of the first ready task in the deque.
Base class for user-defined tasks.
void release_task_pool() const
Unlocks the local task pool.
Work stealing task scheduler.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
Set if the task has been stolen.
static const unsigned ref_external
Reference increment values for externals and workers.
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
intptr_t isolation_tag
A tag for task isolation.
static const kind_type binding_required
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
void atomic_fence()
Sequentially consistent full memory fence.
market * my_market
The market I am in.
void free_scheduler()
Destroy and deallocate this scheduler object.
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
bool is_critical(task &t)
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
Smart holder for the empty task class with automatic destruction.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
#define __TBB_ISOLATION_ARG(arg1, isolation)
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
void __TBB_store_relaxed(volatile T &location, V value)
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
generic_scheduler(market &)
bool is_quiescent_local_task_pool_empty() const
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
task **__TBB_atomic task_pool
context_list_node_t * my_next
#define __TBB_cl_evict(p)
#define __TBB_cl_prefetch(p)
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
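This step amounts to publishing the new tail index with release semantics, so that a thief reading the tail with acquire semantics also observes the task pointers written before it. A sketch with std::atomic standing in for __TBB_store_with_release (names are illustrative):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> tail_index{0};   // one past the last ready task

    void commit_spawned_tasks_sketch( size_t new_tail ) {
        // Everything stored into the task pool before this call becomes visible
        // to a thread that loads tail_index with memory_order_acquire.
        tail_index.store( new_tail, std::memory_order_release );
    }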
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
task * my_free_list
Free list of small tasks that can be reused.
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
void deallocate_task(task &t)
Return task object to the memory allocator.
static const intptr_t num_priority_levels
static const intptr_t location_mask
void local_spawn(task *first, task *&next)
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
static bool is_version_3_task(task &t)
unsigned short get()
Get a random number.
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
task * extract_task()
Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary.
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
bool is_local_task_pool_quiescent() const
#define GATHER_STATISTIC(x)
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
Represents acquisition of a mutex.
static bool is_proxy(const task &t)
True if t is a task_proxy.
void assert_task_pool_valid() const
size_t worker_stack_size() const
Returns the requested stack size of worker threads.
void spawn(task &first, task *&next) __TBB_override
For internal use only.
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
task is in ready pool, or is going to be put there, or was just taken off.
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
task object is on free list, or is going to be put there, or was just taken off.
bool outermost
Indicates that a scheduler is on outermost level.
void acquire_task_pool() const
Locks the local task pool.
void poison_pointer(T *__TBB_atomic &)
scheduler_properties my_properties
Class that implements exponential backoff.
atomic< unsigned > my_limit
The maximal number of currently busy slots.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
#define ITT_NOTIFY(name, obj)
void push(task_proxy *t)
Push task_proxy onto the mailbox queue of another thread.
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
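An illustrative re-implementation of the helper described here, written against std::atomic and a plain yield in place of TBB's exponential backoff (the documented signature takes a volatile reference instead):

    #include <atomic>
    #include <thread>

    template<typename T, typename U>
    void spin_wait_until_eq_sketch( const std::atomic<T>& location, const U value ) {
        while ( location.load( std::memory_order_acquire ) != value )
            std::this_thread::yield();   // a real implementation backs off exponentially
    }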
task * my_dummy_task
Fake root task created by slave threads.
void copy_memory(T *dst) const
Copies the contents of the vector into the dst array.
T __TBB_load_relaxed(const volatile T &location)
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
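Other schedulers push freed small tasks onto my_return_list with a compare-and-swap loop (see the as_atomic(...).compare_and_swap(...) fragment at line 390 in the listing above). The same push pattern in portable C++, with an illustrative node type instead of task:

    #include <atomic>

    struct node_t { node_t* next; };

    void push_to_return_list( std::atomic<node_t*>& return_list, node_t* t ) {
        node_t* old = return_list.load( std::memory_order_relaxed );
        do {
            t->next = old;                        // link on top of the current head
        } while ( !return_list.compare_exchange_weak( old, t,
                      std::memory_order_release, std::memory_order_relaxed ) );
    }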
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
generic_scheduler * allocate_scheduler(market &m)
static generic_scheduler * create_worker(market &m, size_t index)
Initialize a scheduler for a worker thread.
bool recipient_is_idle()
True if thread that owns this mailbox is looking for work.
void Scheduler_OneTimeInitialization(bool itt_present)
Defined in scheduler.cpp.
virtual void local_wait_for_all(task &parent, task *child)=0
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
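Per this description and the quick_task_size entry above, small requests are served from a per-scheduler free list and larger ones from the cache-aligned heap allocator. A standalone, simplified analogue of that small-vs-large policy, with made-up names and threshold and none of the task-prefix bookkeeping:

    #include <cstdlib>
    #include <cstddef>

    struct free_node { free_node* next; };

    static const size_t small_threshold = 256;     // stand-in for quick_task_size
    static free_node*   small_free_list = NULL;    // stand-in for my_free_list

    void* allocate_sketch( size_t bytes ) {
        if ( bytes <= small_threshold && small_free_list ) {
            free_node* n = small_free_list;        // reuse a cached small block
            small_free_list = n->next;
            return n;
        }
        return std::malloc( bytes < sizeof(free_node) ? sizeof(free_node) : bytes );
    }

    void deallocate_sketch( void* p, size_t bytes ) {
        if ( bytes <= small_threshold ) {
            free_node* n = static_cast<free_node*>( p );
            n->next = small_free_list;             // small blocks go back on the list
            small_free_list = n;
        } else {
            std::free( p );
        }
    }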
static const intptr_t pool_bit
size_t my_task_pool_size
Capacity of the primary task pool (number of elements - pointers to task).
virtual ~scheduler()=0
Pure virtual destructor.
void local_spawn_root_and_wait(task *first, task *&next)
state_type state() const
Current execution state.
task_group_context * context()
This method is deprecated and will be removed in the future.
intptr_t reference_count
A reference count.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
task_proxy * pop(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get next piece of mail, or NULL if mailbox is empty.
void leave_task_pool()
Leave the task pool.
void assert_task_valid(const task *)
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
#define __TBB_control_consistency_helper()
A scheduler with a customized evaluation loop.
mail_outbox * outbox
Mailbox to which this was mailed.
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
bool is_task_pool_published() const
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
void __TBB_store_with_release(volatile T &location, V value)
atomic< T > & as_atomic(T &t)
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
generic_scheduler *(* AllocateSchedulerPtr)(market &)
Pointer to the scheduler factory function.
static const kind_type dying
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
static const size_t min_task_pool_size
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)