Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
Loading...
Searching...
No Matches
task_arena.h
Go to the documentation of this file.
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#ifndef __TBB_task_arena_H
18#define __TBB_task_arena_H
19
20#define __TBB_task_arena_H_include_area
22
23#include "task.h"
24#include "tbb_exception.h"
26#if __TBB_NUMA_SUPPORT
27#include "info.h"
28#endif /*__TBB_NUMA_SUPPORT*/
29#if TBB_USE_THREADING_TOOLS
30#include "atomic.h" // for as_atomic
31#endif
32#include "aligned_space.h"
33
34namespace tbb {
35
36namespace this_task_arena {
37 int max_concurrency();
38} // namespace this_task_arena
39
41namespace internal {
43
44 class arena;
46} // namespace internal
48
49namespace interface7 {
50class task_arena;
51
53namespace internal {
54using namespace tbb::internal; //e.g. function_task from task.h
55
57public:
58 virtual void operator()() const = 0;
59 virtual ~delegate_base() {}
60};
61
62// If decltype is available, the helper detects the return type of functor of specified type,
63// otherwise it defines the void type.
64template <typename F>
66#if __TBB_CPP11_DECLTYPE_PRESENT && !__TBB_CPP11_DECLTYPE_OF_FUNCTION_RETURN_TYPE_BROKEN
67 typedef decltype(declval<F>()()) type;
68#else
69 typedef void type;
70#endif
71};
72
73template<typename F, typename R>
76 tbb::aligned_space<R> my_return_storage;
77 // The function should be called only once.
79 new (my_return_storage.begin()) R(my_func());
80 }
81public:
83 // The function can be called only after operator() and only once.
84 R consume_result() const {
85 return tbb::internal::move(*(my_return_storage.begin()));
86 }
88 my_return_storage.begin()->~R();
89 }
90};
91
92template<typename F>
96 my_func();
97 }
98public:
100 void consume_result() const {}
101
102 friend class task_arena_base;
103};
104
106#if __TBB_NUMA_SUPPORT
107public:
 108 // TODO: consider a versioning approach to resolve potential backward compatibility issues.
109 struct constraints {
110 constraints(numa_node_id id = automatic, int maximal_concurrency = automatic)
111 : numa_id(id)
112 , max_concurrency(maximal_concurrency)
113 {}
114 numa_node_id numa_id;
115 int max_concurrency;
116 };
117#endif /*__TBB_NUMA_SUPPORT*/
118protected:
121
122#if __TBB_TASK_GROUP_CONTEXT
125#endif
126
129
132
135
137
138#if __TBB_NUMA_SUPPORT
140 numa_node_id my_numa_id;
141
142 // Do not access my_numa_id without the following runtime check.
 143 // Although my_numa_id is accessible, it does not exist in task_arena_base on the user side
 144 // if the TBB_PREVIEW_NUMA_SUPPORT macro is not defined by the user. To be sure that
 145 // my_numa_id exists in the task_arena_base layout we check the traits.
146 // TODO: Consider increasing interface version for task_arena_base instead of this runtime check.
147 numa_node_id numa_id() {
148 return (my_version_and_traits & numa_support_flag) == numa_support_flag ? my_numa_id : automatic;
149 }
150#endif
151
152 enum {
153 default_flags = 0
154#if __TBB_TASK_GROUP_CONTEXT
156 , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly
157#endif
158#if __TBB_NUMA_SUPPORT
159 , numa_support_flag = 1
160#endif
161 };
162
163 task_arena_base(int max_concurrency, unsigned reserved_for_masters)
164 : my_arena(0)
166 , my_context(0)
167#endif
168 , my_max_concurrency(max_concurrency)
169 , my_master_slots(reserved_for_masters)
170#if __TBB_NUMA_SUPPORT
171 , my_version_and_traits(default_flags | numa_support_flag)
172#else
174#endif
175 , my_initialized(false)
176#if __TBB_NUMA_SUPPORT
177 , my_numa_id(automatic)
178#endif
179 {}
180
181#if __TBB_NUMA_SUPPORT
182 task_arena_base(const constraints& constraints_, unsigned reserved_for_masters)
183 : my_arena(0)
185 , my_context(0)
186#endif
187 , my_max_concurrency(constraints_.max_concurrency)
188 , my_master_slots(reserved_for_masters)
189 , my_version_and_traits(default_flags | numa_support_flag)
190 , my_initialized(false)
191 , my_numa_id(constraints_.numa_id )
192 {}
193#endif /*__TBB_NUMA_SUPPORT*/
194
203public:
205 static const int automatic = -1;
206 static const int not_initialized = -2;
207
208};
209
210#if __TBB_TASK_ISOLATION
212
213template<typename R, typename F>
217 return d.consume_result();
218}
219#endif /* __TBB_TASK_ISOLATION */
220} // namespace internal
222
232 , priority_t
233#endif
234 );
237 __TBB_ASSERT( my_arena, "task_arena initialization is incomplete" );
238#if __TBB_TASK_GROUP_CONTEXT
239 __TBB_ASSERT( my_context, "task_arena initialization is incomplete" );
240#endif
241#if TBB_USE_THREADING_TOOLS
242 // Actual synchronization happens in internal_initialize & internal_attach.
243 // The race on setting my_initialized is benign, but should be hidden from Intel(R) Inspector
244 internal::as_atomic(my_initialized).fetch_and_store<release>(true);
245#else
246 my_initialized = true;
247#endif
248 }
249
250 template<typename F>
253 , priority_t p = priority_t(0)
254#endif
255 ) {
256#if !__TBB_TASK_PRIORITY
257 intptr_t p = 0;
258#endif
259 initialize();
260#if __TBB_TASK_GROUP_CONTEXT
262#else
263 internal_enqueue(*new(task::allocate_root()) internal::function_task< typename internal::strip<F>::type >(internal::forward<F>(f)), p);
264#endif /* __TBB_TASK_GROUP_CONTEXT */
265 }
266
267 template<typename R, typename F>
268 R execute_impl(F& f) {
269 initialize();
272 return d.consume_result();
273 }
274
275public:
277
282 task_arena(int max_concurrency_ = automatic, unsigned reserved_for_masters = 1)
283 : task_arena_base(max_concurrency_, reserved_for_masters)
284 {}
285
286#if __TBB_NUMA_SUPPORT
288 task_arena(const constraints& constraints_, unsigned reserved_for_masters = 1)
289 : task_arena_base(constraints_, reserved_for_masters)
290 {}
291
293 task_arena(const task_arena &s) // copy settings but not the reference or instance
294 : task_arena_base(constraints(s.my_numa_id, s.my_max_concurrency), s.my_master_slots)
295 {}
296#else
298 task_arena(const task_arena &s) // copy settings but not the reference or instance
300 {}
301#endif /*__TBB_NUMA_SUPPORT*/
302
304 struct attach {};
305
307 explicit task_arena( attach )
308 : task_arena_base(automatic, 1) // use default settings if attach fails
309 {
311 if( my_arena ) my_initialized = true;
312 }
313
315 inline void initialize() {
316 if( !my_initialized ) {
319 }
320 }
321
323 inline void initialize(int max_concurrency_, unsigned reserved_for_masters = 1) {
324 // TODO: decide if this call must be thread-safe
325 __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
326 if( !my_initialized ) {
327 my_max_concurrency = max_concurrency_;
328 my_master_slots = reserved_for_masters;
329 initialize();
330 }
331 }
332
333#if __TBB_NUMA_SUPPORT
334 inline void initialize(constraints constraints_, unsigned reserved_for_masters = 1) {
335 // TODO: decide if this call must be thread-safe
336 __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
337 if( !my_initialized ) {
338 my_numa_id = constraints_.numa_id;
339 my_max_concurrency = constraints_.max_concurrency;
340 my_master_slots = reserved_for_masters;
341 initialize();
342 }
343 }
344#endif /*__TBB_NUMA_SUPPORT*/
345
347 inline void initialize(attach) {
348 // TODO: decide if this call must be thread-safe
349 __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
350 if( !my_initialized ) {
354 }
355 }
356
359 inline void terminate() {
360 if( my_initialized ) {
362 my_initialized = false;
363 }
364 }
365
369 terminate();
370 }
371
374 bool is_active() const { return my_initialized; }
375
378
379#if __TBB_CPP11_RVALUE_REF_PRESENT
380 template<typename F>
381 void enqueue( F&& f ) {
382 enqueue_impl(std::forward<F>(f));
383 }
384#else
385 template<typename F>
386 void enqueue( const F& f ) {
387 enqueue_impl(f);
388 }
389#endif
390
391#if __TBB_TASK_PRIORITY
394 template<typename F>
395#if __TBB_CPP11_RVALUE_REF_PRESENT
397#if __TBB_PREVIEW_CRITICAL_TASKS
399 || p == internal::priority_critical, "Invalid priority level value");
400#else
401 __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
402#endif
403 enqueue_impl(std::forward<F>(f), p);
404 }
405#else
406 __TBB_DEPRECATED void enqueue( const F& f, priority_t p ) {
407#if __TBB_PREVIEW_CRITICAL_TASKS
409 || p == internal::priority_critical, "Invalid priority level value");
410#else
411 __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
412#endif
413 enqueue_impl(f,p);
414 }
415#endif
416#endif// __TBB_TASK_PRIORITY
417
422 template<typename F>
424 return execute_impl<typename internal::return_type_or_void<F>::type>(f);
425 }
426
431 template<typename F>
433 return execute_impl<typename internal::return_type_or_void<F>::type>(f);
434 }
435
436#if __TBB_EXTRA_DEBUG
440 void debug_wait_until_empty() {
441 initialize();
443 }
444#endif //__TBB_EXTRA_DEBUG
445
448 inline static int current_thread_index() {
449 return internal_current_slot();
450 }
451
453 inline int max_concurrency() const {
454 // Handle special cases inside the library
456 }
457};
458
459namespace this_task_arena {
460#if __TBB_TASK_ISOLATION
463 template<typename F>
465 return internal::isolate_impl<typename internal::return_type_or_void<F>::type>(f);
466 }
467
470 template<typename F>
472 return internal::isolate_impl<typename internal::return_type_or_void<F>::type>(f);
473 }
474#endif /* __TBB_TASK_ISOLATION */
475} // namespace this_task_arena
476} // namespace interfaceX
477
479
480namespace this_task_arena {
481 using namespace interface7::this_task_arena;
482
484 inline int current_thread_index() {
486 return idx == -1 ? tbb::task_arena::not_initialized : idx;
487 }
488
490 inline int max_concurrency() {
492 }
493} // namespace this_task_arena
494
496#if __TBB_TASK_PRIORITY
498#else
499void task::enqueue( task& t, task_arena& arena ) {
500 intptr_t p = 0;
501#endif
502 arena.initialize();
504 arena.internal_enqueue(t, p);
505}
506} // namespace tbb
507
509#undef __TBB_task_arena_H_include_area
510
511#endif /* __TBB_task_arena_H */
#define __TBB_EXPORTED_FUNC
#define __TBB_DEPRECATED
Definition: tbb_config.h:636
#define __TBB_TASK_PRIORITY
Definition: tbb_config.h:571
#define __TBB_TASK_GROUP_CONTEXT
Definition: tbb_config.h:541
#define __TBB_EXPORTED_METHOD
Definition: tbb_stddef.h:98
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
#define __TBB_override
Definition: tbb_stddef.h:240
#define __TBB_FORWARDING_REF(A)
Definition: tbb_stddef.h:517
void const char const char int ITT_FORMAT __itt_group_sync s
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d
void const char const char int ITT_FORMAT __itt_group_sync p
The graph class.
priority_t
Definition: task.h:317
@ priority_normal
Definition: task.h:318
@ priority_high
Definition: task.h:320
@ priority_low
Definition: task.h:319
@ release
Release.
Definition: atomic.h:59
Identifiers declared inside namespace internal should never be used directly by client code.
Definition: atomic.h:65
static const int priority_critical
Definition: task.h:313
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
int current_thread_index()
Returns the index, aka slot number, of the calling thread in its current arena.
Definition: task_arena.h:484
int max_concurrency()
Returns the maximal number of threads that can work inside the arena.
Definition: task_arena.h:490
internal::return_type_or_void< F >::type isolate(F &f)
Definition: task_arena.h:464
Used to form groups of tasks.
Definition: task.h:358
Base class for user-defined tasks.
Definition: task.h:615
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
Definition: task.h:836
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:663
void operator()() const __TBB_override
Definition: task_arena.h:78
tbb::aligned_space< R > my_return_storage
Definition: task_arena.h:76
static const int automatic
Typedef for number of threads that is automatic.
Definition: task_arena.h:205
void __TBB_EXPORTED_METHOD internal_wait() const
intptr_t my_version_and_traits
Special settings.
Definition: task_arena.h:134
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:120
task_group_context * my_context
default context of the arena
Definition: task_arena.h:124
task_arena_base(int max_concurrency, unsigned reserved_for_masters)
Definition: task_arena.h:163
void __TBB_EXPORTED_METHOD internal_attach()
unsigned my_master_slots
Reserved master slots.
Definition: task_arena.h:131
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
int my_max_concurrency
Concurrency level for deferred initialization.
Definition: task_arena.h:128
void __TBB_EXPORTED_METHOD internal_initialize()
static int __TBB_EXPORTED_FUNC internal_current_slot()
void __TBB_EXPORTED_METHOD internal_terminate()
internal::return_type_or_void< F >::type execute(const F &f)
Definition: task_arena.h:432
int max_concurrency() const
Returns the maximal number of threads that can work inside the arena.
Definition: task_arena.h:453
task_arena(const task_arena &s)
Copies settings from another task_arena.
Definition: task_arena.h:298
task_arena(attach)
Creates an instance of task_arena attached to the current arena of the thread.
Definition: task_arena.h:307
internal::return_type_or_void< F >::type execute(F &f)
Definition: task_arena.h:423
void initialize()
Forces allocation of the resources for the task_arena as specified in constructor arguments.
Definition: task_arena.h:315
friend void task::enqueue(task &, task_arena &, priority_t)
void initialize(attach)
Attaches this instance to the current arena of the thread.
Definition: task_arena.h:347
task_arena(int max_concurrency_=automatic, unsigned reserved_for_masters=1)
Creates task_arena with certain concurrency limits.
Definition: task_arena.h:282
void enqueue_impl(__TBB_FORWARDING_REF(F) f, priority_t p=priority_t(0))
Definition: task_arena.h:251
void initialize(int max_concurrency_, unsigned reserved_for_masters=1)
Overrides concurrency level and forces initialization of internal representation.
Definition: task_arena.h:323
__TBB_DEPRECATED void enqueue(F &&f, priority_t p)
Definition: task_arena.h:396
static int current_thread_index()
Definition: task_arena.h:448
Tag class used to indicate the "attaching" constructor.
Definition: task_arena.h:304
Base class for types that should not be assigned.
Definition: tbb_stddef.h:322

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.