#ifndef __TBB__flow_graph_cache_impl_H
#define __TBB__flow_graph_cache_impl_H

#ifndef __TBB_flow_graph_H
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

//! A node_cache maintains a std::queue of elements of type T. Each operation is protected by a lock.
template< typename T, typename M=spin_mutex >
class node_cache {
public:
    // ...

    void remove( T &n ) {
        typename mutex_type::scoped_lock lock( my_mutex );
        for ( size_t i = internal_size(); i != 0; --i ) {
            T &s = internal_pop();
            if ( &s == &n )
                return;  // only remove one matching predecessor per request
            internal_push(s);
        }
    }

    void clear() {
        while( !my_q.empty() ) (void)my_q.pop();
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
        my_built_predecessors.clear();
#endif
    }

#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
    typedef edge_container<T> built_predecessors_type;
    built_predecessors_type &built_predecessors() { return my_built_predecessors; }

    typedef typename edge_container<T>::edge_list_type predecessor_list_type;

    void internal_add_built_predecessor( T &n ) {
        typename mutex_type::scoped_lock lock( my_mutex );
        my_built_predecessors.add_edge(n);
    }

    void internal_delete_built_predecessor( T &n ) {
        typename mutex_type::scoped_lock lock( my_mutex );
        my_built_predecessors.delete_edge(n);
    }

    void copy_predecessors( predecessor_list_type &v ) {
        typename mutex_type::scoped_lock lock( my_mutex );
        my_built_predecessors.copy_edges(v);
    }

    size_t predecessor_count() {
        typename mutex_type::scoped_lock lock( my_mutex );
        return (size_t)(my_built_predecessors.edge_count());
    }
#endif  // TBB_DEPRECATED_FLOW_NODE_EXTRACTION

protected:
    typedef M mutex_type;
    mutex_type my_mutex;
    std::queue< T * > my_q;
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
    built_predecessors_type my_built_predecessors;
#endif
    // ...
};
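For orientation, here is a minimal standalone sketch of the same idea; this is not TBB code, the name SimpleNodeCache is invented for the illustration, and std::mutex stands in for the spin_mutex used above.

// Hypothetical illustration only: a mutex-protected queue of node pointers,
// mirroring the shape of node_cache but using only the standard library.
#include <cstddef>
#include <mutex>
#include <queue>

template <typename T>
class SimpleNodeCache {               // invented name, sketch only
public:
    bool empty() {
        std::lock_guard<std::mutex> lock(my_mutex);
        return my_q.empty();
    }
    void add(T &n) {
        std::lock_guard<std::mutex> lock(my_mutex);
        my_q.push(&n);
    }
    // Remove one matching entry while preserving the order of the others.
    void remove(T &n) {
        std::lock_guard<std::mutex> lock(my_mutex);
        for (std::size_t i = my_q.size(); i != 0; --i) {
            T *s = my_q.front();
            my_q.pop();
            if (s == &n) return;      // drop only the first match
            my_q.push(s);             // otherwise rotate it to the back
        }
    }
private:
    std::mutex my_mutex;
    std::queue<T *> my_q;
};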
//! A cache of predecessors that only supports try_get
template< typename T, typename M=spin_mutex >
#if __TBB_PREVIEW_ASYNC_MSG
class predecessor_cache : public node_cache< untyped_sender, M > {
#else
class predecessor_cache : public node_cache< sender<T>, M > {
#endif // __TBB_PREVIEW_ASYNC_MSG
public:
    typedef M mutex_type;
    typedef T output_type;
#if __TBB_PREVIEW_ASYNC_MSG
    // ... (untyped_sender/untyped_receiver typedefs)
#else
    typedef sender<output_type> predecessor_type;
    typedef receiver<output_type> successor_type;
#endif // __TBB_PREVIEW_ASYNC_MSG

    // ...

    bool get_item( output_type &v ) {
        bool msg = false;
        do {
            // pop the next cached predecessor and try to get an item from it
            // ...
            msg = src->try_get( v );

            if ( msg == false ) {
                // relinquish ownership of the edge:
                // re-register the owning receiver as the sender's successor
                if ( my_owner )
                    src->register_successor( *my_owner );
            } else {
                // retain ownership of the edge
                this->add( *src );
            }
        } while ( msg == false );
        return msg;
    }

    void reset() {
        // re-register the owning receiver with every cached predecessor
        // ...
        src->register_successor( *my_owner );
        // ...
    }

#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
    // ... (deprecated edge-extraction interface)
#endif

protected:
    successor_type *my_owner;
};
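To see this pull protocol from the public API, here is a small sketch assuming the public tbb/flow_graph.h header: a serial, rejecting function_node turns messages away while its body is busy and later pulls the buffered items back from the upstream queue_node through exactly this kind of predecessor cache.

#include "tbb/flow_graph.h"
#include <iostream>

int main() {
    using namespace tbb::flow;
    graph g;
    queue_node<int> upstream(g);
    // serial + rejecting: while the body is busy, incoming messages are rejected
    // and later pulled back from the cached predecessor (the queue_node).
    function_node<int, continue_msg, rejecting> consumer(g, serial, [](int v) {
        std::cout << "consumed " << v << "\n";
        return continue_msg();
    });
    make_edge(upstream, consumer);
    for (int i = 0; i < 5; ++i)
        upstream.try_put(i);
    g.wait_for_all();
    return 0;
}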
//! A cache of predecessors that supports requests and reservations
template< typename T, typename M=spin_mutex >
class reservable_predecessor_cache : public predecessor_cache< T, M > {
public:
    typedef M mutex_type;
    typedef T output_type;
#if __TBB_PREVIEW_ASYNC_MSG
    // ... (untyped_sender/untyped_receiver typedefs)
#else
    typedef sender<T> predecessor_type;
    typedef receiver<T> successor_type;
#endif // __TBB_PREVIEW_ASYNC_MSG

    bool try_reserve( output_type &v ) {
        bool msg = false;
        do {
            // pop the next cached predecessor into reserved_src
            // ...
            msg = reserved_src->try_reserve( v );

            if ( msg == false ) {
                // relinquish ownership of the edge
                reserved_src->register_successor( *this->my_owner );
                reserved_src = NULL;
            } else {
                // retain ownership of the edge
                this->add( *reserved_src );
            }
        } while ( msg == false );
        return msg;
    }

    bool try_release() {
        reserved_src->try_release();
        reserved_src = NULL;
        return true;
    }

    bool try_consume() {
        reserved_src->try_consume();
        reserved_src = NULL;
        return true;
    }

private:
    predecessor_type *reserved_src;
};
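The try_reserve/try_consume/try_release calls above back the reserving join policy of the public API. A small sketch, assuming tbb/flow_graph.h and C++11 std::tuple support:

#include "tbb/flow_graph.h"
#include <iostream>
#include <tuple>

int main() {
    using namespace tbb::flow;
    graph g;
    buffer_node<int>   ints(g);
    buffer_node<float> floats(g);
    // A reserving join only consumes inputs once *all* ports can reserve one,
    // using the reserve/consume/release protocol wrapped by
    // reservable_predecessor_cache on each input port.
    join_node< std::tuple<int, float>, reserving > j(g);
    function_node< std::tuple<int, float>, continue_msg > report(
        g, unlimited, [](const std::tuple<int, float> &t) {
            std::cout << std::get<0>(t) << ", " << std::get<1>(t) << "\n";
            return continue_msg();
        });
    make_edge(ints, input_port<0>(j));
    make_edge(floats, input_port<1>(j));
    make_edge(j, report);
    ints.try_put(1);
    floats.try_put(2.0f);
    g.wait_for_all();
    return 0;
}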
//! An abstract cache of successors
template< typename T, typename M=spin_rw_mutex >
class successor_cache : tbb::internal::no_copy {
protected:
    typedef M mutex_type;
    mutex_type my_mutex;

#if __TBB_PREVIEW_ASYNC_MSG
    // ... (untyped_receiver/untyped_sender typedefs)
#else
    typedef receiver<T> successor_type;
    typedef receiver<T> *pointer_type;
    typedef sender<T> owner_type;
#endif // __TBB_PREVIEW_ASYNC_MSG
    typedef std::list< pointer_type > successors_type;
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
    edge_container<successor_type> my_built_successors;
#endif
    successors_type my_successors;
    owner_type *my_owner;

public:
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
    typedef typename edge_container<successor_type>::edge_list_type successor_list_type;

    edge_container<successor_type> &built_successors() { return my_built_successors; }

    void internal_add_built_successor( successor_type &r ) {
        typename mutex_type::scoped_lock l(my_mutex, true);
        my_built_successors.add_edge( r );
    }

    void internal_delete_built_successor( successor_type &r ) {
        typename mutex_type::scoped_lock l(my_mutex, true);
        my_built_successors.delete_edge(r);
    }

    void copy_successors( successor_list_type &v ) {
        typename mutex_type::scoped_lock l(my_mutex, false);
        my_built_successors.copy_edges(v);
    }

    size_t successor_count() {
        typename mutex_type::scoped_lock l(my_mutex, false);
        return my_built_successors.edge_count();
    }
#endif  // TBB_DEPRECATED_FLOW_NODE_EXTRACTION

    // ...

    void register_successor( successor_type &r ) {
        typename mutex_type::scoped_lock l(my_mutex, true);
        my_successors.push_back( &r );
    }

    void remove_successor( successor_type &r ) {
        typename mutex_type::scoped_lock l(my_mutex, true);
        for ( typename successors_type::iterator i = my_successors.begin();
              i != my_successors.end(); ++i ) {
            if ( *i == &r ) {
                my_successors.erase(i);
                break;
            }
        }
    }

    bool empty() {
        typename mutex_type::scoped_lock l(my_mutex, false);
        return my_successors.empty();
    }

    void clear() {
        my_successors.clear();
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
        my_built_successors.clear();
#endif
    }

#if !__TBB_PREVIEW_ASYNC_MSG
    virtual task * try_put_task( const T &t ) = 0;
#endif // __TBB_PREVIEW_ASYNC_MSG
};  // successor_cache<T>
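In the public API, make_edge and remove_edge are what end up calling register_successor and remove_successor on a node's successor cache. A minimal sketch, assuming tbb/flow_graph.h:

#include "tbb/flow_graph.h"
#include <iostream>

int main() {
    using namespace tbb::flow;
    graph g;
    broadcast_node<int> src(g);
    function_node<int, continue_msg> sink(g, unlimited, [](int v) {
        std::cout << "received " << v << "\n";
        return continue_msg();
    });
    make_edge(src, sink);    // registers sink in src's successor cache
    src.try_put(1);          // delivered to sink
    remove_edge(src, sink);  // removes sink from the cache again
    src.try_put(2);          // no successors remain, so this message is dropped
    g.wait_for_all();
    return 0;
}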
//! An abstract cache of successors, specialized to continue_msg
template<>
class successor_cache< continue_msg > : tbb::internal::no_copy {
protected:
    typedef spin_rw_mutex mutex_type;
    mutex_type my_mutex;

#if __TBB_PREVIEW_ASYNC_MSG
    // ... (untyped_receiver typedefs)
#else
    typedef receiver<continue_msg> successor_type;
    typedef receiver<continue_msg> *pointer_type;
#endif // __TBB_PREVIEW_ASYNC_MSG
    typedef std::list< pointer_type > successors_type;
    successors_type my_successors;
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
    edge_container<successor_type> my_built_successors;
    typedef edge_container<successor_type>::edge_list_type successor_list_type;
#endif

    sender<continue_msg> *my_owner;

public:
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
    edge_container<successor_type> &built_successors() { return my_built_successors; }

    void internal_add_built_successor( successor_type &r ) {
        mutex_type::scoped_lock l(my_mutex, true);
        my_built_successors.add_edge( r );
    }

    void internal_delete_built_successor( successor_type &r ) {
        mutex_type::scoped_lock l(my_mutex, true);
        my_built_successors.delete_edge(r);
    }

    void copy_successors( successor_list_type &v ) {
        mutex_type::scoped_lock l(my_mutex, false);
        my_built_successors.copy_edges(v);
    }

    size_t successor_count() {
        mutex_type::scoped_lock l(my_mutex, false);
        return my_built_successors.edge_count();
    }
#endif  // TBB_DEPRECATED_FLOW_NODE_EXTRACTION

    // ...

    void register_successor( successor_type &r ) {
        mutex_type::scoped_lock l(my_mutex, true);
        my_successors.push_back( &r );
        // a continue_receiver tracks its predecessor count, so tell it
        // that the owning sender is one of its predecessors
        if ( my_owner && r.is_continue_receiver() ) {
            r.register_predecessor( *my_owner );
        }
    }

    void remove_successor( successor_type &r ) {
        mutex_type::scoped_lock l(my_mutex, true);
        for ( successors_type::iterator i = my_successors.begin();
              i != my_successors.end(); ++i ) {
            if ( *i == &r ) {
                my_successors.erase(i);
                break;
            }
        }
    }

    bool empty() {
        mutex_type::scoped_lock l(my_mutex, false);
        return my_successors.empty();
    }

    void clear() {
        my_successors.clear();
#if TBB_DEPRECATED_FLOW_NODE_EXTRACTION
        my_built_successors.clear();
#endif
    }

#if !__TBB_PREVIEW_ASYNC_MSG
    virtual task * try_put_task( const continue_msg &t ) = 0;
#endif // __TBB_PREVIEW_ASYNC_MSG
};  // successor_cache< continue_msg >
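The extra register_predecessor call in this specialization is what lets a continue_node count its predecessors automatically as edges are made. A small sketch, assuming tbb/flow_graph.h:

#include "tbb/flow_graph.h"
#include <iostream>

int main() {
    using namespace tbb::flow;
    graph g;
    broadcast_node<continue_msg> start(g);
    continue_node<continue_msg> a(g, [](const continue_msg &) {
        std::cout << "a done\n";
        return continue_msg();
    });
    continue_node<continue_msg> b(g, [](const continue_msg &) {
        std::cout << "b done\n";
        return continue_msg();
    });
    // c is a continue_receiver: each make_edge below also bumps its predecessor
    // count, so c fires only after signals from both a and b have arrived.
    continue_node<continue_msg> c(g, [](const continue_msg &) {
        std::cout << "c runs after a and b\n";
        return continue_msg();
    });
    make_edge(start, a);
    make_edge(start, b);
    make_edge(a, c);
    make_edge(b, c);
    start.try_put(continue_msg());
    g.wait_for_all();
    return 0;
}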
//! A cache of successors that are broadcast to
template< typename T, typename M=spin_rw_mutex >
class broadcast_cache : public successor_cache<T, M> {
    typedef M mutex_type;
    typedef typename successor_cache<T, M>::successors_type successors_type;

public:
#if __TBB_PREVIEW_ASYNC_MSG
    template<typename X>
    task * try_put_task( const X &t ) {
#else
    task * try_put_task( const T &t ) __TBB_override {
#endif // __TBB_PREVIEW_ASYNC_MSG
        task * last_task = NULL;
        bool upgraded = true;
        typename mutex_type::scoped_lock l(this->my_mutex, upgraded);
        typename successors_type::iterator i = this->my_successors.begin();
        while ( i != this->my_successors.end() ) {
            task *new_task = (*i)->try_put_task(t);
            graph& graph_ref = (*i)->graph_reference();
            last_task = combine_tasks(graph_ref, last_task, new_task);  // enqueue if necessary
            if ( new_task ) {
                ++i;
            } else {  // the successor rejected the message
                if ( (*i)->register_predecessor(*this->my_owner) ) {
                    if ( !upgraded ) {
                        l.upgrade_to_writer();
                        upgraded = true;
                    }
                    i = this->my_successors.erase(i);
                } else {
                    ++i;
                }
            }
        }
        return last_task;
    }
};
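broadcast_cache backs nodes that push each message to every successor, broadcast_node being the most direct public example. A minimal sketch, assuming tbb/flow_graph.h:

#include "tbb/flow_graph.h"
#include <iostream>

int main() {
    using namespace tbb::flow;
    graph g;
    broadcast_node<int> bcast(g);
    function_node<int, continue_msg> left(g, unlimited, [](int v) {
        std::cout << "left got " << v << "\n";
        return continue_msg();
    });
    function_node<int, continue_msg> right(g, unlimited, [](int v) {
        std::cout << "right got " << v << "\n";
        return continue_msg();
    });
    make_edge(bcast, left);
    make_edge(bcast, right);
    bcast.try_put(42);   // both successors receive the same message
    g.wait_for_all();
    return 0;
}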
//! A cache of successors that are put in a round-robin fashion
template< typename T, typename M=spin_rw_mutex >
class round_robin_cache : public successor_cache<T, M> {
    typedef size_t size_type;
    typedef M mutex_type;
    typedef typename successor_cache<T, M>::successors_type successors_type;

public:
    size_type size() {
        typename mutex_type::scoped_lock l(this->my_mutex, false);
        return this->my_successors.size();
    }

#if __TBB_PREVIEW_ASYNC_MSG
    template<typename X>
    task * try_put_task( const X &t ) {
#else
    task * try_put_task( const T &t ) __TBB_override {
#endif // __TBB_PREVIEW_ASYNC_MSG
        bool upgraded = true;
        typename mutex_type::scoped_lock l(this->my_mutex, upgraded);
        typename successors_type::iterator i = this->my_successors.begin();
        while ( i != this->my_successors.end() ) {
            task *new_task = (*i)->try_put_task(t);
            if ( new_task ) {
                return new_task;  // the first successor that accepts the item gets it
            } else {
                if ( (*i)->register_predecessor(*this->my_owner) ) {
                    if ( !upgraded ) {
                        l.upgrade_to_writer();
                        upgraded = true;
                    }
                    i = this->my_successors.erase(i);
                } else {
                    ++i;
                }
            }
        }
        return NULL;
    }
};
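In contrast to broadcast_cache, a round_robin_cache stops at the first successor that accepts an item, so each message reaches at most one successor; this is the behavior you observe when a buffering node such as queue_node forwards its items. A small sketch, assuming tbb/flow_graph.h, with rejecting serial workers so a busy worker lets the other one take the next item:

#include "tbb/flow_graph.h"
#include <iostream>

int main() {
    using namespace tbb::flow;
    graph g;
    queue_node<int> q(g);
    function_node<int, continue_msg, rejecting> w1(g, serial, [](int v) {
        std::cout << "w1 took " << v << "\n";
        return continue_msg();
    });
    function_node<int, continue_msg, rejecting> w2(g, serial, [](int v) {
        std::cout << "w2 took " << v << "\n";
        return continue_msg();
    });
    make_edge(q, w1);
    make_edge(q, w2);
    for (int i = 0; i < 6; ++i)
        q.try_put(i);
    g.wait_for_all();   // every value reaches exactly one of the two workers
    return 0;
}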
#endif // __TBB__flow_graph_cache_impl_H