Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
queuing_rw_mutex.cpp
/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbb/queuing_rw_mutex.h"
#include "tbb/tbb_machine.h"
#include "tbb/tbb_stddef.h"
#include "itt_notify.h"

namespace tbb {

using namespace internal;

//! Flag bits in a state_t that specify information about a locking request.
enum state_t_flags {
    STATE_WRITER             = 1<<0,
    STATE_READER             = 1<<1,
    STATE_READER_UNBLOCKNEXT = 1<<2,
    STATE_ACTIVEREADER       = 1<<3,
    STATE_UPGRADE_REQUESTED  = 1<<4,
    STATE_UPGRADE_WAITING    = 1<<5,
    STATE_UPGRADE_LOSER      = 1<<6,
    STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT,
    STATE_COMBINED_READER        = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER,
    STATE_COMBINED_UPGRADING     = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER
};

const unsigned char RELEASED = 0;
const unsigned char ACQUIRED = 1;

bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock()
{
    return as_atomic(my_internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED,RELEASED) == RELEASED;
}

void queuing_rw_mutex::scoped_lock::acquire_internal_lock()
{
    // Usually, we would use the test-test-and-set idiom here, with exponential backoff.
    // But so far, experiments indicate there is no value in doing so here.
    while( !try_acquire_internal_lock() ) {
        __TBB_Pause(1);
    }
}

void queuing_rw_mutex::scoped_lock::release_internal_lock()
{
    __TBB_store_with_release(my_internal_lock,RELEASED);
}

void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock()
{
    spin_wait_until_eq(my_internal_lock, RELEASED);
}

void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) {
    if( flag )
        wait_for_release_of_internal_lock();
    else
        release_internal_lock();
}

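The internal lock above is a one-byte test-and-set spin lock built on TBB's machine layer. As a rough standalone sketch of the same idiom (an illustration only, using C++11 std::atomic and std::this_thread::yield in place of as_atomic and __TBB_Pause; the name toy_internal_lock is invented here and is not part of TBB):

    #include <atomic>
    #include <thread>

    class toy_internal_lock {
        std::atomic<unsigned char> my_flag{0};            // 0 = released, 1 = acquired
    public:
        bool try_acquire() {
            unsigned char released = 0;
            // CAS with acquire semantics: succeed only if the flag was "released".
            return my_flag.compare_exchange_strong(released, 1, std::memory_order_acquire);
        }
        void acquire() {
            // Plain test-and-set loop with a short pause, as in acquire_internal_lock();
            // no test-test-and-set or exponential backoff.
            while( !try_acquire() )
                std::this_thread::yield();
        }
        void release() {
            my_flag.store(0, std::memory_order_release);  // store with release semantics
        }
        void wait_for_release() const {
            // Spin until another thread stores "released", as in wait_for_release_of_internal_lock().
            while( my_flag.load(std::memory_order_acquire) != 0 )
                std::this_thread::yield();
        }
    };
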
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4311 4312)
#endif

//! A view of a T* with additional functionality for twiddling low-order bits.
template<typename T>
class tricky_atomic_pointer: no_copy {
public:
    typedef typename atomic_selector<sizeof(T*)>::word word;

    template<memory_semantics M>
    static T* fetch_and_add( T* volatile * location, word addend ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) );
    }
    template<memory_semantics M>
    static T* fetch_and_store( T* volatile * location, T* value ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) );
    }
    template<memory_semantics M>
    static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) {
        return reinterpret_cast<T*>(
            atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value),
                                                          reinterpret_cast<word>(comparand))
        );
    }

    T* & ref;
    tricky_atomic_pointer( T*& original ) : ref(original) {};
    tricky_atomic_pointer( T* volatile & original ) : ref(original) {};
    T* operator&( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );
    }
    T* operator|( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );
    }
};

typedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer;

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (pop)
#endif

//! Mask for low order bit of a pointer.
static const tricky_pointer::word FLAG = 0x1;

inline
uintptr_t get_flag( queuing_rw_mutex::scoped_lock* ptr ) {
    return uintptr_t(ptr) & FLAG;
}

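FLAG exploits the alignment of scoped_lock objects: bit 0 of a real node pointer is always zero, so the same machine word can carry an extra one-bit signal (here an "in use" / "wait" mark) alongside the pointer. A minimal standalone sketch of this pointer-tagging technique (illustration only; node, tag, untag and is_tagged are invented names, not TBB code):

    #include <cstdint>
    #include <cassert>

    struct node { int payload; };   // any type with alignment >= 2 leaves bit 0 free

    inline node* tag( node* p )       { return reinterpret_cast<node*>( reinterpret_cast<std::uintptr_t>(p) | 1u ); }
    inline node* untag( node* p )     { return reinterpret_cast<node*>( reinterpret_cast<std::uintptr_t>(p) & ~std::uintptr_t(1) ); }
    inline bool  is_tagged( node* p ) { return ( reinterpret_cast<std::uintptr_t>(p) & 1u ) != 0; }

    inline void demo() {
        static node n;
        node* link = tag(&n);                       // store pointer + signal bit in one word
        assert( is_tagged(link) && untag(link) == &n );
    }
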
//------------------------------------------------------------------------
// Methods of queuing_rw_mutex::scoped_lock
//------------------------------------------------------------------------

//! Acquire lock on given mutex.
void queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex");

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    my_mutex = &m;
    __TBB_store_relaxed(my_prev , (scoped_lock*)0);
    __TBB_store_relaxed(my_next , (scoped_lock*)0);
    __TBB_store_relaxed(my_going, 0);
    my_state = state_t(write ? STATE_WRITER : STATE_READER);
    my_internal_lock = RELEASED;

    queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);

    if( write ) { // Acquiring for write

        if( pred ) {
            ITT_NOTIFY(sync_prepare, my_mutex);
            pred = tricky_pointer(pred) & ~FLAG;
            __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), "use of corrupted pointer!" );
#if TBB_USE_ASSERT
            __TBB_control_consistency_helper(); // on "m.q_tail"
            __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), "the predecessor has another successor!");
#endif
            __TBB_store_with_release(pred->my_next,this);
            spin_wait_until_eq(my_going, 1);
        }

    } else { // Acquiring for read
#if DO_ITT_NOTIFY
        bool sync_prepare_done = false;
#endif
        if( pred ) {
            unsigned short pred_state;
            __TBB_ASSERT( !__TBB_load_relaxed(my_prev), "the predecessor is already set" );
            if( uintptr_t(pred) & FLAG ) {
                /* this is only possible if pred is an upgrading reader and it signals us to wait */
                pred_state = STATE_UPGRADE_WAITING;
                pred = tricky_pointer(pred) & ~FLAG;
            } else {
                // Load pred->my_state now, because once pred->my_next becomes
                // non-NULL, we must assume that *pred might be destroyed.
                pred_state = pred->my_state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER);
            }
            __TBB_store_relaxed(my_prev, pred);
            __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), "use of corrupted pointer!" );
#if TBB_USE_ASSERT
            __TBB_control_consistency_helper(); // on "m.q_tail"
            __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), "the predecessor has another successor!");
#endif
            __TBB_store_with_release(pred->my_next,this);
            if( pred_state != STATE_ACTIVEREADER ) {
#if DO_ITT_NOTIFY
                sync_prepare_done = true;
                ITT_NOTIFY(sync_prepare, my_mutex);
#endif
                spin_wait_until_eq(my_going, 1);
            }
        }

        // The protected state must have been acquired here before it can be further released to any other reader(s):
        unsigned short old_state = my_state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER);
        if( old_state!=STATE_READER ) {
#if DO_ITT_NOTIFY
            if( !sync_prepare_done )
                ITT_NOTIFY(sync_prepare, my_mutex);
#endif
            // Failed to become an active reader -> need to unblock the next waiting reader first
            __TBB_ASSERT( my_state==STATE_READER_UNBLOCKNEXT, "unexpected state" );
            spin_wait_while_eq(my_next, (scoped_lock*)NULL);
            /* my_state must be changed before unblocking the next waiter; otherwise it might
               finish, and another thread could pick up our old state and be left blocked */
            my_state = STATE_ACTIVEREADER;
            __TBB_store_with_release(my_next->my_going,1);
        }
    }

    ITT_NOTIFY(sync_acquired, my_mutex);

    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    __TBB_load_with_acquire(my_going);
}

//! Acquire lock on given mutex if free (i.e. non-blocking).
bool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex");

    if( load<relaxed>(m.q_tail) )
        return false; // Someone already took the lock

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    __TBB_store_relaxed(my_prev, (scoped_lock*)0);
    __TBB_store_relaxed(my_next, (scoped_lock*)0);
    __TBB_store_relaxed(my_going, 0); // TODO: remove dead assignment?
    my_state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER);
    my_internal_lock = RELEASED;

    // The CAS must have release semantics, because we are
    // "sending" the fields initialized above to other processors.
    if( m.q_tail.compare_and_swap<tbb::release>(this, NULL) )
        return false; // Someone already took the lock
    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    __TBB_load_with_acquire(my_going);
    my_mutex = &m;
    ITT_NOTIFY(sync_acquired, my_mutex);
    return true;
}

//! Release lock.
void queuing_rw_mutex::scoped_lock::release( )
{
    __TBB_ASSERT(my_mutex!=NULL, "no lock acquired");

    ITT_NOTIFY(sync_releasing, my_mutex);

    if( my_state == STATE_WRITER ) { // Acquired for write

        // The logic below is the same as "writerUnlock", but elides
        // "return" from the middle of the routine.
        // In the statement below, acquire semantics of reading my_next is required
        // so that following operations with fields of my_next are safe.
        scoped_lock* n = __TBB_load_with_acquire(my_next);
        if( !n ) {
            if( this == my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                // this was the only item in the queue, and the queue is now empty.
                goto done;
            }
            spin_wait_while_eq( my_next, (scoped_lock*)NULL );
            n = __TBB_load_with_acquire(my_next);
        }
        __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early
        if( n->my_state==STATE_UPGRADE_WAITING ) {
            // a successor waiting for upgrade means this writer was itself upgraded earlier.
            acquire_internal_lock();
            queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL);
            n->my_state = STATE_UPGRADE_LOSER;
            __TBB_store_with_release(n->my_going,1);
            unblock_or_wait_on_internal_lock(get_flag(tmp));
        } else {
            __TBB_ASSERT( my_state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" );
            __TBB_ASSERT( !( uintptr_t(__TBB_load_relaxed(n->my_prev)) & FLAG ), "use of corrupted pointer!" );
            __TBB_store_relaxed(n->my_prev, (scoped_lock*)0);
            __TBB_store_with_release(n->my_going,1);
        }

    } else { // Acquired for read

        queuing_rw_mutex::scoped_lock *tmp = NULL;
retry:
        // Addition to the original paper: Mark my_prev as in use
        queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG);

        if( pred ) {
            if( !(pred->try_acquire_internal_lock()) )
            {
                // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades.
                // In the second case, it may or may not have seen my "in use" flag, so we need to check.
                tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred) | FLAG );
                if( !(uintptr_t(tmp) & FLAG) ) {
                    // Wait for the predecessor to change my_prev (e.g. during unlink)
                    spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG );
                    // Now owner of pred is waiting for _us_ to release its lock
                    pred->release_internal_lock();
                }
                // else the "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do

                tmp = NULL;
                goto retry;
            }
            __TBB_ASSERT(pred && pred->my_internal_lock==ACQUIRED, "predecessor's lock is not acquired");
            __TBB_store_relaxed(my_prev, pred);
            acquire_internal_lock();

            __TBB_store_with_release(pred->my_next,static_cast<scoped_lock *>(NULL));

            if( !__TBB_load_relaxed(my_next) && this != my_mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) {
                spin_wait_while_eq( my_next, (void*)NULL );
            }
            __TBB_ASSERT( !get_flag(__TBB_load_relaxed(my_next)), "use of corrupted pointer" );

            // ensure acquire semantics of reading 'my_next'
            if( scoped_lock *const l_next = __TBB_load_with_acquire(my_next) ) { // I->next != nil, TODO: rename to n after clearing up and adapting the n in the comment two lines below
                // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0
                tmp = tricky_pointer::fetch_and_store<tbb::release>(&(l_next->my_prev), pred);
                // I->prev->next = I->next;
                __TBB_ASSERT(__TBB_load_relaxed(my_prev)==pred, NULL);
                __TBB_store_with_release(pred->my_next, my_next);
            }
            // Safe to release in the order opposite to acquisition, which makes the code simpler
            pred->release_internal_lock();

        } else { // No predecessor when we looked
            acquire_internal_lock(); // "exclusiveLock(&I->EL)"
            scoped_lock* n = __TBB_load_with_acquire(my_next);
            if( !n ) {
                if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                    spin_wait_while_eq( my_next, (scoped_lock*)NULL );
                    n = __TBB_load_relaxed(my_next);
                } else {
                    goto unlock_self;
                }
            }
            __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early
            tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL);
            __TBB_store_with_release(n->my_going,1);
        }
unlock_self:
        unblock_or_wait_on_internal_lock(get_flag(tmp));
    }
done:
    spin_wait_while_eq( my_going, 2 );

    initialize();
}

//! Downgrade writer to become a reader.
bool queuing_rw_mutex::scoped_lock::downgrade_to_reader()
{
    if ( my_state == STATE_ACTIVEREADER ) return true; // Already a reader

    ITT_NOTIFY(sync_releasing, my_mutex);
    my_state = STATE_READER;
    if( ! __TBB_load_relaxed(my_next) ) {
        // the following load of q_tail must not be reordered with setting STATE_READER above
        if( this==my_mutex->q_tail.load<full_fence>() ) {
            unsigned short old_state = my_state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER);
            if( old_state==STATE_READER )
                return true; // Downgrade completed
        }
        /* wait for the next to register */
        spin_wait_while_eq( my_next, (void*)NULL );
    }
    scoped_lock *const n = __TBB_load_with_acquire(my_next);
    __TBB_ASSERT( n, "still no successor at this point!" );
    if( n->my_state & STATE_COMBINED_WAITINGREADER )
        __TBB_store_with_release(n->my_going,1);
    else if( n->my_state==STATE_UPGRADE_WAITING )
        // a successor waiting for upgrade means this writer was itself upgraded earlier.
        n->my_state = STATE_UPGRADE_LOSER;
    my_state = STATE_ACTIVEREADER;
    return true;
}

//! Upgrade reader to become a writer.
bool queuing_rw_mutex::scoped_lock::upgrade_to_writer()
{
    if ( my_state == STATE_WRITER ) return true; // Already a writer

    queuing_rw_mutex::scoped_lock * tmp;
    queuing_rw_mutex::scoped_lock * me = this;

    ITT_NOTIFY(sync_releasing, my_mutex);
    my_state = STATE_UPGRADE_REQUESTED;
requested:
    __TBB_ASSERT( !(uintptr_t(__TBB_load_relaxed(my_next)) & FLAG), "use of corrupted pointer!" );
    acquire_internal_lock();
    if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) {
        spin_wait_while_eq( my_next, (void*)NULL );
        queuing_rw_mutex::scoped_lock * n;
        n = tricky_pointer::fetch_and_add<tbb::acquire>(&my_next, FLAG);
        unsigned short n_state = n->my_state;
        /* the next reader can be blocked by our state; the best thing to do is to unblock it */
        if( n_state & STATE_COMBINED_WAITINGREADER )
            __TBB_store_with_release(n->my_going,1);
        tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), this);
        unblock_or_wait_on_internal_lock(get_flag(tmp));
        if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) {
            // save n|FLAG for simplicity of following comparisons
            tmp = tricky_pointer(n)|FLAG;
            for( atomic_backoff b; __TBB_load_relaxed(my_next)==tmp; b.pause() ) {
                if( my_state & STATE_COMBINED_UPGRADING ) {
                    if( __TBB_load_with_acquire(my_next)==tmp )
                        __TBB_store_relaxed(my_next, n);
                    goto waiting;
                }
            }
            __TBB_ASSERT(__TBB_load_relaxed(my_next) != (tricky_pointer(n)|FLAG), NULL);
            goto requested;
        } else {
            __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state");
            __TBB_ASSERT( (tricky_pointer(n)|FLAG) == __TBB_load_relaxed(my_next), NULL);
            __TBB_store_relaxed(my_next, n);
        }
    } else {
        /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */
        release_internal_lock();
    } // if( this != my_mutex->q_tail... )
    my_state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);

waiting:
    __TBB_ASSERT( !( intptr_t(__TBB_load_relaxed(my_next)) & FLAG ), "use of corrupted pointer!" );
    __TBB_ASSERT( my_state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" );
    __TBB_ASSERT( me==this, NULL );
    ITT_NOTIFY(sync_prepare, my_mutex);
    /* if no one was blocked by the "corrupted" q_tail, turn it back */
    my_mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG );
    queuing_rw_mutex::scoped_lock * pred;
    pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG);
    if( pred ) {
        bool success = pred->try_acquire_internal_lock();
        pred->my_state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);
        if( !success ) {
            tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred)|FLAG );
            if( uintptr_t(tmp) & FLAG ) {
                spin_wait_while_eq(my_prev, pred);
                pred = __TBB_load_relaxed(my_prev);
            } else {
                spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG );
                pred->release_internal_lock();
            }
        } else {
            __TBB_store_relaxed(my_prev, pred);
            pred->release_internal_lock();
            spin_wait_while_eq(my_prev, pred);
            pred = __TBB_load_relaxed(my_prev);
        }
        if( pred )
            goto waiting;
    } else {
        // restore the corrupted my_prev field for possible further use (e.g. if downgrading back to reader)
        __TBB_store_relaxed(my_prev, pred);
    }
    __TBB_ASSERT( !pred && !__TBB_load_relaxed(my_prev), NULL );

    // additional lifetime issue prevention checks
    // wait for the successor to finish working with my fields
    wait_for_release_of_internal_lock();
    // now wait for the predecessor to finish working with my fields
    spin_wait_while_eq( my_going, 2 );

    // Acquire critical section indirectly from previous owner or directly from predecessor (TODO: not clear).
    __TBB_control_consistency_helper(); // on either "my_mutex->q_tail" or "my_going" (TODO: not clear)

    bool result = ( my_state != STATE_UPGRADE_LOSER );
    my_state = STATE_WRITER;
    __TBB_store_relaxed(my_going, 1);

    ITT_NOTIFY(sync_acquired, my_mutex);
    return result;
}

void queuing_rw_mutex::internal_construct() {
    ITT_SYNC_CREATE(this, _T("tbb::queuing_rw_mutex"), _T(""));
}

} // namespace tbb
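
For orientation, here is a usage sketch of the interface implemented above (not part of queuing_rw_mutex.cpp; it relies only on the documented public members of tbb::queuing_rw_mutex::scoped_lock, and the names table_mutex and read_and_maybe_update are invented for illustration):

    #include "tbb/queuing_rw_mutex.h"

    static tbb::queuing_rw_mutex table_mutex;        // guards some shared table

    void read_and_maybe_update( bool need_update ) {
        // Enter as a reader; each thread spins only on its own queue node.
        tbb::queuing_rw_mutex::scoped_lock lock( table_mutex, /*write=*/false );
        // ... read the shared data ...
        if( need_update ) {
            // upgrade_to_writer() returns false if the lock was temporarily released
            // during the upgrade, so previously read data must be re-validated.
            if( !lock.upgrade_to_writer() ) {
                // ... re-examine the shared data ...
            }
            // ... modify the shared data ...
            lock.downgrade_to_reader();              // optionally return to shared access
        }
    }   // scoped_lock destructor releases the mutex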