Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
_concurrent_unordered_impl.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 /* Container implementations in this header are based on PPL implementations
18  provided by Microsoft. */
19 
20 #ifndef __TBB__concurrent_unordered_impl_H
21 #define __TBB__concurrent_unordered_impl_H
22 #if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
23 #error Do not #include this internal file directly; use public TBB headers instead.
24 #endif
25 
26 #include "../tbb_stddef.h"
27 
28 #include <iterator>
29 #include <utility> // Need std::pair
30 #include <functional> // Need std::equal_to (in ../concurrent_unordered_*.h)
31 #include <string> // For tbb_hasher
32 #include <cstring> // Need std::memset
33 #include __TBB_STD_SWAP_HEADER
34 
35 #include "../atomic.h"
36 #include "../tbb_exception.h"
37 #include "../tbb_allocator.h"
38 
39 #if __TBB_INITIALIZER_LISTS_PRESENT
40  #include <initializer_list>
41 #endif
42 
43 #if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN
44  #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1
45 #endif
46 
47 #include "_allocator_traits.h"
48 #include "_tbb_hash_compare_impl.h"
49 #include "_template_helpers.h"
50 
51 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
52 #include "_node_handle_impl.h"
53 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
54 
55 namespace tbb {
56 namespace interface5 {
57 //! @cond INTERNAL
58 namespace internal {
59 
60 template <typename T, typename Allocator>
61 class split_ordered_list;
62 template <typename Traits>
63 class concurrent_unordered_base;
64 
65 // Forward list iterators (without skipping dummy elements)
66 template<class Solist, typename Value>
67 class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
68 {
69  template <typename T, typename Allocator>
70  friend class split_ordered_list;
71  template <typename Traits>
72  friend class concurrent_unordered_base;
73  template<class M, typename V>
74  friend class flist_iterator;
75 
76  typedef typename Solist::nodeptr_t nodeptr_t;
77 public:
78  typedef typename Solist::value_type value_type;
79  typedef typename Solist::difference_type difference_type;
80  typedef typename Solist::pointer pointer;
81  typedef typename Solist::reference reference;
82 
83  flist_iterator() : my_node_ptr(0) {}
84  flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
85  : my_node_ptr(other.my_node_ptr) {}
86 
87  reference operator*() const { return my_node_ptr->my_element; }
88  pointer operator->() const { return &**this; }
89 
90  flist_iterator& operator++() {
91  my_node_ptr = my_node_ptr->my_next;
92  return *this;
93  }
94 
95  flist_iterator operator++(int) {
96  flist_iterator tmp = *this;
97  ++*this;
98  return tmp;
99  }
100 
101 protected:
102  flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
103  nodeptr_t get_node_ptr() const { return my_node_ptr; }
104 
105  nodeptr_t my_node_ptr;
106 
107  template<typename M, typename T, typename U>
108  friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
109  template<typename M, typename T, typename U>
110  friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
111 };
112 
113 template<typename Solist, typename T, typename U>
114 bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
115  return i.my_node_ptr == j.my_node_ptr;
116 }
117 template<typename Solist, typename T, typename U>
118 bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
119  return i.my_node_ptr != j.my_node_ptr;
120 }
121 
122 // Split-order list iterators, needed to skip dummy elements
123 template<class Solist, typename Value>
124 class solist_iterator : public flist_iterator<Solist, Value>
125 {
126  typedef flist_iterator<Solist, Value> base_type;
127  typedef typename Solist::nodeptr_t nodeptr_t;
128 
129  template <typename T, typename Allocator>
130  friend class split_ordered_list;
131  template<class M, typename V>
132  friend class solist_iterator;
133  template <typename Traits>
134  friend class concurrent_unordered_base;
135  template<typename M, typename T, typename U>
136  friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
137  template<typename M, typename T, typename U>
138  friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
139 
140  const Solist *my_list_ptr;
141  solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
142 
143 public:
144  typedef typename Solist::value_type value_type;
145  typedef typename Solist::difference_type difference_type;
146  typedef typename Solist::pointer pointer;
147  typedef typename Solist::reference reference;
148 
149  solist_iterator() {}
150  solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other)
151  : base_type(other), my_list_ptr(other.my_list_ptr) {}
152 
153  reference operator*() const {
154  return this->base_type::operator*();
155  }
156 
157  pointer operator->() const {
158  return (&**this);
159  }
160 
161  solist_iterator& operator++() {
162  do ++(*(base_type *)this);
163  while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
164 
165  return (*this);
166  }
167 
168  solist_iterator operator++(int) {
169  solist_iterator tmp = *this;
170  do ++*this;
171  while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
172 
173  return (tmp);
174  }
175 };
176 
177 template<typename Solist, typename T, typename U>
178 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
179  return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
180 }
181 template<typename Solist, typename T, typename U>
182 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
183  return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
184 }
185 
186 // Forward type and class definitions
187 typedef size_t sokey_t;
188 
189 
190 // Forward list in which elements are sorted in a split-order
191 template <typename T, typename Allocator>
192 class split_ordered_list
193 {
194 public:
195  typedef split_ordered_list<T, Allocator> self_type;
196 
197  typedef typename tbb::internal::allocator_rebind<Allocator, T>::type allocator_type;
198 
199  struct node;
200  typedef node *nodeptr_t;
201 
202  typedef typename tbb::internal::allocator_traits<allocator_type>::value_type value_type;
203  typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
204  typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
205  typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
206  typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
207  // No support for reference/const_reference in allocator traits
208  typedef value_type& reference;
209  typedef const value_type& const_reference;
210 
211  typedef solist_iterator<self_type, const value_type> const_iterator;
212  typedef solist_iterator<self_type, value_type> iterator;
213  typedef flist_iterator<self_type, const value_type> raw_const_iterator;
214  typedef flist_iterator<self_type, value_type> raw_iterator;
215 
216  // Node that holds the element in a split-ordered list
217  struct node : tbb::internal::no_assign
218  {
219  private:
220  // for compilers that try to generate default constructors though they are not needed.
221  node(); // VS 2008, 2010, 2012
222  public:
223  // Initialize the node with the given order key
224  void init(sokey_t order_key) {
225  my_order_key = order_key;
226  my_next = NULL;
227  }
228 
229  // Return the order key (needed for hashing)
230  sokey_t get_order_key() const { // TODO: remove
231  return my_order_key;
232  }
233 
234  // storage() and value() form a common interface for accessing the node's element (required by node_handle)
235  value_type* storage() {
236  return reinterpret_cast<value_type*>(&my_element);
237  }
238 
239  value_type& value() {
240  return *storage();
241  }
242 
243  // Inserts the new element in the list in an atomic fashion
244  nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
245  {
246  // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
247  nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);
248 
249  if (exchange_node == current_node) // TODO: why this branch?
250  {
251  // Operation succeeded, return the new node
252  return new_node;
253  }
254  else
255  {
256  // Operation failed, return the "interfering" node
257  return exchange_node;
258  }
259  }
260 
261  // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
262  // in the hash table to quickly index into the right subsection of the split-ordered list.
263  bool is_dummy() const {
264  return (my_order_key & 0x1) == 0;
265  }
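// [Editorial aside; not part of the original source.] The low-bit convention above is what
// lets dummy (bucket-head) nodes and real elements share one sorted list: regular keys are
// built from the full hash value with the last bit set, dummy keys from the bucket index
// with the last bit clear (see split_order_key_regular() and split_order_key_dummy() near
// the end of this file), e.g.:
//
//   sokey_t regular_key = __TBB_ReverseBits(hash_value) | 0x1;        // is_dummy() == false
//   sokey_t dummy_key   = __TBB_ReverseBits(bucket) & ~sokey_t(0x1);  // is_dummy() == true
//
// so each bucket's dummy node sorts immediately before every element that hashes into it.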
266 
267 
268  nodeptr_t my_next; // Next element in the list
269  value_type my_element; // Element storage
270  sokey_t my_order_key; // Order key for this element
271  };
272 
273  // Allocate a new node with the given order key; used to allocate dummy nodes
274  nodeptr_t create_node(sokey_t order_key) {
275  nodeptr_t pnode = my_node_allocator.allocate(1);
276  pnode->init(order_key);
277  return (pnode);
278  }
279 
280  // Allocate a new node with the given order key and value
281  template<typename Arg>
282  nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t,
283  tbb::internal::true_type = tbb::internal::true_type()) {
284  nodeptr_t pnode = my_node_allocator.allocate(1);
285 
286  //TODO: use RAII scoped guard instead of explicit catch
287  __TBB_TRY {
288  new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
289  pnode->init(order_key);
290  } __TBB_CATCH(...) {
291  my_node_allocator.deallocate(pnode, 1);
292  __TBB_RETHROW();
293  }
294 
295  return (pnode);
296  }
297 
298  // A helper to avoid excessive requirements in internal_insert
299  template<typename Arg>
300  nodeptr_t create_node(sokey_t, __TBB_FORWARDING_REF(Arg),
301  /*AllowCreate=*/tbb::internal::false_type){
302  __TBB_ASSERT(false, "This compile-time helper should never get called");
303  return nodeptr_t();
304  }
305 
306  // Allocate a new node with the given parameters for constructing value
307  template<typename __TBB_PARAMETER_PACK Args>
308  nodeptr_t create_node_v(__TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args){
309  nodeptr_t pnode = my_node_allocator.allocate(1);
310 
311  //TODO: use RAII scoped guard instead of explicit catch
312  __TBB_TRY {
313  new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
314  } __TBB_CATCH(...) {
315  my_node_allocator.deallocate(pnode, 1);
316  __TBB_RETHROW();
317  }
318 
319  return (pnode);
320  }
321 
322  split_ordered_list(allocator_type a = allocator_type())
323  : my_node_allocator(a), my_element_count(0)
324  {
325  // Immediately allocate a dummy node with order key of 0. This node
326  // will always be the head of the list.
327  my_head = create_node(sokey_t(0));
328  }
329 
330  ~split_ordered_list()
331  {
332  // Clear the list
333  clear();
334 
335  // Remove the head element which is not cleared by clear()
336  nodeptr_t pnode = my_head;
337  my_head = NULL;
338 
339  __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
340 
341  destroy_node(pnode);
342  }
343 
344  // Common forward list functions
345 
346  allocator_type get_allocator() const {
347  return (my_node_allocator);
348  }
349 
350  void clear() {
351  nodeptr_t pnext;
352  nodeptr_t pnode = my_head;
353 
354  __TBB_ASSERT(my_head != NULL, "Invalid head list node");
355  pnext = pnode->my_next;
356  pnode->my_next = NULL;
357  pnode = pnext;
358 
359  while (pnode != NULL)
360  {
361  pnext = pnode->my_next;
362  destroy_node(pnode);
363  pnode = pnext;
364  }
365 
366  my_element_count = 0;
367  }
368 
369  // Returns the first non-dummy element in the SOL
370  iterator begin() {
371  return first_real_iterator(raw_begin());
372  }
373 
374  // Returns the first non-dummy element in the SOL
375  const_iterator begin() const {
376  return first_real_iterator(raw_begin());
377  }
378 
379  iterator end() {
380  return (iterator(0, this));
381  }
382 
383  const_iterator end() const {
384  return (const_iterator(0, this));
385  }
386 
387  const_iterator cbegin() const {
388  return (((const self_type *)this)->begin());
389  }
390 
391  const_iterator cend() const {
392  return (((const self_type *)this)->end());
393  }
394 
395  // Checks if the number of elements (non-dummy) is 0
396  bool empty() const {
397  return (my_element_count == 0);
398  }
399 
400  // Returns the number of non-dummy elements in the list
401  size_type size() const {
402  return my_element_count;
403  }
404 
405  // Returns the maximum size of the list, determined by the allocator
406  size_type max_size() const {
407  return my_node_allocator.max_size();
408  }
409 
410  // Swaps 'this' list with the passed in one
411  void swap(self_type& other)
412  {
413  if (this == &other)
414  {
415  // Nothing to do
416  return;
417  }
418 
419  std::swap(my_element_count, other.my_element_count);
420  std::swap(my_head, other.my_head);
421  }
422 
423  // Split-order list functions
424 
425  // Returns the first element in the SOL, which is always a dummy
426  raw_iterator raw_begin() {
427  return raw_iterator(my_head);
428  }
429 
430  // Returns the first element in the SOL, which is always a dummy
431  raw_const_iterator raw_begin() const {
432  return raw_const_iterator(my_head);
433  }
434 
435  raw_iterator raw_end() {
436  return raw_iterator(0);
437  }
438 
439  raw_const_iterator raw_end() const {
440  return raw_const_iterator(0);
441  }
442 
443  static sokey_t get_order_key(const raw_const_iterator& it) {
444  return it.get_node_ptr()->get_order_key();
445  }
446 
447  static sokey_t get_safe_order_key(const raw_const_iterator& it) {
448  if( !it.get_node_ptr() ) return ~sokey_t(0);
449  return it.get_node_ptr()->get_order_key();
450  }
451 
452  // Returns a public iterator version of the internal iterator. Public iterator must not
453  // be a dummy private iterator.
454  iterator get_iterator(raw_iterator it) {
455  __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
456  return iterator(it.get_node_ptr(), this);
457  }
458 
459  // Returns a public iterator version of the internal iterator. Public iterator must not
460  // be a dummy private iterator.
461  const_iterator get_iterator(raw_const_iterator it) const {
462  __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
463  return const_iterator(it.get_node_ptr(), this);
464  }
465 
466  // Returns a non-const version of the raw_iterator
467  raw_iterator get_iterator(raw_const_iterator it) {
468  return raw_iterator(it.get_node_ptr());
469  }
470 
471  // Returns a non-const version of the iterator
472  iterator get_iterator(const_iterator it) {
473  return iterator(it.my_node_ptr, it.my_list_ptr);
474  }
475 
476  // Returns a public iterator version of the first non-dummy internal iterator at or after
477  // the passed-in internal iterator.
478  iterator first_real_iterator(raw_iterator it)
479  {
480  // Skip all dummy, internal only iterators
481  while (it != raw_end() && it.get_node_ptr()->is_dummy())
482  ++it;
483 
484  return iterator(it.get_node_ptr(), this);
485  }
486 
487  // Returns a public iterator version of the first non-dummy internal iterator at or after
488  // the passed-in internal iterator.
489  const_iterator first_real_iterator(raw_const_iterator it) const
490  {
491  // Skip all dummy, internal only iterators
492  while (it != raw_end() && it.get_node_ptr()->is_dummy())
493  ++it;
494 
495  return const_iterator(it.get_node_ptr(), this);
496  }
497 
498  // Erase an element using the allocator
499  void destroy_node(nodeptr_t pnode) {
500  if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
501  my_node_allocator.deallocate(pnode, 1);
502  }
503 
504  // Try to insert a new element in the list.
505  // If insert fails, return the node that was inserted instead.
506  static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
507  new_node->my_next = current_node;
508  return previous->atomic_set_next(new_node, current_node);
509  }
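// [Editorial aside; not part of the original source.] try_insert_atomic() is the classic
// lock-free singly-linked insertion: publish the expected successor in the new node, then
// CAS the predecessor's next pointer. Callers that tolerate interference retry roughly as
// sketched below (illustrative loop only; insert_dummy() and internal_insert() add the
// order-key checks the split-ordered list actually needs):
//
//   nodeptr_t result;
//   do {
//       nodeptr_t next = previous->my_next;                    // re-read current successor
//       result = try_insert_atomic(previous, new_node, next);  // CAS; returns the winner
//   } while (result != new_node);                              // retry if another thread won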
510 
511  // Insert a new element between passed in iterators
512  std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
513  {
514  nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());
515 
516  if (inserted_node == pnode)
517  {
518  // If the insert succeeded, check that the order is correct and increment the element count
519  check_range(it, next);
520  *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
521  return std::pair<iterator, bool>(iterator(pnode, this), true);
522  }
523  else
524  {
525  return std::pair<iterator, bool>(end(), false);
526  }
527  }
528 
529  // Insert a new dummy element, starting search at a parent dummy element
530  raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
531  {
532  raw_iterator last = raw_end();
533  raw_iterator where = it;
534 
535  __TBB_ASSERT(where != last, "Invalid head node");
536 
537  ++where;
538 
539  // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
540  nodeptr_t dummy_node = create_node(order_key);
541 
542  for (;;)
543  {
544  __TBB_ASSERT(it != last, "Invalid head list node");
545 
546  // If the head iterator is at the end of the list, or past the point where this dummy
547  // node needs to be inserted, then try to insert it.
548  if (where == last || get_order_key(where) > order_key)
549  {
550  __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
551 
552  // Try to insert it in the right place
553  nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());
554 
555  if (inserted_node == dummy_node)
556  {
557  // Insertion succeeded, check the list for order violations
558  check_range(it, where);
559  return raw_iterator(dummy_node);
560  }
561  else
562  {
563  // Insertion failed: either dummy node was inserted by another thread, or
564  // a real element was inserted at exactly the same place as dummy node.
565  // Proceed with the search from the previous location where order key was
566  // known to be larger (note: this is legal only because there is no safe
567  // concurrent erase operation supported).
568  where = it;
569  ++where;
570  continue;
571  }
572  }
573  else if (get_order_key(where) == order_key)
574  {
575  // Another dummy node with the same value found, discard the new one.
576  destroy_node(dummy_node);
577  return where;
578  }
579 
580  // Move the iterator forward
581  it = where;
582  ++where;
583  }
584 
585  }
586 
587  nodeptr_t erase_node_impl(raw_iterator previous, raw_const_iterator& where) {
588  nodeptr_t pnode = (where++).get_node_ptr();
589  nodeptr_t prevnode = previous.get_node_ptr();
590  __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
591  prevnode->my_next = pnode->my_next;
592  return pnode;
593  }
594 
595  // This erase function can handle both real and dummy nodes
596  void erase_node(raw_iterator previous, raw_const_iterator& where,
597  /*allow_destroy*/tbb::internal::true_type)
598  {
599  nodeptr_t pnode = erase_node_impl(previous, where);
600  destroy_node(pnode);
601  }
602 
603  void erase_node(raw_iterator previous, raw_const_iterator& where,
604  /*allow_destroy*/tbb::internal::false_type)
605  {
606  erase_node_impl(previous, where);
607  }
608 
609  void erase_node(raw_iterator previous, raw_const_iterator& where) {
610  erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
611  }
612 
613  // Erase the element (previous node needs to be passed because this is a forward only list)
614  template<typename AllowDestroy>
615  iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy)
616  {
617  raw_const_iterator it = where;
618  erase_node(previous, it, AllowDestroy());
620 
621  return get_iterator(first_real_iterator(it));
622  }
623 
624  iterator erase_node(raw_iterator previous, const_iterator& where) {
625  return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
626  }
627 
628 
629 
630  // Move all elements from the passed in split-ordered list to this one
631  void move_all(self_type& source)
632  {
633  raw_const_iterator first = source.raw_begin();
634  raw_const_iterator last = source.raw_end();
635 
636  if (first == last)
637  return;
638 
639  nodeptr_t previous_node = my_head;
640  raw_const_iterator begin_iterator = first++;
641 
642  // Move all elements one by one, including dummy ones
643  for (raw_const_iterator it = first; it != last;)
644  {
645  nodeptr_t pnode = it.get_node_ptr();
646 
647  nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
648  previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
649  __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
650  raw_const_iterator where = it++;
651  source.erase_node(get_iterator(begin_iterator), where);
652  }
653  check_range();
654  }
655 
656 
657 private:
658  // Need to set up private fields of split_ordered_list in the move constructor and assignment of concurrent_unordered_base
659  template <typename Traits>
660  friend class concurrent_unordered_base;
661 
662  // Check the list for order violations
663  void check_range( raw_iterator first, raw_iterator last )
664  {
665 #if TBB_USE_ASSERT
666  for (raw_iterator it = first; it != last; ++it)
667  {
668  raw_iterator next = it;
669  ++next;
670 
671  __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!");
672  }
673 #else
674  tbb::internal::suppress_unused_warning(first, last);
675 #endif
676  }
677  void check_range()
678  {
679 #if TBB_USE_ASSERT
680  check_range( raw_begin(), raw_end() );
681 #endif
682  }
683 
684  typename tbb::internal::allocator_rebind<allocator_type, node>::type my_node_allocator;
685  size_type my_element_count; // Total item count, not counting dummy nodes
686  nodeptr_t my_head; // pointer to head node
687 };
688 
689 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
690 #pragma warning(push)
691 #pragma warning(disable: 4127) // warning C4127: conditional expression is constant
692 #endif
693 
694 template <typename Traits>
695 class concurrent_unordered_base : public Traits
696 {
697 protected:
698  // Type definitions
699  typedef concurrent_unordered_base<Traits> self_type;
700  typedef typename Traits::value_type value_type;
701  typedef typename Traits::key_type key_type;
702  typedef typename Traits::hash_compare hash_compare;
703  typedef typename Traits::allocator_type allocator_type;
704  typedef typename hash_compare::hasher hasher;
705  typedef typename hash_compare::key_equal key_equal;
706 
707  typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
708  typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
709  typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
710  typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
711  // No support for reference/const_reference in allocator
712  typedef value_type& reference;
713  typedef const value_type& const_reference;
714 
715  typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
716  typedef typename solist_t::nodeptr_t nodeptr_t;
717  // Iterators that walk the entire split-order list, including dummy nodes
718  typedef typename solist_t::raw_iterator raw_iterator;
719  typedef typename solist_t::raw_const_iterator raw_const_iterator;
720  typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
721  typedef typename solist_t::const_iterator const_iterator;
722  typedef iterator local_iterator;
723  typedef const_iterator const_local_iterator;
724 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
725  typedef typename Traits::node_type node_type;
726 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
727  using Traits::my_hash_compare;
728  using Traits::get_key;
729  using Traits::allow_multimapping;
730 
731  static const size_type initial_bucket_number = 8; // Initial number of buckets
732 
733 private:
734  template<typename OtherTraits>
735  friend class concurrent_unordered_base;
736 
737  typedef std::pair<iterator, iterator> pairii_t;
738  typedef std::pair<const_iterator, const_iterator> paircc_t;
739 
740  static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
741  static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
742 
743  struct call_internal_clear_on_exit{
744  concurrent_unordered_base* my_instance;
745  call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
746  void dismiss(){ my_instance = NULL;}
747  ~call_internal_clear_on_exit(){
748  if (my_instance){
749  my_instance->internal_clear();
750  }
751  }
752  };
753 protected:
754  // Constructors/Destructors
755  concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
756  const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
757  : Traits(hc), my_solist(a),
758  my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
759  {
760  if( n_of_buckets == 0) ++n_of_buckets;
761  my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
762  internal_init();
763  }
764 
765  concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
766  : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
767  {
768  internal_init();
769  internal_copy(right);
770  }
771 
772  concurrent_unordered_base(const concurrent_unordered_base& right)
773  : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
774  {
775  //FIXME:exception safety seems to be broken here
776  internal_init();
777  internal_copy(right);
778  }
779 
780 #if __TBB_CPP11_RVALUE_REF_PRESENT
781  concurrent_unordered_base(concurrent_unordered_base&& right)
782  : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()),
783  my_maximum_bucket_size(float(initial_bucket_load))
784  {
785  my_number_of_buckets = initial_bucket_number;
786  internal_init();
787  swap(right);
788  }
789 
790  concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)
791  : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
792  {
793  call_internal_clear_on_exit clear_buckets_on_exception(this);
794 
795  internal_init();
796  if (a == right.get_allocator()){
797  my_number_of_buckets = initial_bucket_number;
798  my_maximum_bucket_size = float(initial_bucket_load);
799  this->swap(right);
800  }else{
801  my_maximum_bucket_size = right.my_maximum_bucket_size;
802  my_number_of_buckets = right.my_number_of_buckets;
803  my_solist.my_element_count = right.my_solist.my_element_count;
804 
805  if (! right.my_solist.empty()){
806  nodeptr_t previous_node = my_solist.my_head;
807 
808  // Move all elements one by one, including dummy ones
809  for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
810  {
811  const nodeptr_t pnode = it.get_node_ptr();
812  nodeptr_t node;
813  if (pnode->is_dummy()) {
814  node = my_solist.create_node(pnode->get_order_key());
815  size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
816  set_bucket(bucket, node);
817  }else{
818  node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
819  }
820 
821  previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
822  __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
823  }
824  my_solist.check_range();
825  }
826  }
827 
828  clear_buckets_on_exception.dismiss();
829  }
830 
831 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
832 
833  concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
834  if (this != &right)
835  internal_copy(right);
836  return (*this);
837  }
838 
839 #if __TBB_CPP11_RVALUE_REF_PRESENT
840  concurrent_unordered_base& operator=(concurrent_unordered_base&& other)
841  {
842  if(this != &other){
843  typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
844  if(pocma_t::value || this->my_allocator == other.my_allocator) {
845  concurrent_unordered_base trash (std::move(*this));
846  swap(other);
847  if (pocma_t::value) {
848  using std::swap;
849  //TODO: swapping allocators here may be a problem, replace with single direction moving
850  swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
851  swap(this->my_allocator, other.my_allocator);
852  }
853  } else {
854  concurrent_unordered_base moved_copy(std::move(other),this->my_allocator);
855  this->swap(moved_copy);
856  }
857  }
858  return *this;
859  }
860 
861 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
862 
863 #if __TBB_INITIALIZER_LISTS_PRESENT
864  concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
866  {
867  this->clear();
868  this->insert(il.begin(),il.end());
869  return (*this);
870  }
871 #endif // __TBB_INITIALIZER_LISTS_PRESENT
872 
873 
875  // Delete all node segments
876  internal_clear();
877  }
878 
879 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
880  template<typename SourceType>
881  void internal_merge(SourceType& source) {
882  typedef typename SourceType::iterator source_iterator;
883  __TBB_STATIC_ASSERT((tbb::internal::is_same_type<node_type,
884  typename SourceType::node_type>::value),
885  "Incompatible containers cannot be merged");
886 
887  for(source_iterator it = source.begin(); it != source.end();) {
888  source_iterator where = it++;
889  if (allow_multimapping || find(get_key(*where)) == end()) {
890  std::pair<node_type, raw_iterator> extract_result = source.internal_extract(where);
891 
892  // If the insertion fails, it returns ownership of the node to extract_result.first
893  // extract_result.first remains valid node handle
894  if (!insert(std::move(extract_result.first)).second) {
895  raw_iterator next = extract_result.second;
896  raw_iterator current = next++;
897 
898  __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(),
899  "Wrong nodes order in source container");
900  __TBB_ASSERT(next==source.my_solist.raw_end() ||
901  extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(),
902  "Wrong nodes order in source container");
903 
904  size_t new_count = 0;// To use try_insert()
905  bool insert_result =
906  source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second;
907  __TBB_ASSERT_EX(insert_result, "Return to source must be successful. "
908  "Changing source container while merging is unsafe.");
909  }
910  extract_result.first.deactivate();
911  }
912  }
913  }
914 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
915 
916 public:
917  allocator_type get_allocator() const {
918  return my_solist.get_allocator();
919  }
920 
921  // Size and capacity function
922  bool empty() const {
923  return my_solist.empty();
924  }
925 
926  size_type size() const {
927  return my_solist.size();
928  }
929 
930  size_type max_size() const {
931  return my_solist.max_size();
932  }
933 
934  // Iterators
935  iterator begin() {
936  return my_solist.begin();
937  }
938 
939  const_iterator begin() const {
940  return my_solist.begin();
941  }
942 
943  iterator end() {
944  return my_solist.end();
945  }
946 
947  const_iterator end() const {
948  return my_solist.end();
949  }
950 
951  const_iterator cbegin() const {
952  return my_solist.cbegin();
953  }
954 
955  const_iterator cend() const {
956  return my_solist.cend();
957  }
958 
959  // Parallel traversal support
960  class const_range_type : tbb::internal::no_assign {
961  const concurrent_unordered_base &my_table;
962  raw_const_iterator my_begin_node;
963  raw_const_iterator my_end_node;
964  mutable raw_const_iterator my_midpoint_node;
965  public:
966  //! Type for size of a range
967  typedef typename concurrent_unordered_base::size_type size_type;
968  typedef typename concurrent_unordered_base::value_type value_type;
969  typedef typename concurrent_unordered_base::reference reference;
970  typedef typename concurrent_unordered_base::difference_type difference_type;
971  typedef typename concurrent_unordered_base::const_iterator iterator;
972 
973  //! True if range is empty.
974  bool empty() const {return my_begin_node == my_end_node;}
975 
976  //! True if range can be partitioned into two subranges.
977  bool is_divisible() const {
978  return my_midpoint_node != my_end_node;
979  }
980  //! Split range.
981  const_range_type( const_range_type &r, split )
982  : my_table(r.my_table), my_end_node(r.my_end_node)
983  {
984  r.my_end_node = my_begin_node = r.my_midpoint_node;
985  __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
986  __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
987  set_midpoint();
988  r.set_midpoint();
989  }
990  //! Init range with container and grainsize specified
991  const_range_type( const concurrent_unordered_base &a_table ) :
992  my_table(a_table), my_begin_node(a_table.my_solist.begin()),
993  my_end_node(a_table.my_solist.end())
994  {
995  set_midpoint();
996  }
997  const_iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
998  const_iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
999  //! The grain size for this range.
1000  size_type grainsize() const { return 1; }
1001 
1002  //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
1003  void set_midpoint() const {
1004  if( my_begin_node == my_end_node ) // not divisible
1005  my_midpoint_node = my_end_node;
1006  else {
1007  sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
1008  sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
1009  size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
1010  while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
1011  if(__TBB_ReverseBits(mid_bucket) > begin_key) {
1012  // found a dummy_node between begin and end
1013  my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
1014  }
1015  else {
1016  // didn't find a dummy node between begin and end.
1017  my_midpoint_node = my_end_node;
1018  }
1019 #if TBB_USE_ASSERT
1020  {
1021  sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
1022  __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
1023  __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
1024  }
1025 #endif // TBB_USE_ASSERT
1026  }
1027  }
1028  };
1029 
1030  class range_type : public const_range_type {
1031  public:
1032  typedef typename concurrent_unordered_base::iterator iterator;
1033  //! Split range.
1034  range_type( range_type &r, split ) : const_range_type( r, split() ) {}
1035  //! Init range with container and grainsize specified
1036  range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
1037 
1038  iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
1039  iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
1040  };
1041 
1042  range_type range() {
1043  return range_type( *this );
1044  }
1045 
1046  const_range_type range() const {
1047  return const_range_type( *this );
1048  }
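// [Editorial aside; not part of the original source.] range() is what plugs the container
// into TBB's range-based algorithms. A minimal usage sketch, assuming a concrete container
// built on this base, e.g. tbb::concurrent_unordered_map<int, int> m (do_something() is a
// hypothetical user function):
//
//   tbb::parallel_for(m.range(), [](const decltype(m.range())& r) {
//       for (auto it = r.begin(); it != r.end(); ++it)
//           do_something(*it);
//   });
//
// set_midpoint() above splits on dummy nodes, so subranges align with bucket boundaries.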
1049 
1050  // Modifiers
1051  std::pair<iterator, bool> insert(const value_type& value) {
1052  return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1053  /*AllowDestroy=*/tbb::internal::true_type>(value);
1054  }
1055 
1056  iterator insert(const_iterator, const value_type& value) {
1057  // Ignore hint
1058  return insert(value).first;
1059  }
1060 
1061 #if __TBB_CPP11_RVALUE_REF_PRESENT
1062  std::pair<iterator, bool> insert(value_type&& value) {
1063  return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1064  /*AllowDestroy=*/tbb::internal::true_type>(std::move(value));
1065  }
1066 
1067  iterator insert(const_iterator, value_type&& value) {
1068  // Ignore hint
1069  return insert(std::move(value)).first;
1070  }
1071 #endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/
1072 
1073 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1074  std::pair<iterator, bool> insert(node_type&& nh) {
1075  if (!nh.empty()) {
1076  nodeptr_t handled_node = nh.my_node;
1077  std::pair<iterator, bool> insert_result =
1078  internal_insert</*AllowCreate=*/tbb::internal::false_type,
1079  /*AllowDestroy=*/tbb::internal::false_type>
1080  (handled_node->my_element, handled_node);
1081  if (insert_result.second)
1082  nh.deactivate();
1083  return insert_result;
1084  }
1085  return std::pair<iterator, bool>(end(), false);
1086  }
1087 
1088  iterator insert(const_iterator, node_type&& nh) {
1089  return insert(std::move(nh)).first;
1090  }
1091 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1092 
1093 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
1094  template<typename... Args>
1095  std::pair<iterator, bool> emplace(Args&&... args) {
1096  nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);
1097 
1098  return internal_insert</*AllowCreate=*/tbb::internal::false_type,
1099  /*AllowDestroy=*/tbb::internal::true_type>(pnode->my_element, pnode);
1100  }
1101 
1102  template<typename... Args>
1103  iterator emplace_hint(const_iterator, Args&&... args) {
1104  // Ignore hint
1105  return emplace(tbb::internal::forward<Args>(args)...).first;
1106  }
1107 #endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
1108 
1109 
1110  template<class Iterator>
1111  void insert(Iterator first, Iterator last) {
1112  for (Iterator it = first; it != last; ++it)
1113  insert(*it);
1114  }
1115 
1116 #if __TBB_INITIALIZER_LISTS_PRESENT
1117  void insert(std::initializer_list<value_type> il) {
1119  insert(il.begin(), il.end());
1120  }
1121 #endif
1122 
1123  iterator unsafe_erase(const_iterator where) {
1124  return internal_erase(where);
1125  }
1126 
1127  iterator unsafe_erase(const_iterator first, const_iterator last) {
1128  while (first != last)
1129  unsafe_erase(first++);
1130  return my_solist.get_iterator(first);
1131  }
1132 
1133  size_type unsafe_erase(const key_type& key) {
1134  pairii_t where = equal_range(key);
1135  size_type item_count = internal_distance(where.first, where.second);
1136  unsafe_erase(where.first, where.second);
1137  return item_count;
1138  }
1139 
1140 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1141  node_type unsafe_extract(const_iterator where) {
1142  return internal_extract(where).first;
1143  }
1144 
1145  node_type unsafe_extract(const key_type& key) {
1146  pairii_t where = equal_range(key);
1147  if (where.first == end()) return node_type(); // element was not found
1148  return internal_extract(where.first).first;
1149  }
1150 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1151 
1152  void swap(concurrent_unordered_base& right) {
1153  if (this != &right) {
1154  std::swap(my_hash_compare, right.my_hash_compare);
1155  my_solist.swap(right.my_solist);
1156  internal_swap_buckets(right);
1157  std::swap(my_number_of_buckets, right.my_number_of_buckets);
1158  std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
1159  }
1160  }
1161 
1162  // Observers
1163  hasher hash_function() const {
1164  return my_hash_compare.my_hash_object;
1165  }
1166 
1167  key_equal key_eq() const {
1168  return my_hash_compare.my_key_compare_object;
1169  }
1170 
1171  void clear() {
1172  // Clear list
1173  my_solist.clear();
1174 
1175  // Clear buckets
1176  internal_clear();
1177 
1178  // Initialize bucket 0
1179  __TBB_ASSERT(my_buckets[0] == NULL, NULL);
1180  raw_iterator dummy_node = my_solist.raw_begin();
1181  set_bucket(0, dummy_node);
1182  }
1183 
1184  // Lookup
1185  iterator find(const key_type& key) {
1186  return internal_find(key);
1187  }
1188 
1189  const_iterator find(const key_type& key) const {
1190  return const_cast<self_type*>(this)->internal_find(key);
1191  }
1192 
1193  size_type count(const key_type& key) const {
1194  if(allow_multimapping) {
1195  paircc_t answer = equal_range(key);
1196  size_type item_count = internal_distance(answer.first, answer.second);
1197  return item_count;
1198  } else {
1199  return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
1200  }
1201  }
1202 
1203  std::pair<iterator, iterator> equal_range(const key_type& key) {
1204  return internal_equal_range(key);
1205  }
1206 
1207  std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
1208  return const_cast<self_type*>(this)->internal_equal_range(key);
1209  }
1210 
1211  // Bucket interface - for debugging
1212  size_type unsafe_bucket_count() const {
1213  return my_number_of_buckets;
1214  }
1215 
1216  size_type unsafe_max_bucket_count() const {
1217  return segment_size(pointers_per_table-1);
1218  }
1219 
1220  size_type unsafe_bucket_size(size_type bucket) {
1221  size_type item_count = 0;
1222  if (is_initialized(bucket)) {
1223  raw_iterator it = get_bucket(bucket);
1224  ++it;
1225  for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
1226  ++item_count;
1227  }
1228  return item_count;
1229  }
1230 
1231  size_type unsafe_bucket(const key_type& key) const {
1232  sokey_t order_key = (sokey_t) my_hash_compare(key);
1233  size_type bucket = order_key % my_number_of_buckets;
1234  return bucket;
1235  }
1236 
1237  // If the bucket is initialized, return a first non-dummy element in it
1238  local_iterator unsafe_begin(size_type bucket) {
1239  if (!is_initialized(bucket))
1240  return end();
1241 
1242  raw_iterator it = get_bucket(bucket);
1243  return my_solist.first_real_iterator(it);
1244  }
1245 
1246  // If the bucket is initialized, return a first non-dummy element in it
1247  const_local_iterator unsafe_begin(size_type bucket) const
1248  {
1249  if (!is_initialized(bucket))
1250  return end();
1251 
1252  raw_const_iterator it = get_bucket(bucket);
1253  return my_solist.first_real_iterator(it);
1254  }
1255 
1256  // @REVIEW: Takes O(n)
1257  // Returns the iterator after the last non-dummy element in the bucket
1258  local_iterator unsafe_end(size_type bucket)
1259  {
1260  if (!is_initialized(bucket))
1261  return end();
1262 
1263  raw_iterator it = get_bucket(bucket);
1264 
1265  // Find the end of the bucket, denoted by the dummy element
1266  do ++it;
1267  while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1268 
1269  // Return the first real element past the end of the bucket
1270  return my_solist.first_real_iterator(it);
1271  }
1272 
1273  // @REVIEW: Takes O(n)
1274  // Returns the iterator after the last non-dummy element in the bucket
1275  const_local_iterator unsafe_end(size_type bucket) const
1276  {
1277  if (!is_initialized(bucket))
1278  return end();
1279 
1280  raw_const_iterator it = get_bucket(bucket);
1281 
1282  // Find the end of the bucket, denoted by the dummy element
1283  do ++it;
1284  while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1285 
1286  // Return the first real element past the end of the bucket
1287  return my_solist.first_real_iterator(it);
1288  }
1289 
1290  const_local_iterator unsafe_cbegin(size_type bucket) const {
1291  return ((const self_type *) this)->unsafe_begin(bucket);
1292  }
1293 
1294  const_local_iterator unsafe_cend(size_type bucket) const {
1295  return ((const self_type *) this)->unsafe_end(bucket);
1296  }
1297 
1298  // Hash policy
1299  float load_factor() const {
1300  return (float) size() / (float) unsafe_bucket_count();
1301  }
1302 
1303  float max_load_factor() const {
1304  return my_maximum_bucket_size;
1305  }
1306 
1307  void max_load_factor(float newmax) {
1308  if (newmax != newmax || newmax < 0)
1309  tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
1310  my_maximum_bucket_size = newmax;
1311  }
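// [Editorial aside; not part of the original source.] The self-comparison newmax != newmax
// above is the portable pre-C++11 NaN test: NaN is the only floating-point value that
// compares unequal to itself, so both NaN and negative inputs are rejected before the
// load factor is stored.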
1312 
1313  // This function is a noop, because the underlying split-ordered list
1314  // is already sorted, so an increase in the bucket number will be
1315  // reflected next time this bucket is touched.
1316  void rehash(size_type buckets) {
1317  size_type current_buckets = my_number_of_buckets;
1318  if (current_buckets >= buckets)
1319  return;
1320  my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
1321  }
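// [Editorial aside; not part of the original source.] The rounding expression above yields
// the smallest power of two not less than the request. Worked example: for buckets == 100,
// 2*100-1 == 199 and __TBB_Log2(199) == 7, so my_number_of_buckets becomes 1<<7 == 128.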
1322 
1323 private:
1324 
1325  // Initialize the hash and keep the first bucket open
1326  void internal_init() {
1327  // Initialize the array of segment pointers
1328  memset(my_buckets, 0, sizeof(my_buckets));
1329 
1330  // Initialize bucket 0
1331  raw_iterator dummy_node = my_solist.raw_begin();
1332  set_bucket(0, dummy_node);
1333  }
1334 
1335  void internal_clear() {
1336  for (size_type index = 0; index < pointers_per_table; ++index) {
1337  if (my_buckets[index] != NULL) {
1338  size_type sz = segment_size(index);
1339  for (size_type index2 = 0; index2 < sz; ++index2)
1340  my_allocator.destroy(&my_buckets[index][index2]);
1341  my_allocator.deallocate(my_buckets[index], sz);
1342  my_buckets[index] = 0;
1343  }
1344  }
1345  }
1346 
1347  void internal_copy(const self_type& right) {
1348  clear();
1349 
1350  my_maximum_bucket_size = right.my_maximum_bucket_size;
1351  my_number_of_buckets = right.my_number_of_buckets;
1352 
1353  __TBB_TRY {
1354  insert(right.begin(), right.end());
1355  my_hash_compare = right.my_hash_compare;
1356  } __TBB_CATCH(...) {
1357  my_solist.clear();
1358  __TBB_RETHROW();
1359  }
1360  }
1361 
1362  void internal_swap_buckets(concurrent_unordered_base& right)
1363  {
1364  // Swap all node segments
1365  for (size_type index = 0; index < pointers_per_table; ++index)
1366  {
1367  raw_iterator * iterator_pointer = my_buckets[index];
1368  my_buckets[index] = right.my_buckets[index];
1369  right.my_buckets[index] = iterator_pointer;
1370  }
1371  }
1372 
1373  //TODO: why not use std::distance?
1374  // Hash APIs
1375  static size_type internal_distance(const_iterator first, const_iterator last)
1376  {
1377  size_type num = 0;
1378 
1379  for (const_iterator it = first; it != last; ++it)
1380  ++num;
1381 
1382  return num;
1383  }
1384 
1385  // Insert an element in the hash given its value
1386  template<typename AllowCreate, typename AllowDestroy, typename ValueType>
1387  std::pair<iterator, bool> internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
1388  {
1389  const key_type *pkey = &get_key(value);
1390  sokey_t hash_key = (sokey_t) my_hash_compare(*pkey);
1391  size_type new_count = 0;
1392  sokey_t order_key = split_order_key_regular(hash_key);
1393  raw_iterator previous = prepare_bucket(hash_key);
1394  raw_iterator last = my_solist.raw_end();
1395  __TBB_ASSERT(previous != last, "Invalid head node");
1396 
1397  // First node is a dummy node
1398  for (raw_iterator where = previous;;)
1399  {
1400  ++where;
1401  if (where == last || solist_t::get_order_key(where) > order_key ||
1402  // if multimapped, stop at the first item equal to us.
1403  (allow_multimapping && solist_t::get_order_key(where) == order_key &&
1404  !my_hash_compare(get_key(*where), *pkey))) // TODO: fix negation
1405  {
1406  if (!pnode) {
1407  pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value), AllowCreate());
1408  // If the value was moved, the known reference to key might be invalid
1409  pkey = &get_key(pnode->my_element);
1410  }
1411  else
1412  {
1413  // Set new order_key to node
1414  pnode->init(order_key);
1415  }
1416 
1417  // Try to insert 'pnode' between 'previous' and 'where'
1418  std::pair<iterator, bool> result = my_solist.try_insert(previous, where, pnode, &new_count);
1419 
1420  if (result.second)
1421  {
1422  // Insertion succeeded, adjust the table size, if needed
1423  adjust_table_size(new_count, my_number_of_buckets);
1424  return result;
1425  }
1426  else
1427  {
1428  // Insertion failed: either the same node was inserted by another thread, or
1429  // another element was inserted at exactly the same place as this node.
1430  // Proceed with the search from the previous location where order key was
1431  // known to be larger (note: this is legal only because there is no safe
1432  // concurrent erase operation supported).
1433  where = previous;
1434  continue;
1435  }
1436  }
1437  else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
1438  !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation
1439  { // Element already in the list, return it
1440  if (pnode && AllowDestroy::value)
1441  my_solist.destroy_node(pnode);
1442  return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
1443  }
1444  // Move the iterator forward
1445  previous = where;
1446  }
1447  }
1448 
1449  // Find the element in the split-ordered list
1450  iterator internal_find(const key_type& key)
1451  {
1452  sokey_t hash_key = (sokey_t) my_hash_compare(key);
1453  sokey_t order_key = split_order_key_regular(hash_key);
1454  raw_iterator last = my_solist.raw_end();
1455 
1456  for (raw_iterator it = prepare_bucket(hash_key); it != last; ++it)
1457  {
1458  if (solist_t::get_order_key(it) > order_key)
1459  {
1460  // If the order key is smaller than the current order key, the element
1461  // is not in the hash.
1462  return end();
1463  }
1464  else if (solist_t::get_order_key(it) == order_key)
1465  {
1466  // The fact that order keys match does not mean that the element is found.
1467  // Key function comparison has to be performed to check whether this is the
1468  // right element. If not, keep searching while order key is the same.
1469  if (!my_hash_compare(get_key(*it), key)) // TODO: fix negation
1470  return my_solist.get_iterator(it);
1471  }
1472  }
1473 
1474  return end();
1475  }
1476 
1477  // Erase an element from the list. This is not a concurrency safe function.
1478  iterator internal_erase(const_iterator it)
1479  {
1480  sokey_t hash_key = (sokey_t) my_hash_compare(get_key(*it));
1481  raw_iterator previous = prepare_bucket(hash_key);
1482  raw_iterator last = my_solist.raw_end();
1483  __TBB_ASSERT(previous != last, "Invalid head node");
1484 
1485  // First node is a dummy node
1486  for (raw_iterator where = previous; where != last; previous = where) {
1487  ++where;
1488  if (my_solist.get_iterator(where) == it)
1489  return my_solist.erase_node(previous, it);
1490  }
1491  return end();
1492  }
1493 
1494 #if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1495  std::pair<node_type, raw_iterator> internal_extract(const_iterator it) {
1496  sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it)));
1497  raw_iterator previous = prepare_bucket(hash_key);
1498  raw_iterator last = my_solist.raw_end();
1499  __TBB_ASSERT(previous != last, "Invalid head node");
1500 
1501  for(raw_iterator where = previous; where != last; previous = where) {
1502  ++where;
1503  if (my_solist.get_iterator(where) == it) {
1504  const_iterator result = it;
1505  my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type());
1506  return std::pair<node_type, raw_iterator>( node_type(result.get_node_ptr()),
1507  previous);
1508  }
1509  }
1510  return std::pair<node_type, iterator>(node_type(), end());
1511  }
1512 #endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1513 
1514  // Return the [begin, end) pair of iterators with the same key values.
1515  // This operation makes sense only if mapping is many-to-one.
1516  pairii_t internal_equal_range(const key_type& key)
1517  {
1518  sokey_t hash_key = (sokey_t) my_hash_compare(key);
1519  sokey_t order_key = split_order_key_regular(hash_key);
1520  raw_iterator end_it = my_solist.raw_end();
1521 
1522  for (raw_iterator it = prepare_bucket(hash_key); it != end_it; ++it)
1523  {
1524  if (solist_t::get_order_key(it) > order_key)
1525  {
1526  // There is no element with the given key
1527  return pairii_t(end(), end());
1528  }
1529  else if (solist_t::get_order_key(it) == order_key &&
1530  !my_hash_compare(get_key(*it), key)) // TODO: fix negation; also below
1531  {
1532  iterator first = my_solist.get_iterator(it);
1533  iterator last = first;
1534  do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
1535  return pairii_t(first, last);
1536  }
1537  }
1538 
1539  return pairii_t(end(), end());
1540  }
1541 
1542  // Bucket APIs
1543  void init_bucket(size_type bucket)
1544  {
1545  // Bucket 0 has no parent.
1546  __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");
1547 
1548  size_type parent_bucket = get_parent(bucket);
1549 
1550  // All parent_bucket buckets have to be initialized before this bucket is
1551  if (!is_initialized(parent_bucket))
1552  init_bucket(parent_bucket);
1553 
1554  raw_iterator parent = get_bucket(parent_bucket);
1555 
1556  // Create a dummy first node in this bucket
1557  raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
1558  set_bucket(bucket, dummy_node);
1559  }
1560 
1561  void adjust_table_size(size_type total_elements, size_type current_size)
1562  {
1563  // Grow the table by a factor of 2 if possible and needed
1564  if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
1565  {
1566  // Double the size of the hash only if size has not changed in between loads
1567  my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
1568  //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
1569  //due to overzealous compiler warnings in /Wp64 mode
1570  }
1571  }
1572 
1572 
1573  size_type get_parent(size_type bucket) const
1574  {
1575  // Unsets bucket's most significant turned-on bit
1576  size_type msb = __TBB_Log2((uintptr_t)bucket);
1577  return bucket & ~(size_type(1) << msb);
1578  }
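// [Editorial aside; not part of the original source.] Clearing the most significant set
// bit maps a bucket to the bucket it was split from, e.g. get_parent(6) == 2 (110b -> 010b)
// and get_parent(5) == 1 (101b -> 001b); init_bucket() above recurses along exactly this
// parent chain until it reaches an initialized bucket.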
1579 
1580 
1581  // Dynamic sized array (segments)
1582  //! @return segment index of given index in the array
1583  static size_type segment_index_of( size_type index ) {
1584  return size_type( __TBB_Log2( uintptr_t(index|1) ) );
1585  }
1586 
1587  //! @return the first array index of given segment
1588  static size_type segment_base( size_type k ) {
1589  return (size_type(1)<<k & ~size_type(1));
1590  }
1591 
1592  //! @return segment size
1593  static size_type segment_size( size_type k ) {
1594  return k? size_type(1)<<k : 2;
1595  }
1596 
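// [Editorial aside; not part of the original source.] Together the three helpers above
// implement the usual TBB segmented-array layout:
//
//   bucket index:      0 1 | 2 3 | 4 5 6 7 | 8 ... 15 | ...
//   segment_index_of:  0 0 | 1 1 | 2 2 2 2 | 3 ...  3 | ...
//   segment_base(k):   0   | 2   | 4       | 8        | ...
//   segment_size(k):   2   | 2   | 4       | 8        | ...
//
// i.e. segment k > 0 holds 2^k buckets starting at index 2^k, so the table can grow
// without relocating existing buckets.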
1597  raw_iterator get_bucket(size_type bucket) const {
1598  size_type segment = segment_index_of(bucket);
1599  bucket -= segment_base(segment);
1600  __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
1601  return my_buckets[segment][bucket];
1602  }
1603 
1604  raw_iterator prepare_bucket(sokey_t hash_key) {
1605  size_type bucket = hash_key % my_number_of_buckets;
1606  size_type segment = segment_index_of(bucket);
1607  size_type index = bucket - segment_base(segment);
1608  if (my_buckets[segment] == NULL || my_buckets[segment][index].get_node_ptr() == NULL)
1609  init_bucket(bucket);
1610  return my_buckets[segment][index];
1611  }
1612 
1613  void set_bucket(size_type bucket, raw_iterator dummy_head) {
1614  size_type segment = segment_index_of(bucket);
1615  bucket -= segment_base(segment);
1616 
1617  if (my_buckets[segment] == NULL) {
1618  size_type sz = segment_size(segment);
1619  raw_iterator * new_segment = my_allocator.allocate(sz);
1620  std::memset(static_cast<void*>(new_segment), 0, sz*sizeof(raw_iterator));
1621 
1622  if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
1623  my_allocator.deallocate(new_segment, sz);
1624  }
1625 
1626  my_buckets[segment][bucket] = dummy_head;
1627  }
1628 
1629  bool is_initialized(size_type bucket) const {
1630  size_type segment = segment_index_of(bucket);
1631  bucket -= segment_base(segment);
1632 
1633  if (my_buckets[segment] == NULL)
1634  return false;
1635 
1636  raw_iterator it = my_buckets[segment][bucket];
1637  return (it.get_node_ptr() != NULL);
1638  }
1639 
1640  // Utilities for keys
1641 
1642  // A regular order key has its original hash value reversed and the last bit set
1643  static sokey_t split_order_key_regular(sokey_t order_key) {
1644  return __TBB_ReverseBits(order_key) | 0x1;
1645  }
1646 
1647  // A dummy order key has its original hash value reversed and the last bit unset
1648  static sokey_t split_order_key_dummy(sokey_t order_key) {
1649  return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
1650  }
1651 
1652  // Shared variables
1653  atomic<size_type> my_number_of_buckets; // Current table size
1654  solist_t my_solist; // List where all the elements are kept
1655  typename tbb::internal::allocator_rebind<allocator_type, raw_iterator>::type my_allocator; // Allocator object for segments
1656  float my_maximum_bucket_size; // Maximum size of the bucket
1657  atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table
1658 };
1659 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1660 #pragma warning(pop) // warning 4127 is back
1661 #endif
1662 
1663 } // namespace internal
1664 //! @endcond
1665 } // namespace interface5
1666 } // namespace tbb
1667 #endif // __TBB__concurrent_unordered_impl_H