Stxxl  1.2.1
stack.h
/***************************************************************************
 *  include/stxxl/bits/containers/stack.h
 *
 *  Part of the STXXL. See http://stxxl.sourceforge.net
 *
 *  Copyright (C) 2003-2004 Roman Dementiev <dementiev@mpi-sb.mpg.de>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/

#ifndef STXXL_STACK_HEADER
#define STXXL_STACK_HEADER

#include <stack>
#include <vector>

#include <stxxl/bits/mng/mng.h>
#include <stxxl/bits/common/simple_vector.h>
#include <stxxl/bits/common/tmeta.h>
#include <stxxl/bits/mng/write_pool.h>
#include <stxxl/bits/mng/prefetch_pool.h>


__STXXL_BEGIN_NAMESPACE

template <class ValTp,
          unsigned BlocksPerPage = 4,
          unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),
          class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
          class SzTp = stxxl::int64>
struct stack_config_generator
{
    typedef ValTp value_type;
    enum { blocks_per_page = BlocksPerPage };
    typedef AllocStr alloc_strategy;
    enum { block_size = BlkSz };
    typedef SzTp size_type;
};
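
// A quick illustration (not part of the original header): instantiating the
// generator merely bundles the template parameters as nested types/enums
// that the stack implementations below consume.
//
//     typedef stxxl::stack_config_generator<int, 2> cfg;
//     // cfg::value_type      == int
//     // cfg::blocks_per_page == 2
//     // cfg::block_size      == STXXL_DEFAULT_BLOCK_SIZE(int)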


//! \brief External stack container, conservative implementation
template <class Config_>
class normal_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator front_page;
    typename simple_vector<block_type>::iterator back_page;
    std::vector<bid_type> bids;
    alloc_strategy alloc_strategy_;

public:
    normal_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    void swap(normal_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(front_page, obj.front_page);
        std::swap(back_page, obj.back_page);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy_, obj.alloc_strategy_);
    }

    //! \brief Construction from a stack
    //! \param stack_ stack object (could be external or internal; it must have a copy constructor, \c top() and \c pop() methods)
    template <class stack_type>
    normal_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~normal_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        //assert(cache_offset >= 0);

        if (cache_offset == 2 * blocks_per_page * block_type::size) // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(
                offset_allocator<alloc_strategy>(cur_bid - bids.begin(), alloc_strategy_), cur_bid, bids.end());

            simple_vector<request_ptr> requests(blocks_per_page);

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                requests[i] = (back_page + i)->write(*cur_bid);
            }


            std::swap(back_page, front_page);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = blocks_per_page * block_type::size + 1;
            current_element = &((*front_page)[0]);
            ++size_;

            wait_all(requests.begin(), blocks_per_page);

            *current_element = val;

            return;
        }

        current_element = element(cache_offset);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }
    void pop()
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);

        if (cache_offset == 1 && bids.size() >= blocks_per_page)
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            simple_vector<request_ptr> requests(blocks_per_page);

            {
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end();
                for (int i = blocks_per_page - 1; i >= 0; --i)
                {
                    requests[i] = (front_page + i)->read(*(--cur_bid));
                }
            }

            std::swap(front_page, back_page);

            cache_offset = blocks_per_page * block_type::size;
            --size_;
            current_element = &((*(back_page + (blocks_per_page - 1)))[block_type::size - 1]);

            wait_all(requests.begin(), blocks_per_page);

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            return;
        }

        --size_;

        current_element = element((--cache_offset) - 1);
    }

private:
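    // Maps a logical offset inside the two cached pages to the address of
    // the corresponding element: offsets within the first page resolve into
    // back_page, all others into front_page.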
    value_type * element(unsigned_type offset)
    {
        if (offset < blocks_per_page * block_type::size)
            return &((*(back_page + offset / block_type::size))[offset % block_type::size]);


        unsigned_type unbiased_offset = offset - blocks_per_page * block_type::size;
        return &((*(front_page + unbiased_offset / block_type::size))[unbiased_offset % block_type::size]);
    }
};
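
// A usage sketch (not part of the original header): normal_stack is the
// conservative external implementation, suited to irregularly mixed
// push/pop sequences.
//
//     typedef stxxl::normal_stack<stxxl::stack_config_generator<int> > stack_type;
//     stack_type S;
//     S.push(1);
//     S.push(2);
//     assert(S.top() == 2);
//     S.pop();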


//! \brief Efficient implementation that uses prefetching and overlapping using internal buffers
template <class Config_>
class grow_shrink_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator cache_buffers;
    typename simple_vector<block_type>::iterator overlap_buffers;
    simple_vector<request_ptr> requests;
    std::vector<bid_type> bids;
    alloc_strategy alloc_strategy_;

public:
    grow_shrink_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    void swap(grow_shrink_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(cache_buffers, obj.cache_buffers);
        std::swap(overlap_buffers, obj.overlap_buffers);
        std::swap(requests, obj.requests);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy_, obj.alloc_strategy_);
    }

    //! \brief Construction from a stack
    //! \param stack_ stack object (could be external or internal; it must have a copy constructor, \c top() and \c pop() methods)
    template <class stack_type>
    grow_shrink_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~grow_shrink_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        try
        {
            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        //assert(cache_offset >= 0);

        if (cache_offset == blocks_per_page * block_type::size) // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(
                offset_allocator<alloc_strategy>(cur_bid - bids.begin(), alloc_strategy_), cur_bid, bids.end());

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                if (requests[i].get())
                    requests[i]->wait();

                requests[i] = (cache_buffers + i)->write(*cur_bid);
            }

            std::swap(cache_buffers, overlap_buffers);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = 1;
            current_element = &((*cache_buffers)[0]);
            ++size_;

            *current_element = val;

            return;
        }

        current_element = &((*(cache_buffers + cache_offset / block_type::size))[cache_offset % block_type::size]);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }
    void pop()
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);

        if (cache_offset == 1 && bids.size() >= blocks_per_page)
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);


            std::swap(cache_buffers, overlap_buffers);

            if (bids.size() > blocks_per_page)
            {
                STXXL_VERBOSE2("prefetching, size: " << size_);
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end() - blocks_per_page;
                for (int i = blocks_per_page - 1; i >= 0; --i)
                    requests[i] = (overlap_buffers + i)->read(*(--cur_bid));
            }

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            cache_offset = blocks_per_page * block_type::size;
            --size_;
            current_element = &((*(cache_buffers + (blocks_per_page - 1)))[block_type::size - 1]);

            return;
        }

        --size_;
        unsigned_type cur_offset = (--cache_offset) - 1;
        current_element = &((*(cache_buffers + cur_offset / block_type::size))[cur_offset % block_type::size]);
    }
};
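
// A usage sketch (not part of the original header): grow_shrink_stack keeps
// a second page of overlap buffers, so the page writes issued by push and
// the page reads issued by pop run concurrently with computation. It pays
// off for long runs of pushes followed by long runs of pops.
//
//     typedef stxxl::grow_shrink_stack<
//         stxxl::stack_config_generator<double> > stack_type;
//     stack_type S;
//     for (stxxl::int64 i = 0; i < 1000000; ++i)
//         S.push(0.5 * i);    // page write-backs overlap the pushes
//     while (!S.empty())
//         S.pop();            // pages are prefetched ahead of the pops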

//! \brief Efficient implementation that uses prefetching and overlapping using (shared) buffer pools
template <class Config_>
class grow_shrink_stack2 : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page, // stack of this type has only one page
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;
    unsigned_type cache_offset;
    block_type * cache;
    value_type current;
    std::vector<bid_type> bids;
    alloc_strategy alloc_strategy_;
    unsigned_type pref_aggr;
    prefetch_pool<block_type> & p_pool;
    write_pool<block_type> & w_pool;

public:
    //! \brief Constructs stack
    //! \param p_pool_ prefetch pool that will be used for block prefetching
    //! \param w_pool_ write pool that will be used for block writing
    //! \param prefetch_aggressiveness number of blocks that will be used from the prefetch pool
    grow_shrink_stack2(
        prefetch_pool<block_type> & p_pool_,
        write_pool<block_type> & w_pool_,
        unsigned_type prefetch_aggressiveness = 0) :
        size_(0),
        cache_offset(0),
        cache(new block_type),
        pref_aggr(prefetch_aggressiveness),
        p_pool(p_pool_),
        w_pool(w_pool_)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }

    void swap(grow_shrink_stack2 & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(cache, obj.cache);
        std::swap(current, obj.current);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy_, obj.alloc_strategy_);
        std::swap(pref_aggr, obj.pref_aggr);
        //std::swap(p_pool,obj.p_pool);
        //std::swap(w_pool,obj.w_pool);
    }

    virtual ~grow_shrink_stack2()
    {
        try
        {
            STXXL_VERBOSE2("grow_shrink_stack2::~grow_shrink_stack2()");
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            int_type i;
            for (i = bids_size - 1; i >= last_pref; --i)
            {
                if (p_pool.in_prefetching(bids[i]))
                    p_pool.read(cache, bids[i])->wait();
                // clean the prefetch buffer
            }
            typename std::vector<bid_type>::iterator cur = bids.begin();
            typename std::vector<bid_type>::const_iterator end = bids.end();
            for ( ; cur != end; ++cur)
            {
                block_type * b = w_pool.steal(*cur);
                if (b)
                {
                    w_pool.add(cache); // return buffer
                    cache = b;
                }
            }
            delete cache;
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const { return size_; }

    bool empty() const
    {
        return (!size_);
    }

    void push(const value_type & val)
    {
        STXXL_VERBOSE3("grow_shrink_stack2::push(" << val << ")");
        assert(cache_offset <= block_type::size);

        if (cache_offset == block_type::size)
        {
            STXXL_VERBOSE2("grow_shrink_stack2::push(" << val << ") growing, size: " << size_);

            bids.resize(bids.size() + 1);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - 1;
            block_manager::get_instance()->new_blocks(
                offset_allocator<alloc_strategy>(cur_bid - bids.begin(), alloc_strategy_), cur_bid, bids.end());
            w_pool.write(cache, bids.back());
            cache = w_pool.steal();
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr) - 1, (int_type)0);
            for (int_type i = bids_size - 2; i >= last_pref; --i)
            {
                if (p_pool.in_prefetching(bids[i]))
                    p_pool.read(cache, bids[i])->wait();
                // clean prefetch buffers
            }
            cache_offset = 0;
        }
        current = val;
        (*cache)[cache_offset] = val;
        ++size_;
        ++cache_offset;

        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
    }
    value_type & top()
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return current;
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return current;
    }
    void pop()
    {
        STXXL_VERBOSE3("grow_shrink_stack2::pop()");
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        if (cache_offset == 1 && (!bids.empty()))
        {
            STXXL_VERBOSE2("grow_shrink_stack2::pop() shrinking, size = " << size_);

            bid_type last_block = bids.back();
            bids.pop_back();
            /*block_type * b = w_pool.steal(last_block);
               if(b)
               {
                   STXXL_VERBOSE2("grow_shrink_stack2::pop() block is still in write buffer");
                   w_pool.add(cache);
                   cache = b;
               }
               else*/
            {
                //STXXL_VERBOSE2("grow_shrink_stack2::pop() block is no longer in write buffer"
                //               ", reading from prefetch/read pool");
                p_pool.read(cache, last_block)->wait();
            }
            block_manager::get_instance()->delete_block(last_block);
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            for (int_type i = bids_size - 1; i >= last_pref; --i)
            {
                p_pool.hint(bids[i]); // prefetch
            }
            cache_offset = block_type::size + 1;
        }

        --cache_offset;
        if (cache_offset > 0)
            current = (*cache)[cache_offset - 1];

        --size_;
    }
    //! \brief Sets level of prefetch aggressiveness (number of blocks from the prefetch pool used for prefetching)
    //! \param new_p new value for the prefetch aggressiveness
    void set_prefetch_aggr(unsigned_type new_p)
    {
        if (pref_aggr > new_p)
        {
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            for (int_type i = bids_size - new_p - 1; i >= last_pref; --i)
            {
                if (p_pool.in_prefetching(bids[i]))
                    p_pool.read(cache, bids[i])->wait();
                // clean prefetch buffers
            }
        }
        else if (pref_aggr < new_p)
        {
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(new_p), (int_type)0);
            for (int_type i = bids_size - 1; i >= last_pref; --i)
            {
                p_pool.hint(bids[i]); // prefetch
            }
        }
        pref_aggr = new_p;
    }
    //! \brief Returns number of blocks used for prefetching
    unsigned_type get_prefetch_aggr() const
    {
        return pref_aggr;
    }
};
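
// A usage sketch (pool sizes chosen arbitrarily; assumes the pool
// constructors take an initial number of buffers): grow_shrink_stack2 does
// not own page buffers but borrows them from shared prefetch and write
// pools, so many stacks can be served by one pair of pools.
//
//     typedef stxxl::grow_shrink_stack2<
//         stxxl::stack_config_generator<int, 1> > stack_type;
//     stxxl::prefetch_pool<stack_type::block_type> p_pool(10);
//     stxxl::write_pool<stack_type::block_type> w_pool(10);
//     stack_type S(p_pool, w_pool, 4);  // prefetch aggressiveness: 4 blocks
//     S.push(7);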


//! \brief A stack that migrates from internal memory to external when its size exceeds a certain threshold
template <unsigned_type CritSize, class ExternalStack, class InternalStack>
class migrating_stack : private noncopyable
{
public:
    typedef typename ExternalStack::cfg cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };


    typedef InternalStack int_stack_type;
    typedef ExternalStack ext_stack_type;

private:
    enum { critical_size = CritSize };

    int_stack_type * int_impl;
    ext_stack_type * ext_impl;

    // not implemented yet
    template <class stack_type>
    migrating_stack(const stack_type & stack_);

public:
    migrating_stack() : int_impl(new int_stack_type()), ext_impl(NULL) { }

    void swap(migrating_stack & obj)
    {
        std::swap(int_impl, obj.int_impl);
        std::swap(ext_impl, obj.ext_impl);
    }

    //! \brief Returns true if the current implementation is internal, otherwise false
    bool internal() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return int_impl;
    }
    //! \brief Returns true if the current implementation is external, otherwise false
    bool external() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return ext_impl;
    }

    bool empty() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            return int_impl->empty();


        return ext_impl->empty();
    }
    size_type size() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            return int_impl->size();


        return ext_impl->size();
    }
    value_type & top()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            return int_impl->top();


        return ext_impl->top();
    }
    const value_type & top() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            return int_impl->top();


        return ext_impl->top();
    }
    void push(const value_type & val)
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
        {
            int_impl->push(val);
            if (int_impl->size() == critical_size)
            {
                // migrate to external stack
                ext_impl = new ext_stack_type(*int_impl);
                delete int_impl;
                int_impl = NULL;
            }
        }
        else
            ext_impl->push(val);
    }
    void pop()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            int_impl->pop();

        else
            ext_impl->pop();
    }
    virtual ~migrating_stack()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            delete int_impl;

        else
            delete ext_impl;
    }
};
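
// A usage sketch (CritSize chosen arbitrarily): the stack starts with the
// internal implementation and copies its contents into the external one the
// moment the element count reaches the threshold.
//
//     typedef stxxl::normal_stack<stxxl::stack_config_generator<int> > ext_stack;
//     typedef stxxl::migrating_stack<4096, ext_stack, std::stack<int> > mig_stack;
//     mig_stack S;            // internal std::stack at first
//     for (int i = 0; i < 5000; ++i)
//         S.push(i);          // migrates to ext_stack when size() == 4096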



enum stack_externality { external, migrating, internal };
enum stack_behaviour { normal, grow_shrink, grow_shrink2 };

//! \brief Stack type generator
//! \tparam ValTp type of contained objects
//! \tparam Externality one of \c external (default), \c migrating, \c internal
//! \tparam Behaviour chooses the external implementation: \c normal (default), \c grow_shrink, or \c grow_shrink2
//! \tparam BlocksPerPage number of blocks in a page of the internal cache, default is four
//! \tparam BlkSz external block size in bytes, default is STXXL_DEFAULT_BLOCK_SIZE(ValTp)
//! \tparam IntStackTp type of the internal stack used by the internal and migrating variants
//! \tparam MigrCritSize number of elements at which a migrating stack moves to external memory
//! \tparam AllocStr allocation strategy, default is STXXL_DEFAULT_ALLOC_STRATEGY
//! \tparam SzTp size type, default is stxxl::int64
template <
    class ValTp,
    stack_externality Externality = external,
    stack_behaviour Behaviour = normal,
    unsigned BlocksPerPage = 4,
    unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),

    class IntStackTp = std::stack<ValTp>,
    unsigned_type MigrCritSize = (2 * BlocksPerPage * BlkSz),

    class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
    class SzTp = stxxl::int64
    >
class STACK_GENERATOR
{
    typedef stack_config_generator<ValTp, BlocksPerPage, BlkSz, AllocStr, SzTp> cfg;

    typedef typename IF<Behaviour == grow_shrink,
                        grow_shrink_stack<cfg>,
                        grow_shrink_stack2<cfg> >::result GrShrTp;
    typedef typename IF<Behaviour == normal, normal_stack<cfg>, GrShrTp>::result ExtStackTp;
    typedef typename IF<Externality == migrating,
                        migrating_stack<MigrCritSize, ExtStackTp, IntStackTp>, ExtStackTp>::result MigrOrNotStackTp;

public:
    typedef typename IF<Externality == internal, IntStackTp, MigrOrNotStackTp>::result result;
};
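
// A usage sketch: the generator resolves Externality/Behaviour to one of the
// implementations above and exposes it as the nested type \c result.
//
//     typedef stxxl::STACK_GENERATOR<double>::result stack_type;  // external, normal
//     stack_type S;
//     S.push(3.14);
//     double x = S.top();
//     S.pop();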


__STXXL_END_NAMESPACE


namespace std
{
    template <class Config_>
    void swap(stxxl::normal_stack<Config_> & a,
              stxxl::normal_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack<Config_> & a,
              stxxl::grow_shrink_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack2<Config_> & a,
              stxxl::grow_shrink_stack2<Config_> & b)
    {
        a.swap(b);
    }

    template <stxxl::unsigned_type CritSize, class ExternalStack, class InternalStack>
    void swap(stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & a,
              stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & b)
    {
        a.swap(b);
    }
}

#endif // !STXXL_STACK_HEADER