Source listing of memory-manager.hpp, generated for Gecode by doxygen 1.8.3.1 on Thu Feb 14 2013 at 20:59:45.
(In the HTML original, "Go to the documentation of this file." links to the documented version of this file.)
1 /* -*- mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- */
2 /*
3  * Main authors:
4  * Christian Schulte <schulte@gecode.org>
5  *
6  * Contributing authors:
7  * Guido Tack <tack@gecode.org>
8  *
9  * Copyright:
10  * Christian Schulte, 2002
11  * Guido Tack, 2004
12  *
13  * Last modified:
14  * $Date: 2009-09-09 05:10:29 +1000 (Wed, 09 Sep 2009) $ by $Author: schulte $
15  * $Revision: 9692 $
16  *
17  * This file is part of Gecode, the generic constraint
18  * development environment:
19  * http://www.gecode.org
20  *
21  * Permission is hereby granted, free of charge, to any person obtaining
22  * a copy of this software and associated documentation files (the
23  * "Software"), to deal in the Software without restriction, including
24  * without limitation the rights to use, copy, modify, merge, publish,
25  * distribute, sublicense, and/or sell copies of the Software, and to
26  * permit persons to whom the Software is furnished to do so, subject to
27  * the following conditions:
28  *
29  * The above copyright notice and this permission notice shall be
30  * included in all copies or substantial portions of the Software.
31  *
32  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
33  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
34  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
35  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
36  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
37  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
38  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39  *
40  */
41 
42 namespace Gecode {
43 
// A chunk of memory of arbitrary size. Chunks are threaded onto singly
// linked lists elsewhere in this file (code below assigns and follows a
// `next` field on them).
// NOTE(review): the listing jumps from line 46 to 50 here — the original
// header declares additional members (presumably `MemoryChunk* next;`,
// which later code dereferences). Confirm against the real header.
45  class MemoryChunk {
46  public:
// Size of the chunk in bytes (set when allocated, read when recycled).
50  size_t size;
51  };
52 
// A heap-allocated chunk: a MemoryChunk header followed by the usable
// area. `area[1]` is the C-style flexible-array idiom — the chunk is
// over-allocated so the area extends to `size` bytes; alloc_fill()
// computes the header overhead as sizeof(HeapChunk) - sizeof(double).
54  class HeapChunk : public MemoryChunk {
55  public:
// First element of the allocatable area; the array actually extends
// past the end of the object into the over-allocated storage.
57  double area[1];
58  };
59 
60  class Region;
61 
// Reference-counted memory shared between users (see copy()/release()):
// holds (a) a fixed-size "region" scratch area used by Region and
// (b) a small cache of heap chunks to reduce global-heap traffic.
63  class SharedMemory {
64  friend class Region;
65  private:
// Number of users of this object; starts at 1 (see the constructor).
67  unsigned int use_cnt;
// Scratch area for region allocation.
69  struct {
// Bytes still free in `area`; region_alloc() allocates downwards.
71  size_t free;
// Backing storage; `double` elements presumably for maximal alignment.
73  double area[MemoryConfig::region_area_size / sizeof(double)];
74  } region;
// Cache of returned heap chunks.
76  struct {
// Number of chunks currently cached (bounded by
// MemoryConfig::n_hc_cache, see heap_free()).
78  unsigned int n_hc;
// NOTE(review): the listing jumps from line 78 to 81 — the original
// declares the cache's list head here (later code reads/writes
// `heap.hc`). Confirm against the real header.
81  } heap;
82  public:
// Initialize with use count one and an empty chunk cache.
84  SharedMemory(void);
// Free all cached heap chunks back to the global Gecode heap.
86  void flush(void);
// Destructor; flushes the cache.
88  ~SharedMemory(void);
90  //@
// Try to allocate `s` bytes from the region area; on success store the
// pointer in `p` and return true, otherwise return false.
92  bool region_alloc(size_t s, void*& p);
94 
95  //@
// Return a chunk of preferably `s` bytes but at least `l` bytes,
// reusing a cached chunk when the first cached one is large enough.
97  HeapChunk* heap_alloc(size_t s, size_t l);
// Return `hc` to the cache, or free it when the cache is full.
99  void heap_free(HeapChunk* hc);
101 
// Share this object (incrementing use_cnt) or return a fresh one.
102  SharedMemory* copy(bool share);
// Decrement use_cnt; true iff the last user has released the object.
104  bool release(void);
// SharedMemory objects are allocated from the global Gecode heap.
106  static void* operator new(size_t s);
108  static void operator delete(void* p);
109  };
110 
111 
// A cell on an intrusive singly linked free list; free memory of a
// fixed size is threaded through such cells (see fl_alloc/fl_refill).
// NOTE(review): the listing omits lines 122-125, including the member
// holding the successor pointer (the inline definitions below return
// and assign `_next`). Confirm against the real header.
120  class FreeList {
121  protected:
124  public:
// Default constructor (definition not shown in this listing).
126  FreeList(void);
// Constructor initializing the successor to `n`.
128  FreeList(FreeList* n);
// Read the successor cell.
130  FreeList* next(void) const;
// Address of the successor pointer.
132  FreeList** nextRef(void);
// Overwrite the successor cell.
134  void next(FreeList* n);
135  };
136 
// NOTE(review): the listing jumps from line 136 to 139 — the class head
// (`class MemoryManager { ...`) and apparently a first constructor
// declaration (a one-argument constructor is defined further below) are
// not shown. Confirm against the real header.
139  public:
// Initialize like the manager `mm`, additionally reserving `s_sub`
// bytes at the start of the first chunk (see subscriptions()).
143  MemoryManager(SharedMemory* sm, MemoryManager& mm, size_t s_sub);
// Return all heap chunks owned by this manager to `sm`.
145  void release(SharedMemory* sm);
146 
147  private:
// Size to request for the next heap chunk (adapted in alloc_fill()).
148  size_t cur_hcsz;
// Head of the list of heap chunks owned by this manager; the first
// chunk stays at the head (see alloc_fill()).
149  HeapChunk* cur_hc;
// Total bytes obtained from the shared memory so far.
150  size_t requested;
151 
// Start of the current chunk's not-yet-handed-out memory ...
152  char* start;
// ... and how many bytes of it remain.
153  size_t lsz;
154 
// Refill start/lsz so an allocation of `s` bytes fits.
157  void alloc_refill(SharedMemory* sm, size_t s);
// Acquire a fresh heap chunk catering for `s` bytes; `first` marks the
// initial chunk, which becomes (and stays) the list head.
159  void alloc_fill(SharedMemory* sm, size_t s, bool first);
160 
161  public:
// Allocate `s` bytes by bump allocation from the current chunk.
163  void* alloc(SharedMemory* sm, size_t s);
// Total bytes requested so far.
165  size_t allocated(void) const;
// The memory area at the start of the first chunk (reserved for
// subscriptions by the second constructor).
167  void* subscriptions(void) const;
168 
169  private:
// NOTE(review): the free-list bucket array (indexed as `fl[i]` by the
// code below) is declared in lines 170-172, omitted by this listing.
// Refill the bucket for cell size `sz` from slack or fresh memory.
173  template<size_t> void fl_refill(SharedMemory* sm);
// Map a cell size to its bucket index, and back (bodies not shown).
175  static size_t sz2i(size_t);
177  static size_t i2sz(size_t);
178 
179  public:
// Pop one cell of size `s` from its bucket, refilling when empty.
181  template<size_t s>
182  void* fl_alloc(SharedMemory* sm);
// Splice the chain of cells from `f` to `l` onto the matching bucket.
184  template<size_t> void fl_dispose(FreeList* f, FreeList* l);
185 
186  private:
// Chunks handed back via reuse(), carved into cells by fl_refill().
188  MemoryChunk* slack;
189  public:
// Make the `s` bytes at `p` available again (slack or free list).
191  void reuse(void* p, size_t s);
192  };
193 
194 
195  /*
196  * Shared memory area
197  *
198  */
199 
// SharedMemory objects live on the global Gecode heap: class-specific
// operator new/delete forward to Gecode::heap.
200  forceinline void*
201  SharedMemory::operator new(size_t s) {
202  return Gecode::heap.ralloc(s);
203  }
204  forceinline void
205  SharedMemory::operator delete(void* p) {
206  Gecode::heap.rfree(p);
207  }
// Constructor: single user, full region area, empty chunk cache.
// NOTE(review): the listing omits lines 208-209 (the signature,
// presumably `forceinline SharedMemory::SharedMemory(void)`); what
// follows is the initializer list and body.
210  : use_cnt(1) {
211  region.free = MemoryConfig::region_area_size;
212  heap.n_hc = 0;
213  heap.hc = NULL;
214  }
// flush: return every cached heap chunk to the global Gecode heap.
// (the qualified-name line, 216, is omitted by the listing)
215  forceinline void
217  heap.n_hc = 0;
218  while (heap.hc != NULL) {
219  HeapChunk* hc = heap.hc;
220  heap.hc = static_cast<HeapChunk*>(hc->next);
221  Gecode::heap.rfree(hc);
222  }
223  }
// Destructor body (signature at lines 224-225 omitted): just flush.
226  flush();
227  }
// copy: when sharing, hand out this object with an incremented use
// count; otherwise create a brand-new SharedMemory.
// (the return-type line, 228, is omitted by the listing)
229  SharedMemory::copy(bool share) {
230  if (share) {
231  use_cnt++;
232  return this;
233  } else {
234  return new SharedMemory();
235  }
236  }
// release: true iff the last user has just let go, i.e. the caller may
// now destroy the object. (qualified-name line 238 omitted)
237  forceinline bool
239  return --use_cnt == 0;
240  }
241  forceinline bool
// Bump-down allocation from the free end of the region scratch area.
// NOTE(review): line 243 is omitted by the listing — presumably the
// requested size is aligned there (cf. MemoryConfig::align used
// elsewhere) before the capacity check. Confirm against the header.
242  SharedMemory::region_alloc(size_t s, void*& p) {
// Fail when the request does not fit into what is left.
244  if (s > region.free)
245  return false;
// Carve `s` bytes off the end; the area fills from high to low.
246  region.free -= s;
247  p = Support::ptr_cast<char*>(&region.area[0]) + region.free;
248  return true;
249  }
// heap_alloc: return a chunk of preferably `s` but at least `l` bytes.
// (the return-type line, 250, is omitted by the listing)
251  SharedMemory::heap_alloc(size_t s, size_t l) {
// Drop cached chunks from the front that are too small for `l`.
252  while ((heap.hc != NULL) && (heap.hc->size < l)) {
253  heap.n_hc--;
254  HeapChunk* hc = heap.hc;
255  heap.hc = static_cast<HeapChunk*>(hc->next);
256  Gecode::heap.rfree(hc);
257  }
258  if (heap.hc == NULL) {
// Cache exhausted: allocate a fresh chunk of the preferred size `s`.
259  assert(heap.n_hc == 0);
260  HeapChunk* hc = static_cast<HeapChunk*>(Gecode::heap.ralloc(s));
261  hc->size = s;
262  return hc;
263  } else {
// Reuse the first cached chunk (the loop above ensured it fits `l`).
264  heap.n_hc--;
265  HeapChunk* hc = heap.hc;
266  heap.hc = static_cast<HeapChunk*>(hc->next);
267  return hc;
268  }
269  }
270  forceinline void
// heap_free: cache `hc` for reuse unless the cache already holds
// n_hc_cache chunks, in which case it is freed for real.
// (the qualified-name line, 271, is omitted by the listing)
272  if (heap.n_hc == MemoryConfig::n_hc_cache) {
273  Gecode::heap.rfree(hc);
274  } else {
275  heap.n_hc++;
276  hc->next = heap.hc; heap.hc = hc;
277  }
278  }
279 
280 
281  /*
282  * Freelists
283  *
284  */
285 
288 
// Inline FreeList members. NOTE(review): the listing omits several
// lines here (287, 289-290, 293, 298-299, 304) carrying the default
// constructor's definition and the signatures of the members below.
// Confirm against the real header.
// Constructor: initialize the successor pointer to `n`.
291  : _next(n) {}
292 
// Read the successor cell.
294  FreeList::next(void) const {
295  return _next;
296  }
297 
// nextRef body: expose the address of the successor pointer.
300  return &_next;
301  }
302 
// Setter body: overwrite the successor pointer.
303  forceinline void
305  _next = n;
306  }
307 
308  forceinline size_t
// sz2i: map a cell size to its free-list bucket index.
// NOTE(review): the body (lines 310-312) is omitted by the listing;
// only the closing brace survives. Confirm against the real header.
309  MemoryManager::sz2i(size_t s) {
313  }
314 
315  forceinline size_t
// i2sz: inverse mapping from bucket index back to cell size.
// (body line 317 omitted by the listing)
316  MemoryManager::i2sz(size_t i) {
318  }
319 
320 
321  /*
322  * The active memory manager
323  *
324  */
325 
326  forceinline size_t
// allocated body: total bytes requested from shared memory so far.
// (qualified-name line 327 omitted by the listing)
328  return requested;
329  }
330 
331  forceinline void*
// alloc body: bump allocation — carve `sz` bytes off the end of the
// current chunk, refilling first when it does not fit.
// NOTE(review): lines 332 (signature) and 335 (the alignment statement
// announced by the comment below) are omitted by the listing.
333  assert(sz > 0);
334  // Perform alignment
336  // Check whether sufficient memory left
337  if (sz > lsz)
338  alloc_refill(sm,sz);
// Hand out memory from the high end of the remaining area.
339  lsz -= sz;
340  return start + lsz;
341  }
342 
343  forceinline void*
// subscriptions body: the area reserved at the start of the first
// chunk. (signature line 344 omitted by the listing)
345  return &cur_hc->area[0];
346  }
347 
// Acquire a fresh heap chunk that caters for `sz` bytes and make it the
// current allocation arena; `first` marks the manager's initial chunk.
348  forceinline void
349  MemoryManager::alloc_fill(SharedMemory* sm, size_t sz, bool first) {
350  // Adjust current heap chunk size
// Double the chunk size when much has been requested already (or the
// request itself is large), up to the configured maximum.
351  if (((requested > MemoryConfig::hcsz_inc_ratio*cur_hcsz) ||
352  (sz > cur_hcsz)) &&
353  (cur_hcsz < MemoryConfig::hcsz_max)) {
354  cur_hcsz <<= 1;
355  }
356  // Increment the size that it caters for the initial overhead
// The HeapChunk header overlaps the first double of its area, hence
// the subtraction.
357  size_t overhead = sizeof(HeapChunk) - sizeof(double);
358  sz += overhead;
359  // Round size to next multiple of current heap chunk size
360  size_t allocate = ((sz > cur_hcsz) ?
361  (((size_t) (sz / cur_hcsz)) + 1) * cur_hcsz : cur_hcsz);
362  // Request a chunk of preferably size allocate, but at least size sz
363  HeapChunk* hc = sm->heap_alloc(allocate,sz);
// Hand out memory starting right after the chunk header.
364  start = Support::ptr_cast<char*>(&hc->area[0]);
365  lsz = hc->size - overhead;
366  // Link heap chunk, where the first heap chunk is kept in place
367  if (first) {
368  requested = hc->size;
369  hc->next = NULL; cur_hc = hc;
370  } else {
// Later chunks are linked in right after the first one, so cur_hc
// (and hence subscriptions()) stays stable.
371  requested += hc->size;
372  hc->next = cur_hc->next; cur_hc->next = hc;
373  }
374 #ifdef GECODE_MEMORY_CHECK
// Debug builds zero fresh memory to make use of stale data visible.
375  for (char* c = start; c < (start+lsz); c++)
376  *c = 0;
377 #endif
378  }
379 
// First constructor: start with the minimal chunk size and acquire one
// fresh chunk. NOTE(review): the signature lines (380-381) are omitted
// by the listing — presumably `MemoryManager::MemoryManager(
// SharedMemory* sm)`. Confirm against the real header.
382  : cur_hcsz(MemoryConfig::hcsz_min), requested(0), slack(NULL) {
383  alloc_fill(sm,cur_hcsz,true);
// Empty all free-list buckets. NOTE(review): the for-loop header
// (line 384) is omitted by the listing; only its continuation and
// body survive below.
385  i--; )
386  fl[i] = NULL;
387  }
388 
// Second constructor: clone sizing behavior from `mm` and reserve
// `s_sub` bytes at the chunk start for subscriptions.
// NOTE(review): signature lines 389-390 are omitted by the listing;
// the parameter list continues with `s_sub` below.
391  size_t s_sub)
392  : cur_hcsz(mm.cur_hcsz), requested(0), slack(NULL) {
393  MemoryConfig::align(s_sub);
// Shrink the chunk size again when the cloned manager used little of
// its chunk, but never below the minimum or below twice s_sub.
394  if ((mm.requested < MemoryConfig::hcsz_dec_ratio*mm.cur_hcsz) &&
395  (cur_hcsz > MemoryConfig::hcsz_min) &&
396  (s_sub*2 < cur_hcsz))
397  cur_hcsz >>= 1;
398  alloc_fill(sm,cur_hcsz+s_sub,true);
399  // Skip the memory area at the beginning for subscriptions
400  lsz -= s_sub;
401  start += s_sub;
// Empty all free-list buckets (for-loop header, line 402, omitted).
403  i--; )
404  fl[i] = NULL;
405  }
406 
407  forceinline void
// release body: hand every chunk in the list back to the shared
// memory's cache. (signature line 408 omitted by the listing)
409  // Release all allocated heap chunks
410  HeapChunk* hc = cur_hc;
411  do {
// Advance before freeing, since heap_free relinks t->next.
412  HeapChunk* t = hc; hc = static_cast<HeapChunk*>(hc->next);
413  sm->heap_free(t);
414  } while (hc != NULL);
415  }
416 
417 
418 
419  /*
420  * Slack memory management
421  *
422  */
// Make the `s` bytes at `p` available for reuse, either on the slack
// list (carved up later by fl_refill) or directly on a free list.
423  forceinline void
424  MemoryManager::reuse(void* p, size_t s) {
425 #ifdef GECODE_MEMORY_CHECK
// Debug builds zero returned memory to make use-after-free visible.
426  {
427  char* c = static_cast<char*>(p);
428  char* e = c + s;
429  while (c < e) {
430  *c = 0; c++;
431  }
432  }
433 #endif
// NOTE(review): lines 434 and 436 are omitted by the listing. They
// carry the size conditions selecting between the three cases visible
// below: too small to keep at all (the bare `return`), large enough
// for the slack list, or free-list sized. Confirm against the header.
435  return;
437  MemoryChunk* rc = static_cast<MemoryChunk*>(p);
438  rc->next = slack;
439  rc->size = s;
440  slack = rc;
441  } else {
// Otherwise push the single cell onto its matching bucket.
442  size_t i = sz2i(s);
443  FreeList* f = static_cast<FreeList*>(p);
444  f->next(fl[i]); fl[i]=f;
445  }
446  }
447 
448 
449  /*
450  * Freelist management
451  *
452  */
453 
// fl_alloc: pop one cell of size `s` from its bucket, refilling the
// bucket first when it is empty.
// (the signature line, 456, is omitted by the listing)
454  template<size_t s>
455  forceinline void*
457  size_t i = sz2i(s);
458  FreeList* f = fl[i];
459  if (f == NULL) {
// fl_refill leaves the bucket non-empty, so f is valid afterwards.
460  fl_refill<s>(sm); f = fl[i];
461  }
462  FreeList* n = f->next();
463  fl[i] = n;
464  return f;
465  }
466 
// fl_dispose: splice the whole chain from `f` to `l` (cells of size
// `s`) onto the front of the matching bucket.
// (the signature line, 469, is omitted by the listing)
467  template<size_t s>
468  forceinline void
470  size_t i = sz2i(s);
471  l->next(fl[i]); fl[i] = f;
472  }
473 
// Refill the free-list bucket for cell size `sz`: prefer carving up
// chunks from the slack list; otherwise allocate a batch of
// MemoryConfig::fl_refill fresh cells in one go.
474  template<size_t sz>
475  void
476  MemoryManager::fl_refill(SharedMemory* sm) {
477  // Try to acquire memory from slack
478  if (slack != NULL) {
// Take over the entire slack list and carve each chunk into
// consecutive cells of size sz.
479  MemoryChunk* m = slack;
480  slack = NULL;
481  do {
482  char* block = Support::ptr_cast<char*>(m);
483  size_t s = m->size;
484  assert(s >= sz);
// Read the successor before the chunk's memory is reused as cells.
485  m = m->next;
486  fl[sz2i(sz)] = Support::ptr_cast<FreeList*>(block);
// Thread the cells together; a trailing remainder smaller than a
// full cell stays attached to the last cell.
487  while (s >= 2*sz) {
488  Support::ptr_cast<FreeList*>(block)->next
489  (Support::ptr_cast<FreeList*>(block+sz));
490  block += sz;
491  s -= sz;
492  }
493  Support::ptr_cast<FreeList*>(block)->next(NULL);
494  } while (m != NULL);
495  } else {
// No slack available: grab fl_refill*sz bytes via bump allocation
// and chain them into the bucket.
496  char* block = static_cast<char*>(alloc(sm,MemoryConfig::fl_refill*sz));
497  fl[sz2i(sz)] = Support::ptr_cast<FreeList*>(block);
// Link the cells from the second-to-last down to the first; the
// final cell terminates the list below.
498  int i = MemoryConfig::fl_refill-2;
499  do {
500  Support::ptr_cast<FreeList*>(block+i*sz)->next
501  (Support::ptr_cast<FreeList*>(block+(i+1)*sz));
502  } while (--i >= 0);
503  Support::ptr_cast<FreeList*>(block+
504  (MemoryConfig::fl_refill-1)*sz)->next
505  (Support::ptr_cast<FreeList*>(NULL));
506  }
507  }
508 
509 }
510 
511 // STATISTICS: kernel-core