GNU libmicrohttpd  0.9.73
memorypool.c
/*
  This file is part of libmicrohttpd
  Copyright (C) 2007--2019 Daniel Pittman, Christian Grothoff and
  Karlson2k (Evgeny Grin)

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

/**
 * @file memorypool.c
 * @brief memory pool; mostly used for efficient (de)allocation for each
 *        connection and bounding memory use for each request
 */

#include "memorypool.h"
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "mhd_assert.h"
#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef HAVE_SYSCONF
#include <unistd.h>
#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */
#endif /* HAVE_SYSCONF */

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif

/**
 * Alignment of all pool blocks: twice the word size.
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round 'n' up to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) * (ALIGN_SIZE))

#if defined(PAGE_SIZE) && (0 < (PAGE_SIZE + 0))
#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
#elif defined(PAGESIZE) && (0 < (PAGESIZE + 0))
#define MHD_DEF_PAGE_SIZE_ PAGESIZE
#else  /* ! PAGESIZE */
#define MHD_DEF_PAGE_SIZE_ (4096)
#endif /* ! PAGESIZE */

/**
 * Size of a memory page, as detected by MHD_init_mem_pools_().
 */
static size_t MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_; /* Default fallback value */

/**
 * Initialise values for memory pools:
 * detect the system page size and store it in MHD_sys_page_size_.
 */
void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#elif defined(_WIN32)
  SYSTEM_INFO si;
  GetSystemInfo (&si);
  MHD_sys_page_size_ = (size_t) si.dwPageSize;
#else
  MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#endif /* _WIN32 */
  mhd_assert (0 < MHD_sys_page_size_);
}

/**
 * Handle for a memory pool.
 */
struct MemoryPool
{
  /**
   * Pointer to the pool's memory region.
   */
  uint8_t *memory;

  /**
   * Size of the pool in bytes.
   */
  size_t size;

  /**
   * Offset of the first free byte; "front" allocations grow this upwards.
   */
  size_t pos;

  /**
   * Offset just past the free region; allocations "from the end"
   * decrease this value.
   */
  size_t end;

  /**
   * 'true' if the pool was obtained via mmap()/VirtualAlloc(),
   * 'false' if it was obtained via malloc().
   */
  bool is_mmap;
};

/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 * @return NULL on error (out of memory)
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;
  size_t alloc_size;

  mhd_assert (max > 0);
  alloc_size = 0;
  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
  {
    pool->memory = MAP_FAILED;
  }
  else
  {
    /* Round up allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    pool->memory = mmap (NULL,
                         alloc_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 alloc_size,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif /* _WIN32 */
  }
#else  /* ! _WIN32 && ! MAP_ANONYMOUS */
  pool->memory = MAP_FAILED;
#endif /* ! _WIN32 && ! MAP_ANONYMOUS */
  if (MAP_FAILED == pool->memory)
  {
    alloc_size = ROUND_TO_ALIGN (max);
    pool->memory = malloc (alloc_size);
    if (NULL == pool->memory)
    {
      free (pool);
      return NULL;
    }
    pool->is_mmap = false;
  }
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  else
  {
    pool->is_mmap = true;
  }
#endif /* _WIN32 || MAP_ANONYMOUS */
  mhd_assert (0 == (((uintptr_t) pool->memory) % ALIGN_SIZE));
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  mhd_assert (0 < alloc_size);
  return pool;
}

/**
 * Destroy a memory pool.
 *
 * @param pool memory pool to destroy (may be NULL)
 */
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  if (! pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    munmap (pool->memory,
            pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#else
    abort ();
#endif
  free (pool);
}

/**
 * Check how much memory is left in the pool.
 *
 * @param pool pool to check
 * @return number of bytes still available in the pool
 */
size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  return (pool->end - pool->pos);
}

/**
 * Allocate 'size' bytes from the pool.
 *
 * @param pool memory pool to use for the operation
 * @param size number of bytes to allocate
 * @param from_end allocate from the end of the pool (high addresses) if
 *        'true'; such blocks cannot be reallocated later
 * @return NULL if the pool cannot support 'size' more bytes
 */
void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   bool from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  asize = ROUND_TO_ALIGN (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if ( (pool->pos + asize > pool->end) ||
       (pool->pos + asize < pool->pos) )
    return NULL;
  if (from_end)
  {
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  return ret;
}

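/*
 * Illustrative sketch only (not part of the original memorypool.c and kept
 * out of the build with "#if 0"): how a caller might combine
 * MHD_pool_create(), MHD_pool_allocate() and MHD_pool_get_free().  Within
 * libmicrohttpd the pool is driven by the connection handling code; the
 * function and variable names below are invented for illustration.
 */
#if 0
static void
example_pool_allocation (void)
{
  struct MemoryPool *pool;
  char *headers;   /* long-lived data, taken from the front */
  char *iobuf;     /* fixed-size scratch buffer, taken from the end */

  pool = MHD_pool_create (16 * 1024);
  if (NULL == pool)
    return;                                /* out of memory */
  headers = MHD_pool_allocate (pool, 512, false);
  /* Blocks taken "from the end" cannot be reallocated later, so they
   * suit scratch space whose size never changes. */
  iobuf = MHD_pool_allocate (pool, 1024, true);
  if ( (NULL != headers) &&
       (NULL != iobuf) )
  {
    /* ... use the buffers; MHD_pool_get_free() reports the bytes still
     * available between the two growing regions. */
    (void) MHD_pool_get_free (pool);
  }
  MHD_pool_destroy (pool);                 /* frees everything at once */
}
#endif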
/**
 * Reallocate a block obtained from the pool.
 * This is cheap when the block is the most recent "front" allocation:
 * it is then grown or shrunk in place.  Blocks allocated "from the end"
 * must not be passed here.
 *
 * @param pool memory pool to use for the operation
 * @param old the existing block
 * @param old_size the size of the existing block
 * @param new_size the requested new size
 * @return new address of the block, or NULL if the pool cannot support
 *         'new_size' more bytes
 */
void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (old != NULL || old_size == 0);
  mhd_assert (old == NULL || pool->memory <= (uint8_t*) old);
  mhd_assert (old == NULL || pool->memory + pool->size >= (uint8_t*) old
              + old_size);
  /* Blocks "from the end" must not be reallocated */
  mhd_assert (old == NULL || old_size == 0 || \
              pool->memory + pool->pos > (uint8_t*) old);

  if (0 != old_size)
  { /* Need to save some data */
    const size_t old_offset = (uint8_t*) old - pool->memory;
    const bool shrinking = (old_size > new_size);
    /* Try resizing in-place */
    if (shrinking)
    { /* Shrinking in-place, zero-out freed part */
      memset ((uint8_t*) old + new_size, 0, old_size - new_size);
    }
    if (pool->pos == ROUND_TO_ALIGN (old_offset + old_size))
    { /* "old" block is the last allocated block */
      const size_t new_apos = ROUND_TO_ALIGN (old_offset + new_size);
      if (! shrinking)
      { /* Grow in-place, check for enough space. */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* Value wrap */
          return NULL; /* No space */
      }
      /* Resized in-place */
      pool->pos = new_apos;
      return old;
    }
    if (shrinking)
      return old; /* Resized in-place, freed part remains allocated */
  }
  /* Need to allocate new block */
  asize = ROUND_TO_ALIGN (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* Value wrap, too large new_size. */
       (asize > pool->end - pool->pos) ) /* Not enough space */
    return NULL;

  new_blc = pool->memory + pool->pos;
  pool->pos += asize;

  if (0 != old_size)
  {
    /* Move data to new block, old block remains allocated */
    memcpy (new_blc, old, old_size);
    /* Zero-out old block */
    memset (old, 0, old_size);
  }
  return new_blc;
}

/**
 * Clear all entries from the memory pool except for "keep" of the given
 * "copy_bytes".  The kept data is moved to the start of the pool, the
 * rest of the pool is zeroed out, and the first "new_size" bytes count
 * as allocated again ('copy_bytes' must be smaller than 'new_size').
 *
 * @param pool memory pool to use for the operation
 * @param keep pointer to the entry to keep (maybe NULL)
 * @param copy_bytes how many bytes need to be kept at this address
 * @param new_size how many bytes should count as allocated after the reset
 * @return new address of "keep" (if it had to change)
 */
void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes < new_size);
  mhd_assert (keep != NULL || copy_bytes == 0);
  mhd_assert (keep == NULL || pool->memory <= (uint8_t*) keep);
  mhd_assert (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep
              + copy_bytes);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  {
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  /* technically not needed, but safer to zero out */
  if (pool->size > copy_bytes)
  {
    size_t to_zero;
    to_zero = pool->size - copy_bytes;
#ifdef _WIN32
    if (pool->is_mmap)
    {
      size_t to_recommit;
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;

      /* De-committing and re-committing again clear memory and make
       * pages free / available for other needs until accessed. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;

        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort (); /* Serious error, must never happen */
      }
    }
#endif /* _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = ROUND_TO_ALIGN (new_size);
  pool->end = pool->size;
  return pool->memory;
}


/* end of memorypool.c */
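
The fragment below is an illustrative sketch, not part of libmicrohttpd itself: it walks one pool through the lifecycle implemented above, assuming the internal "memorypool.h" API exactly as listed (MHD_pool_create, MHD_pool_allocate, MHD_pool_reallocate, MHD_pool_reset, MHD_pool_destroy). The growth step relies on the buffer being the most recent front allocation, which is the case MHD_pool_reallocate() resizes in place; all identifier names are invented for the example.

#include "memorypool.h"
#include <string.h>

/* Illustrative lifecycle: one pool reused across two "requests". */
int
example_pool_lifecycle (void)
{
  struct MemoryPool *pool;
  char *buf;

  pool = MHD_pool_create (32 * 1024);
  if (NULL == pool)
    return 1;

  /* First request: take a small front block ... */
  buf = MHD_pool_allocate (pool, 128, false);
  if (NULL == buf)
  {
    MHD_pool_destroy (pool);
    return 1;
  }
  memset (buf, 'x', 128);

  /* ... and grow it.  As the last front allocation it is resized
   * in place and the existing bytes are preserved. */
  buf = MHD_pool_reallocate (pool, buf, 128, 1024);
  if (NULL == buf)
  {
    MHD_pool_destroy (pool);
    return 1;
  }

  /* Between requests: keep the first 64 bytes of "buf", zero the rest
   * of the pool and mark the first 256 bytes as allocated again
   * (copy_bytes must be smaller than new_size). */
  buf = MHD_pool_reset (pool, buf, 64, 256);

  MHD_pool_destroy (pool);
  return 0;
}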