2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2006 The PHP Group |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
15 | Authors: Daniel Cowgill <dcowgill@communityconnect.com> |
16 | Rasmus Lerdorf <rasmus@php.net> |
17 +----------------------------------------------------------------------+
19 This software was contributed to PHP by Community Connect Inc. in 2002
20 and revised in 2005 by Yahoo! Inc. to add support for PHP 5.1.
21 Future revisions and derivatives of this source code must acknowledge
22 Community Connect Inc. as the original contributor of this module by
23 leaving this note intact in the source code.
25 All other licensing and usage conditions are those of the PHP Group.
29 /* $Id: apc_sma.c,v 1.69.2.2 2008/03/28 20:17:47 rasmus Exp $ */
33 #include "apc_globals.h"
36 #include "apc_cache.h"
39 void *apc_mmap(char *file_mask, size_t size);
40 void apc_unmap(void* shmaddr, size_t size);
43 #ifdef HAVE_VALGRIND_MEMCHECK_H
44 #include <valgrind/memcheck.h>
47 /* {{{ locking macros */
/* Wrappers pairing the APC lock primitives with Zend interruption
 * blocking, so a request cannot be interrupted while a segment lock
 * is held. */
48 #define LOCK(c) { HANDLE_BLOCK_INTERRUPTIONS(); apc_lck_lock(c); }
49 #define RDLOCK(c) { HANDLE_BLOCK_INTERRUPTIONS(); apc_lck_rdlock(c); }
50 #define UNLOCK(c) { apc_lck_unlock(c); HANDLE_UNBLOCK_INTERRUPTIONS(); }
/* Defaults used by apc_sma_init() when the caller passes 0:
 * one segment of 30 MB. */
53 enum { DEFAULT_NUMSEG=1, DEFAULT_SEGSIZE=30*1024*1024 };
/* Module-wide allocator state (a single process-global SMA instance). */
55 static int sma_initialized = 0; /* true if the sma has been initialized */
56 static unsigned int sma_numseg; /* number of shm segments to allow */
57 static size_t sma_segsize; /* size of each shm segment */
58 static size_t* sma_segments; /* array of shm segment ids */
59 static void** sma_shmaddrs; /* array of shm segment addresses */
60 static int sma_lastseg = 0; /* index of MRU segment */
/* Per-segment header, stored at the start of each shared memory segment.
 * NOTE(review): the struct's opening/closing lines are elided from this
 * listing (gaps in the embedded numbering); only visible fields are
 * annotated. */
62 typedef struct header_t header_t;
64 apc_lck_t sma_lock; /* segment lock, MUST BE ALIGNED for futex locks */
65 size_t segsize; /* size of entire segment */
66 size_t avail; /* bytes available (not necessarily contiguous) */
67 size_t nfoffset; /* start next fit search from this offset */
68 #if ALLOC_DISTRIBUTION
74 /* do not enable for threaded http servers */
75 /* #define __APC_SMA_DEBUG__ 1 */
77 #ifdef __APC_SMA_DEBUG__
78 /* global counter for identifying blocks
79 * Technically it is possible to do the same
80 * using offsets, but double allocations of the
81 * same offset can happen. */
82 static volatile size_t block_id = 0;
85 #define APC_SMA_CANARIES 1
/* Bookkeeping node placed at the start of every block in a segment.
 * Free blocks are chained by segment-relative offsets rather than raw
 * pointers, since each process may map the segment at a different
 * address.  NOTE(review): the struct's brace lines are elided here. */
87 typedef struct block_t block_t;
89 size_t size; /* size of this block */
90 size_t next; /* offset in segment of next free block */
91 #ifdef APC_SMA_CANARIES
92 size_t canary; /* canary to check for memory overwrites */
94 #ifdef __APC_SMA_DEBUG__
95 size_t id; /* identifier for the memory block */
99 /* The macros BLOCKAT and OFFSET are used for convenience throughout this
100 * module. Both assume the presence of a variable shmaddr that points to the
101 * beginning of the shared memory segment in question. */
103 #define BLOCKAT(offset) ((block_t*)((char *)shmaddr + offset))
104 #define OFFSET(block) ((size_t)(((char*)block) - (char*)shmaddr))
106 /* Canary macros for setting, checking and resetting memory canaries */
107 #ifdef APC_SMA_CANARIES
108 #define SET_CANARY(v) (v)->canary = 0x42424242
109 #define CHECK_CANARY(v) assert((v)->canary == 0x42424242)
110 #define RESET_CANARY(v) (v)->canary = -42
/* No-op variants when canaries are compiled out. */
112 #define SET_CANARY(v)
113 #define CHECK_CANARY(v)
114 #define RESET_CANARY(v)
118 /* {{{ MINBLOCKSIZE */
/* Smallest block worth keeping on the free list: one aligned byte of
 * payload plus the aligned block_t bookkeeping header. */
119 #define MINBLOCKSIZE (ALIGNWORD(1) + ALIGNWORD(sizeof(block_t)))
122 /* {{{ sma_allocate: tries to allocate size bytes in a segment */
/* Next-fit allocator over one segment's free list.  Returns the
 * segment-relative offset of the usable payload (just past the block_t
 * header).  Caller must hold the segment's lock.
 * NOTE(review): several original lines are elided from this listing
 * (gaps in the embedded numbering), including the failure-return path
 * and some braces; comments below describe only the visible code. */
123 static size_t sma_allocate(void* shmaddr, size_t size)
125 header_t* header; /* header of shared memory segment */
126 block_t* prv; /* block prior to working block */
127 block_t* cur; /* working block in list */
128 block_t* prvnextfit; /* block before next fit */
129 size_t realsize; /* actual size of block needed, including header */
130 size_t last_offset; /* save the last search offset */
132 const size_t block_size = ALIGNWORD(sizeof(struct block_t));
/* Round the request up to cover the payload plus block bookkeeping. */
134 realsize = ALIGNWORD(size + block_size);
137 * First, ensure that the segment contains at least realsize free bytes,
138 * even if they are not contiguous.
140 header = (header_t*) shmaddr;
141 if (header->avail < realsize) {
145 prvnextfit = 0; /* initially null (no fit) */
148 /* If we have a next fit offset, start searching from there */
149 if(header->nfoffset) {
150 prv = BLOCKAT(header->nfoffset);
151 /* if prv is the last block, jump to the beginning */
153 prv = BLOCKAT(ALIGNWORD(sizeof(header_t)));
/* No saved next-fit position: start from the first block. */
157 prv = BLOCKAT(ALIGNWORD(sizeof(header_t)));
/* Walk the free list until the search comes back to its start offset. */
162 while (prv->next != header->nfoffset) {
163 cur = BLOCKAT(prv->next);
164 #ifdef __APC_SMA_DEBUG__
167 /* If it can fit realsize bytes in cur block, stop searching */
168 if (cur->size >= realsize) {
172 last_offset = prv->next;
/* Stop once a wrapped search has passed its starting point. */
174 if(wrapped && (prv->next >= header->nfoffset)) break;
176 /* Check to see if we need to wrap around and search from the top */
177 if(header->nfoffset && prv->next == 0) {
178 prv = BLOCKAT(ALIGNWORD(sizeof(header_t)));
179 #ifdef __APC_SMA_DEBUG__
/* No block large enough anywhere: reset the next-fit hint and fail
 * (the failure return is elided from this listing). */
187 if (prvnextfit == 0) {
188 header->nfoffset = 0;
/* A fit was found: cur is the free block after the recorded prv. */
193 cur = BLOCKAT(prv->next);
/* Take the block whole when splitting would leave a sliver smaller
 * than two minimum blocks; otherwise split it below. */
198 if (cur->size == realsize || (cur->size > realsize && cur->size < (realsize + (MINBLOCKSIZE * 2)))) {
199 /* cur is big enough for realsize, but too small to split - unlink it */
200 prv->next = cur->next;
203 block_t* nxt; /* the new block (chopped part of cur) */
204 size_t nxtoffset; /* offset of the block currently after cur */
205 size_t oldsize; /* size of cur before split */
207 /* nextfit is too big; split it into two smaller blocks */
208 nxtoffset = cur->next;
210 prv->next += realsize; /* skip over newly allocated block */
211 cur->size = realsize; /* Set the size of this new block */
212 nxt = BLOCKAT(prv->next);
213 nxt->next = nxtoffset; /* Re-link the shortened block */
214 nxt->size = oldsize - realsize; /* and fix the size */
216 #ifdef __APC_SMA_DEBUG__
221 /* update the block header */
222 header->avail -= cur->size;
223 #if ALLOC_DISTRIBUTION
224 header->adist[(int)(log(size)/log(2))]++;
/* Remember where this search ended so the next call resumes there. */
227 header->nfoffset = last_offset;
230 #ifdef __APC_SMA_DEBUG__
231 cur->id = ++block_id;
232 fprintf(stderr, "allocate(realsize=%d,size=%d,id=%d)\n", (int)(size), (int)(cur->size), cur->id);
/* Return the offset of the usable payload, past the block header. */
235 return OFFSET(cur) + block_size;
239 /* {{{ sma_deallocate: deallocates the block at the given offset */
/* Returns the block at `offset` (a payload offset as produced by
 * sma_allocate) to the segment's address-ordered free list, coalescing
 * with the physically adjacent previous/next free blocks.  Caller must
 * hold the segment's lock.
 * NOTE(review): some original lines (braces, canary handling, the
 * return of the freed size) are elided from this listing. */
240 static size_t sma_deallocate(void* shmaddr, size_t offset)
242 header_t* header; /* header of shared memory segment */
243 block_t* cur; /* the new block to insert */
244 block_t* prv; /* the block before cur */
245 block_t* nxt; /* the block after cur */
246 size_t size; /* size of deallocated block */
/* Step back from the payload to the block_t bookkeeping header. */
248 offset -= ALIGNWORD(sizeof(struct block_t));
251 /* find position of new block in free list */
252 cur = BLOCKAT(offset);
253 prv = BLOCKAT(ALIGNWORD(sizeof(header_t)));
257 #ifdef __APC_SMA_DEBUG__
259 fprintf(stderr, "free(%p, size=%d,id=%d)\n", cur, (int)(cur->size), cur->id);
/* Advance prv to the last free block preceding `offset`; the free
 * list is kept sorted by offset so the edge checks below work. */
261 while (prv->next != 0 && prv->next < offset) {
262 prv = BLOCKAT(prv->next);
263 #ifdef __APC_SMA_DEBUG__
270 /* insert new block after prv */
271 cur->next = prv->next;
274 #ifdef __APC_SMA_DEBUG__
279 /* update the block header */
280 header = (header_t*) shmaddr;
281 header->avail += cur->size;
/* Coalesce with the preceding free block if they touch. */
284 if (((char *)prv) + prv->size == (char *) cur) {
285 /* cur and prv share an edge, combine them */
286 prv->size += cur->size;
287 prv->next = cur->next;
292 nxt = BLOCKAT(cur->next);
/* Coalesce with the following free block if they touch. */
294 if (((char *)cur) + cur->size == (char *) nxt) {
295 /* cur and nxt shared an edge, combine them */
296 cur->size += nxt->size;
297 cur->next = nxt->next;
298 #ifdef __APC_SMA_DEBUG__
300 nxt->id = -1; /* assert this or set it ? */
/* Invalidate the next-fit hint; the list topology just changed. */
304 header->nfoffset = 0; /* Reset the next fit search marker */
310 /* {{{ apc_sma_init */
/* Creates the shared memory segments (mmap or SysV shm, chosen by
 * elided #if lines), carves each into a header plus an initial free
 * list, and records the mappings in the module globals.  Returns early
 * if already initialized.
 * NOTE(review): several original lines are elided from this listing. */
312 void apc_sma_init(int numseg, size_t segsize, char *mmap_file_mask)
316 if (sma_initialized) {
323 * I don't think multiple anonymous mmaps makes any sense
324 * so force sma_numseg to 1 in this case
/* Anonymous mapping requested: no mask, empty mask, or /dev/zero. */
326 if(!mmap_file_mask ||
327 (mmap_file_mask && !strlen(mmap_file_mask)) ||
328 (mmap_file_mask && !strcmp(mmap_file_mask, "/dev/zero"))) {
331 sma_numseg = numseg > 0 ? numseg : DEFAULT_NUMSEG;
334 sma_numseg = numseg > 0 ? numseg : DEFAULT_NUMSEG;
337 sma_segsize = segsize > 0 ? segsize : DEFAULT_SEGSIZE;
339 sma_segments = (size_t*) apc_emalloc(sma_numseg*sizeof(size_t));
340 sma_shmaddrs = (void**) apc_emalloc(sma_numseg*sizeof(void*));
342 for (i = 0; i < sma_numseg; i++) {
348 sma_segments[i] = sma_segsize;
349 sma_shmaddrs[i] = apc_mmap(mmap_file_mask, sma_segsize);
/* Restore the trailing XXXXXX template so the mask can be reused
 * for the next segment's temp-file name. */
350 if(sma_numseg != 1) memcpy(&mmap_file_mask[strlen(mmap_file_mask)-6], "XXXXXX", 6);
352 sma_segments[i] = apc_shm_create(NULL, i, sma_segsize);
353 sma_shmaddrs[i] = apc_shm_attach(sma_segments[i]);
355 shmaddr = sma_shmaddrs[i];
/* Initialize the per-segment header and its lock. */
357 header = (header_t*) shmaddr;
358 apc_lck_create(NULL, 0, 1, header->sma_lock);
359 header->segsize = sma_segsize;
360 header->avail = sma_segsize - ALIGNWORD(sizeof(header_t)) - ALIGNWORD(sizeof(block_t));
361 header->nfoffset = 0;
362 #if ALLOC_DISTRIBUTION
365 for(j=0; j<30; j++) header->adist[j] = 0;
/* Build the initial free list: a first block right after the header
 * (its size initialization is elided here) linked to one large block
 * holding the remaining header->avail bytes. */
368 block = BLOCKAT(ALIGNWORD(sizeof(header_t)));
370 block->next = ALIGNWORD(sizeof(header_t)) + ALIGNWORD(sizeof(block_t));
372 #ifdef __APC_SMA_DEBUG__
375 block = BLOCKAT(block->next);
376 block->size = header->avail;
379 #ifdef __APC_SMA_DEBUG__
386 /* {{{ apc_sma_cleanup */
/* Tears down the allocator: destroys each segment's lock, releases
 * every segment (the #if lines selecting apc_unmap vs apc_shm_detach
 * are elided in this listing), then frees the bookkeeping arrays.
 * Must only run after apc_sma_init. */
387 void apc_sma_cleanup()
391 assert(sma_initialized);
393 for (i = 0; i < sma_numseg; i++) {
394 apc_lck_destroy(((header_t*)sma_shmaddrs[i])->sma_lock);
396 apc_unmap(sma_shmaddrs[i], sma_segments[i]);
398 apc_shm_detach(sma_shmaddrs[i]);
402 apc_efree(sma_segments);
403 apc_efree(sma_shmaddrs);
407 /* {{{ apc_sma_malloc */
/* Allocates n bytes of shared memory.  Tries the most-recently-used
 * segment first, then scans the remaining segments; each attempt takes
 * that segment's lock around sma_allocate.  Also maintains the optional
 * per-request accounting counter APCG(mem_size_ptr).
 * NOTE(review): failure branches and some braces are elided from this
 * listing. */
408 void* apc_sma_malloc(size_t n)
412 size_t *orig_mem_size_ptr;
415 assert(sma_initialized);
416 LOCK(((header_t*)sma_shmaddrs[sma_lastseg])->sma_lock);
418 off = sma_allocate(sma_shmaddrs[sma_lastseg], n);
/* Hit in the MRU segment: translate the offset to a pointer. */
420 void* p = (void *)(((char *)(sma_shmaddrs[sma_lastseg])) + off);
421 if (APCG(mem_size_ptr) != NULL) { *(APCG(mem_size_ptr)) += n; }
422 UNLOCK(((header_t*)sma_shmaddrs[sma_lastseg])->sma_lock);
423 #ifdef VALGRIND_MALLOCLIKE_BLOCK
424 VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);
/* MRU segment could not satisfy the request: release its lock and
 * probe the other segments. */
428 UNLOCK(((header_t*)sma_shmaddrs[sma_lastseg])->sma_lock);
430 for (i = 0; i < sma_numseg; i++) {
431 if (i == sma_lastseg) {
434 LOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
435 off = sma_allocate(sma_shmaddrs[i], n);
437 void* p = (void *)(((char *)(sma_shmaddrs[i])) + off);
438 if (APCG(mem_size_ptr) != NULL) { *(APCG(mem_size_ptr)) += n; }
439 UNLOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
441 #ifdef VALGRIND_MALLOCLIKE_BLOCK
442 VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);
446 UNLOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
453 /* {{{ apc_sma_realloc */
/* NOTE(review): only the allocation half is visible in this listing —
 * presumably the elided lines copy the old contents and free p;
 * confirm against the full source before relying on that. */
454 void* apc_sma_realloc(void *p, size_t n)
457 return apc_sma_malloc(n);
461 /* {{{ apc_sma_strdup */
/* Duplicates s into shared memory via apc_sma_malloc.  NOTE(review):
 * the length computation for `len` and the copy into `q` are elided
 * from this listing. */
462 char* apc_sma_strdup(const char* s)
470 q = apc_sma_malloc(len);
477 /* {{{ apc_sma_free */
/* Frees a pointer previously returned by apc_sma_malloc.  Locates the
 * owning segment by address range, deallocates under that segment's
 * lock, and updates the optional per-request accounting counter.
 * Reports via apc_eprint if p belongs to no segment.
 * NOTE(review): some braces/return lines are elided from this listing. */
478 void apc_sma_free(void* p)
489 assert(sma_initialized);
491 for (i = 0; i < sma_numseg; i++) {
492 LOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
493 offset = (size_t)((char *)p - (char *)(sma_shmaddrs[i]));
/* p lies inside segment i iff it is at/after the base and the
 * offset falls within the segment size. */
494 if (p >= sma_shmaddrs[i] && offset < sma_segsize) {
495 d_size = sma_deallocate(sma_shmaddrs[i], offset);
496 if (APCG(mem_size_ptr) != NULL) { *(APCG(mem_size_ptr)) -= d_size; }
497 UNLOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
498 #ifdef VALGRIND_FREELIKE_BLOCK
499 VALGRIND_FREELIKE_BLOCK(p, 0);
/* Not in this segment: release its lock and try the next one. */
503 UNLOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
506 apc_eprint("apc_sma_free: could not locate address %p", p);
510 /* {{{ apc_sma_info */
/* Builds a heap-allocated snapshot of the allocator: segment count and
 * usable segment size, plus (unless `limited`) one linked list of free
 * blocks per segment, gathered under each segment's read lock.  Caller
 * releases the result with apc_sma_free_info.
 * NOTE(review): some lines (early-return body, loop braces) are elided
 * from this listing. */
511 apc_sma_info_t* apc_sma_info(zend_bool limited)
513 apc_sma_info_t* info;
514 apc_sma_link_t** link;
519 if (!sma_initialized) {
523 info = (apc_sma_info_t*) apc_emalloc(sizeof(apc_sma_info_t));
524 info->num_seg = sma_numseg;
525 info->seg_size = sma_segsize - ALIGNWORD(sizeof(header_t)) - ALIGNWORD(sizeof(block_t));
527 info->list = apc_emalloc(info->num_seg * sizeof(apc_sma_link_t*));
528 for (i = 0; i < sma_numseg; i++) {
529 info->list[i] = NULL;
/* Limited mode: summary fields only, skip the per-block lists. */
532 if(limited) return info;
534 /* For each segment */
535 for (i = 0; i < sma_numseg; i++) {
536 RDLOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
537 shmaddr = sma_shmaddrs[i];
538 prv = BLOCKAT(ALIGNWORD(sizeof(header_t)));
540 link = &info->list[i];
542 /* For each block in this segment */
543 while (prv->next != 0) {
544 block_t* cur = BLOCKAT(prv->next);
545 #ifdef __APC_SMA_DEBUG__
/* Append a copy of this block's size and offset to the list. */
549 *link = apc_emalloc(sizeof(apc_sma_link_t));
550 (*link)->size = cur->size;
551 (*link)->offset = prv->next;
552 (*link)->next = NULL;
553 link = &(*link)->next;
557 UNLOCK(((header_t*)sma_shmaddrs[i])->sma_lock);
564 /* {{{ apc_sma_free_info */
/* Releases the structure built by apc_sma_info: walks each segment's
 * block list (the advance/free lines of the inner loop are elided in
 * this listing) and frees the list array; presumably the elided tail
 * also frees `info` itself — confirm against the full source. */
565 void apc_sma_free_info(apc_sma_info_t* info)
569 for (i = 0; i < info->num_seg; i++) {
570 apc_sma_link_t* p = info->list[i];
572 apc_sma_link_t* q = p;
577 apc_efree(info->list);
582 /* {{{ apc_sma_get_avail_mem */
/* Sums the `avail` byte counters of all segment headers.  Note the
 * reads happen without taking the segment locks. */
583 size_t apc_sma_get_avail_mem()
585 size_t avail_mem = 0;
588 for (i = 0; i < sma_numseg; i++) {
589 header_t* header = (header_t*) sma_shmaddrs[i];
590 avail_mem += header->avail;
596 #if ALLOC_DISTRIBUTION
/* Exposes segment 0's allocation-size histogram (power-of-two buckets
 * incremented in sma_allocate) for diagnostics. */
597 size_t *apc_sma_get_alloc_distribution(void) {
598 header_t* header = (header_t*) sma_shmaddrs[0];
599 return header->adist;
604 /* {{{ apc_sma_check_integrity */
/* Debug walk of every segment's free list; asserts that a locally
 * accumulated total (the accumulation lines are elided in this
 * listing) matches the header's `avail` counter. */
605 void apc_sma_check_integrity()
609 /* For each segment */
610 for (i = 0; i < sma_numseg; i++) {
611 char* shmaddr = sma_shmaddrs[i];
612 header_t* header = (header_t*) shmaddr;
613 block_t* prv = BLOCKAT(ALIGNWORD(sizeof(header_t)));
616 /* For each block in this segment */
617 while (prv->next != 0) {
618 block_t* cur = BLOCKAT(prv->next);
623 assert(avail == header->avail);
634 * vim600: expandtab sw=4 ts=4 sts=4 fdm=marker
635 * vim<600: expandtab sw=4 ts=4 sts=4