Hi,
I've modified shmem.c to have another shared memory segment
for storing the allocation details. I've also tried to ensure that all the
computation is based on offsets (stored in the shared memory again) - so
that if different processes map the shared memory at different addresses,
the memory mgmt should still work.
As regards testing, I've done some minimal testing (testshmem,
shmcb, shmht) - but there may be lots of problems.. Also, the code may
require perf. tuning - I haven't yet looked into those aspects.. Pl. let me
know for any clarifications..
'waiting for your feedback / suggestions..
Thanks
-Madhu
-----Original Message-----
From: Ryan Bloom [mailto:[EMAIL PROTECTED]
Sent: Sunday, September 09, 2001 5:39 PM
To: [email protected]; MATHIHALLI,MADHUSUDAN (HP-Cupertino,ex1);
'[email protected]'
Subject: Re: FW: [PATCH] shmem.c
On Sunday 09 September 2001 17:23, MATHIHALLI,MADHUSUDAN (HP-Cupertino,ex1)
wrote:
> Hi Ian,
> Thanks for the suggestion.. I certainly like the first idea (in fact, that
> was also one of the ideas I'd proposed earlier). But, I'm not sure how
> *open* the others are to creating a separate shared memory segment just for
> maintaining the allocation table..
The only problem is going to be ensuring that the platform has enough
shared memory to handle both segments. I think we can just return an
error if we run out of shared memory though. IOW, +1 for having a second
shared memory segment for the control data.
Ryan
>
> As regards your second idea, I'm not comfortable with it - it'd introduce
a
> lot of performance overhead - especially with IPC's around..
>
> Thanks for your suggestions..
> -Madhu
>
>
> -----Original Message-----
> From: Ian Holsman [mailto:[EMAIL PROTECTED]
> Sent: Sunday, September 09, 2001 3:54 PM
> To: [email protected]
> Subject: Re: FW: [PATCH] shmem.c
>
>
> Hi Madhu.
> the only way I can think of handling this is to store the memory
> block list in another piece of shared memory/mmap (allocated separately)
>
> The other method I can think of is to spawn a separate process which
> would hold all the memory list information, and it would do all the memory
> management
> and the other processes would communicate it via some IPC.
>
> the problem is that shared memory is NOT guaranteed to be in the same space
> for
> each process.
>
> ..Ian
>
Index: shmem.c
===================================================================
RCS file: /home/cvspublic/apr/shmem/unix/shmem.c,v
retrieving revision 1.33
diff -u -r1.33 shmem.c
--- shmem.c 2001/08/30 17:11:04 1.33
+++ shmem.c 2001/09/12 21:07:29
@@ -89,182 +89,593 @@
#include <kernel/OS.h>
#endif
+#define MIN_BLK_SIZE 200
+
+/***************************************************************************
+The Shared Memory - Management Architecture :
+
+List Memory (Shared Memory - 1)
+ ____________________________________________
+ | Memory Offsets for the List and |
+ | for the Shared Memory |
+ |------------------------------------------|
+L1 | MemChunk(MC1) |Next| MemChunk(MC2) |Next |
+ |------------------------------------------|
+ |------------------------------------------|
+L2 | MemChunk(MC3) |Next| MemChunk(MC4) |Next |
+ |------------------------------------------|
+ ______| . . . . . . .
+ | . . . . . . . .
+ | . . . . . . . .
+ | |------------------------------------------|
+Ln | | MemChunk(MCm) |Next| ...... |
+ | |------------------------------------------|
+ | |
+ | |________________________________________
+ V | (offset from shm_base)
+Chunk Memory (Shared Memory - 2) V
+-------------------------------------------------------------------------
+| | | | | | |
+| C1 | C2 | C3 | C4 | C5 |.... |
+| | | | | | |
+-------------------------------------------------------------------------
+
+***************************************************************************/
+
+
+typedef struct memoffsets_t {
+ apr_off_t l_used; /* Starting of the Used list elements */
+ apr_off_t l_free; /* Starting of the Freed list elements */
+ apr_off_t l_usedlast; /* Last element of the Used list */
+ apr_off_t l_freelast; /* Last element of the Freed list */
+ apr_off_t c_used; /* Begining of the used chunk list */
+ apr_off_t c_free; /* Begining of the freed chunk list */
+ apr_off_t shm_offset; /* The current offset of the shared memory */
+ apr_off_t shm_length; /* Total length of shared memory available */
+} memoffsets_t;
+
+typedef struct memchunk_t {
+ apr_off_t offset; /* Offset of the chunk - from m->shm_base */
+ apr_size_t size; /* Size of the chunk */
+ apr_off_t next; /* Next chunk - from m->list_base) */
+ apr_off_t prev; /* Previous chunk - from m->list_base) */
+} memchunk_t;
+
+typedef struct memlist_t {
+ struct memchunk_t chunk;
+ apr_off_t next;
+} memlist_t;
+
struct shmem_t {
- void *mem;
- void *curmem;
- apr_size_t length;
- apr_lock_t *lock;
- char *filename;
+ apr_pool_t *p;
+ void *shm_base; /* Starting address of the shared memory */
+ memoffsets_t *offsets; /* Begining of the set of offsets */
+ memlist_t *list_base; /* Begining of the list elements */
+
+ apr_lock_t *lock;
+ char *filename;
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
- apr_file_t *file;
-#elif APR_USE_SHMEM_MMAP_ANON
- /* Nothing else. */
+ apr_file_t *file;
#elif APR_USE_SHMEM_SHMGET
- apr_os_file_t file;
+ apr_os_file_t file;
+ apr_os_file_t listfd;
#elif APR_USE_SHMEM_BEOS
- area_id areaid;
+ area_id areaid;
#endif
};
/*
 * Offset <-> address translation.  A negative offset/index encodes NULL
 * so that the *_OFFSET/*_INDEX and *_ADDR macros round-trip.  All of
 * these expect a local `apr_shmem_t *m` to be in scope.
 *
 * BUGFIX: LIST_OFFSET was missing its closing parenthesis, and both
 * CHUNK_OFFSET and LIST_OFFSET encoded NULL as 0 -- a *valid* offset
 * that the *_ADDR macros decode as the first element -- instead of -1.
 * Macro arguments are now parenthesized throughout.
 */
#define SHM_ADDR(offset) (((offset) < 0) ? (void *)NULL : \
                  (void *)((unsigned char *)m->shm_base + (offset)))

#define CHUNK_OFFSET(ptr) (((ptr) == (memchunk_t *)NULL) ? -1 : \
                  ((unsigned char *)(ptr) - (unsigned char *)m->list_base))

#define CHUNK_ADDR(offset) (((offset) < 0) ? (memchunk_t *)NULL : \
                  (memchunk_t *)((unsigned char *)m->list_base + (offset)))

#define LIST_OFFSET(ptr) (((ptr) == (memlist_t *)NULL) ? -1 : \
                  ((unsigned char *)(ptr) - (unsigned char *)m->list_base))

#define LIST_INDEX(ptr) (((ptr) == (memlist_t *)NULL) ? -1 : \
                  (((unsigned char *)(ptr) - (unsigned char *)m->list_base) \
                   / sizeof(memlist_t)))

#define LIST_ADDR(idx) (((idx) < 0) ? (memlist_t *)NULL : \
                  (memlist_t *)((unsigned char *)m->list_base + \
                                ((idx) * sizeof(memlist_t))))
+
+static memchunk_t *list_malloc (apr_shmem_t *m)
+{
+ apr_off_t index;
+ memlist_t *elem, *last;
+
+ /* Find the first free list element */
+ if ((elem = LIST_ADDR(m->offsets->l_free)) == (memlist_t *)NULL)
+ return (memchunk_t *)NULL;
+ else {
+ index = m->offsets->l_free;
+ m->offsets->l_free = elem->next;
+ }
+
+ elem->next = -1;
+
+ if (m->offsets->l_used == -1)
+ m->offsets->l_used = index;
+ else {
+ last = LIST_ADDR(m->offsets->l_usedlast);
+ last->next = index;
+ }
+
+ m->offsets->l_usedlast = index;
+
+ return (&(elem->chunk));
+}
+
+static apr_status_t list_free (apr_shmem_t *m, memchunk_t *chunk)
+{
+ apr_off_t index;
+ memlist_t *elem, *iter;
+
+ /* Find the required element in the allocated list */
+ iter = LIST_ADDR(m->offsets->l_used);
+ while (iter && (&(iter->chunk) != chunk)) {
+ elem = iter;
+ index = iter->next;
+ iter = LIST_ADDR(iter->next);
+ }
+
+ if ((!iter) || (&(iter->chunk) != chunk))
+ return -1;
+ else
+ elem->next = iter->next;
+
+ iter->next = -1;
+
+ if (m->offsets->l_free == -1)
+ m->offsets->l_free = index;
+ else {
+ elem = LIST_ADDR(m->offsets->l_freelast);
+ elem->next = index;
+ }
+
+ m->offsets->l_freelast = index;
+
+ return APR_SUCCESS;
+}
+
+static void add_chunk (apr_shmem_t *m, apr_off_t *list, memchunk_t *blk)
+{
+ memchunk_t *elem;
+ if (*list == -1)
+ *list = CHUNK_OFFSET(blk);
+
+ elem = CHUNK_ADDR(*list);
+
+ if (blk == elem){
+ blk->prev = CHUNK_OFFSET(blk);
+ blk->next = CHUNK_OFFSET(blk);
+ } else {
+ (CHUNK_ADDR(elem->prev))->next = CHUNK_OFFSET(blk);
+ blk->prev = elem->prev;
+ blk->next = CHUNK_OFFSET(elem);
+ elem->prev = CHUNK_OFFSET(blk);
+ }
+}
+
+static void remove_chunk(apr_shmem_t *m, apr_off_t *list, memchunk_t *blk)
+{
+ memchunk_t *elem;
+
+ elem = CHUNK_ADDR(*list);
+
+ if ((elem == blk)
+ && (CHUNK_ADDR(blk->next) == blk) && (blk == CHUNK_ADDR(blk->prev))) {
+ *list = -1;
+ } else {
+ (CHUNK_ADDR(blk->next))->prev = blk->prev;
+ (CHUNK_ADDR(blk->prev))->next = blk->next;
+ if (elem == blk)
+ *list = blk->next;
+ }
+ blk->next = -1;
+ blk->prev = -1;
+}
+
+static void split_chunk(
+ apr_shmem_t *m, apr_off_t *list, memchunk_t *blk, apr_size_t size)
+{
+ memchunk_t *b;
+
+ b = list_malloc (m);
+ if (b != (memchunk_t *)NULL) {
+ b->size = blk->size - size;
+ b->offset = blk->offset + size;
+ blk->size = size;
+ add_chunk(m, list, b);
+ }
+}
+
+static memchunk_t *find_chunk_by_addr(
+ apr_shmem_t *m, apr_off_t *list, void *addr)
+{
+ memchunk_t *iter = CHUNK_ADDR(*list);
+
+ if (!iter) return NULL;
+
+ do {
+ if (SHM_ADDR(iter->offset) == addr)
+ return iter;
+ } while (iter && ((iter = CHUNK_ADDR(iter->next)) != CHUNK_ADDR(*list)));
+
+ return NULL;
+}
+
+static memchunk_t *find_chunk_by_size(
+ apr_shmem_t *m, apr_off_t *list, apr_size_t size)
+{
+ apr_size_t diff = -1;
+ memchunk_t *iter = CHUNK_ADDR(*list), *found = NULL;
+
+ if (!iter) return NULL;
+
+ do {
+ if (iter->size == size)
+ return iter;
+ if (iter->size > size) {
+ if (diff == -1)
+ diff = iter->size;
+ if ((iter->size - size) < diff) {
+ diff = iter->size - size;
+ found = iter;
+ }
+ }
+ } while ((iter = CHUNK_ADDR(iter->next)) != CHUNK_ADDR(*list));
+
+ if (diff > MIN_BLK_SIZE)
+ split_chunk(m, list, found, size);
+
+ return found;
+}
+
+static void display_shm_snapshot(apr_shmem_t *m)
+{
+#if 0
+ memchunk_t *iter;
+
+ extern ap_log_error ();
+ if (!m) return;
+
+ if ((iter = CHUNK_ADDR(m->offsets->c_used)) != NULL) {
+ ap_log_error(__FILE__, __LINE__, 3, 0, NULL, "USELIST :");
+ do {
+ ap_log_error(__FILE__, __LINE__, 3, 0, NULL,"[%ld] [%ld] [%ld]",
+ iter->offset, iter->next, iter->size);
+ } while (iter &&
+ ((iter = CHUNK_ADDR(iter->next)) !=
CHUNK_ADDR(m->offsets->c_used)));
+ }
+
+ if ((iter = CHUNK_ADDR(m->offsets->c_free)) != NULL) {
+ ap_log_error(__FILE__, __LINE__, 3, 0, NULL,"FREELIST :");
+ do {
+ ap_log_error(__FILE__, __LINE__, 3, 0, NULL,"[%ld] [%ld] [%ld]",
+ iter->offset, iter->next, iter->size);
+ } while (iter &&
+ ((iter = CHUNK_ADDR(iter->next)) !=
CHUNK_ADDR(m->offsets->c_free)));
+ }
+ ap_log_error(__FILE__, __LINE__, 3, 0, NULL,"");
+#endif
+ return;
+}
+
+static memchunk_t *alloc_chunk(apr_shmem_t *m, apr_size_t size)
+{
+ memchunk_t *b = NULL;
+
+ /* Align size to a word boundary */
+ if (size < MIN_BLK_SIZE)
+ size = MIN_BLK_SIZE;
+ size = ((1 + ((size - 1) / sizeof (void *))) * sizeof (void *));
+
+ if (m->offsets->shm_length < size)
+ return NULL;
+
+ b = find_chunk_by_size (m, &(m->offsets->c_free), size);
+
+ if (b != (memchunk_t *)NULL)
+ remove_chunk (m, &(m->offsets->c_free), b);
+ else {
+ b = list_malloc(m);
+ if (b == (memchunk_t *)NULL)
+ return (memchunk_t *)NULL;
+ b->offset = m->offsets->shm_offset;
+ b->size = size;
+ m->offsets->shm_offset += b->size;
+ }
+
+ m->offsets->shm_length -= b->size;
+ add_chunk(m, &(m->offsets->c_used), b);
+
+ display_shm_snapshot (m);
+
+ return b;
+}
+
+static void free_chunk(apr_shmem_t *m, void *entity)
+{
+ memchunk_t *b;
+
+ if (entity == NULL)
+ return;
+
+ b = find_chunk_by_addr (m, &(m->offsets->c_used), entity);
+ if (b != (memchunk_t *)NULL) {
+ remove_chunk(m, &(m->offsets->c_used), b);
+ add_chunk(m, &(m->offsets->c_free), b);
+ m->offsets->shm_length += b->size;
+ }
+
+ display_shm_snapshot (m);
+}
+
+static memchunk_t *realloc_chunk(apr_shmem_t *m, void *entity, apr_size_t size)
+{
+ memchunk_t *b, *new_b;
+
+ /* Align size to a word boundary */
+ size = ((1 + ((size - 1) / sizeof (void *))) * sizeof (void *));
+
+ b = find_chunk_by_addr(m, &(m->offsets->c_used), entity);
+
+ if (b != (memchunk_t *)NULL) {
+ if (b->size > size)
+ split_chunk (m, &(m->offsets->c_used), b, size);
+ else
+ if ((b->size < size) && (size < m->offsets->shm_length)) {
+ new_b = alloc_chunk (m, size);
+ memcpy (SHM_ADDR(new_b->offset), SHM_ADDR(b->offset), b->size);
+ free_chunk (m, entity);
+ b = new_b;
+ }
+ }
+ return b;
+}
+
+/*
+ * FIXME:
+ * 1. Is APR_OS_DEFAULT sufficient?
+ * 2. status = apr_file_remove(filename, p);
+ * 3. Handle errors from return values of system calls.
+ */
APR_DECLARE(apr_status_t) apr_shm_init(apr_shmem_t **m, apr_size_t reqsize,
- const char *filename, apr_pool_t *pool)
+ const char *filename, apr_pool_t *p)
{
- apr_shmem_t *new_m;
- void *mem;
#if APR_USE_SHMEM_SHMGET
struct shmid_ds shmbuf;
apr_uid_t uid;
apr_gid_t gid;
+ int i, listsize;
+ memlist_t *iter, *piter;
+ void *addr;
#endif
+
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
apr_status_t status;
+ status = APR_SUCCESS;
#endif
+
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || \
APR_USE_SHMEM_MMAP_ZERO || APR_USE_SHMEM_SHMGET
int tmpfd;
#endif
- new_m = apr_palloc(pool, sizeof(apr_shmem_t));
- if (!new_m)
+ (*m) = (apr_shmem_t *)apr_palloc(p, sizeof(apr_shmem_t));
+ if (!(*m))
return APR_ENOMEM;
-/* These implementations are very similar except for opening the file. */
#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
- /* FIXME: Ignore error for now. *
- * status = apr_file_remove(filename, pool);*/
- status = APR_SUCCESS;
#if APR_USE_SHMEM_MMAP_TMP
- /* FIXME: Is APR_OS_DEFAULT sufficient? */
- status = apr_file_open(&new_m->file, filename,
- APR_READ | APR_WRITE | APR_CREATE, APR_OS_DEFAULT,
- pool);
+ status = apr_file_open(&(*m)->file, filename,
+ APR_READ | APR_WRITE | APR_CREATE, APR_OS_DEFAULT, p);
if (status != APR_SUCCESS)
return APR_EGENERAL;
- status = apr_os_file_get(&tmpfd, new_m->file);
- status = apr_file_trunc(new_m->file, reqsize);
+ status = apr_os_file_get(&tmpfd, (*m)->file);
+ status = apr_file_trunc((*m)->file, reqsize);
if (status != APR_SUCCESS)
return APR_EGENERAL;
#elif APR_USE_SHMEM_MMAP_SHM
- /* FIXME: Is APR_OS_DEFAULT sufficient? */
tmpfd = shm_open(filename, O_RDWR | O_CREAT, APR_OS_DEFAULT);
if (tmpfd == -1)
return errno;
- apr_os_file_put(&new_m->file, &tmpfd, pool);
- status = apr_file_trunc(new_m->file, reqsize);
+ apr_os_file_put(&(*m)->file, &tmpfd, p);
+ status = apr_file_trunc((*m)->file, reqsize);
if (status != APR_SUCCESS)
{
shm_unlink(filename);
return APR_EGENERAL;
}
#elif APR_USE_SHMEM_MMAP_ZERO
- status = apr_file_open(&new_m->file, "/dev/zero", APR_READ | APR_WRITE,
- APR_OS_DEFAULT, pool);
+ status = apr_file_open(&(*m)->file, "/dev/zero", APR_READ | APR_WRITE,
+ APR_OS_DEFAULT, p);
if (status != APR_SUCCESS)
return APR_EGENERAL;
- status = apr_os_file_get(&tmpfd, new_m->file);
+ status = apr_os_file_get(&tmpfd, (*m)->file);
#endif
- mem = mmap(NULL, reqsize, PROT_READ|PROT_WRITE, MAP_SHARED, tmpfd, 0);
+ (*m)->shm_base = mmap(NULL, reqsize, PROT_READ|PROT_WRITE,
+ MAP_SHARED, tmpfd, 0);
#elif APR_USE_SHMEM_MMAP_ANON
- mem = mmap(NULL, reqsize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED, -1,
0);
+ (*m)->shm_base = mmap(NULL, reqsize, PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_SHARED,-1, 0);
#elif APR_USE_SHMEM_SHMGET
- tmpfd = shmget(IPC_PRIVATE, reqsize, (SHM_R|SHM_W|IPC_CREAT));
- if (tmpfd == -1)
- return errno;
- new_m->file = tmpfd;
+ listsize = (reqsize/MIN_BLK_SIZE + 1) * sizeof(memlist_t)
+ + sizeof (memoffsets_t);
+ (*m)->listfd = shmget(IPC_PRIVATE, listsize, (SHM_R|SHM_W|IPC_CREAT));
+ if ((*m)->listfd == -1)
+ return errno + APR_OS_START_SYSERR;
+
+ addr = (void *)shmat((*m)->listfd, NULL, 0);
+ if (addr == (void *)NULL)
+ return errno + APR_OS_START_SYSERR;
+
+ (*m)->offsets = (memoffsets_t *)addr;
+ (*m)->list_base = (memlist_t *)(addr + (sizeof(memoffsets_t)));
+
+ (*m)->file = shmget(IPC_PRIVATE, reqsize, (SHM_R|SHM_W|IPC_CREAT));
+ if ((*m)->file == -1)
+ return errno + APR_OS_START_SYSERR;
+
+ (*m)->shm_base = (void *)shmat((*m)->file, NULL, 0);
+ if ((*m)->shm_base == (void *)NULL)
+ return errno + APR_OS_START_SYSERR;
- mem = shmat(new_m->file, NULL, 0);
-
- /* FIXME: Handle errors. */
- if (shmctl(new_m->file, IPC_STAT, &shmbuf) == -1)
+ if (shmctl((*m)->file, IPC_STAT, &shmbuf) == -1)
return errno;
- apr_current_userid(&uid, &gid, pool);
+ apr_current_userid(&uid, &gid, p);
shmbuf.shm_perm.uid = uid;
shmbuf.shm_perm.gid = gid;
- if (shmctl(new_m->file, IPC_SET, &shmbuf) == -1)
- return errno;
+ if (shmctl((*m)->file, IPC_SET, &shmbuf) == -1)
+ return errno + APR_OS_START_SYSERR;
- /* remove in future (once use count hits zero) */
- if (shmctl(new_m->file, IPC_RMID, NULL) == -1)
- return errno;
+ if (shmctl((*m)->listfd, IPC_SET, &shmbuf) == -1)
+ return errno + APR_OS_START_SYSERR;
+ /* Initially all elements after the first element is a free block */
+ piter = (*m)->list_base;
+ for (i = 0; i < reqsize/MIN_BLK_SIZE; i++) {
+ piter->next = i + 1;
+ piter++;
+ }
+ piter->next = -1;
+
+ (*m)->offsets->l_used = -1;
+ (*m)->offsets->l_usedlast = -1;
+ (*m)->offsets->l_free = 0;
+ (*m)->offsets->l_freelast = i;
+ (*m)->offsets->c_used = -1;
+ (*m)->offsets->c_free = -1;
+
#elif APR_USE_SHMEM_BEOS
- new_m->area_id = create_area("mm", (void*)&mem, B_ANY_ADDRESS, reqsize,
- B_LAZY_LOCK, B_READ_AREA|B_WRITE_AREA);
- /* FIXME: error code? */
- if (new_m->area_id < 0)
- return APR_EGENERAL;
+ (*m)->area_id = create_area("apr_shm", (void*)&((*m)->shm_base),
B_ANY_ADDRESS,
+ reqsize, B_LAZY_LOCK,
B_READ_AREA|B_WRITE_AREA);
+ if ((*m)->area_id < 0)
+ return APR_EGENERAL; /* FIXME: error code? */
#endif
- new_m->mem = mem;
- new_m->curmem = mem;
- new_m->length = reqsize;
+ (*m)->p = p;
+ (*m)->offsets->shm_offset = 0;
+ (*m)->offsets->shm_length = reqsize;
- apr_lock_create(&new_m->lock, APR_MUTEX, APR_CROSS_PROCESS, NULL, pool);
- if (!new_m->lock)
+ apr_lock_create(&(*m)->lock, APR_MUTEX, APR_CROSS_PROCESS, NULL, p);
+ if (!(*m)->lock)
return APR_EGENERAL;
- *m = new_m;
return APR_SUCCESS;
}
/*
 * Detach and destroy the shared region(s).  Safe to call with NULL.
 *
 * BUGFIX: with SysV shm the management segment was attached at
 * m->offsets (m->list_base sits sizeof(memoffsets_t) bytes into it),
 * so shmdt(m->list_base) detached the wrong address.  The dead
 * uid/gid fiddling before IPC_RMID (which ignores its buf argument)
 * has been dropped.
 */
APR_DECLARE(apr_status_t) apr_shm_destroy(apr_shmem_t *m)
{
    if (m == NULL)
        return APR_SUCCESS;

#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || APR_USE_SHMEM_MMAP_ZERO
    /* NOTE(review): shm_length is the number of bytes still *available*,
     * not the size of the mapping, so this munmap length is wrong once
     * anything has been allocated -- the original mapping size should be
     * recorded at init time.  TODO confirm and fix together with init. */
    munmap(m->shm_base, m->offsets->shm_length);
    apr_file_close(m->file);
#elif APR_USE_SHMEM_MMAP_ANON
    munmap(m->shm_base, m->offsets->shm_length);
#elif APR_USE_SHMEM_SHMGET
    shmdt(m->shm_base);
    if (shmctl(m->file, IPC_RMID, NULL) == -1)
        return errno + APR_OS_START_SYSERR;

    shmdt((void *)m->offsets);
    if (shmctl(m->listfd, IPC_RMID, NULL) == -1)
        return errno + APR_OS_START_SYSERR;
#elif APR_USE_SHMEM_BEOS
    delete_area(m->area_id);
#endif

    m->offsets = NULL;
    m->shm_base = NULL;

    return APR_SUCCESS;
}
/*
 * Allocate reqsize bytes from the shared pool.  Returns NULL when the
 * pool handle is NULL or the pool is exhausted.
 */
APR_DECLARE(void *) apr_shm_malloc(apr_shmem_t *m, apr_size_t reqsize)
{
    memchunk_t *blk = NULL;

    if (m != NULL) {
        apr_lock_acquire(m->lock);
        blk = alloc_chunk(m, reqsize);
        apr_lock_release(m->lock);
    }

    return blk ? SHM_ADDR(blk->offset) : NULL;
}
/*
 * Allocate reqsize bytes from the shared pool, zero-filled.  Returns
 * NULL when the pool handle is NULL or the pool is exhausted.
 */
APR_DECLARE(void *) apr_shm_calloc(apr_shmem_t *m, apr_size_t reqsize)
{
    memchunk_t *blk = NULL;

    if (m != NULL) {
        apr_lock_acquire(m->lock);
        blk = alloc_chunk(m, reqsize);
        if (blk != NULL)
            memset(SHM_ADDR(blk->offset), 0, reqsize);
        apr_lock_release(m->lock);
    }

    return blk ? SHM_ADDR(blk->offset) : NULL;
}
+
+APR_DECLARE(void *) apr_shm_realloc(apr_shmem_t *m, void *p, apr_size_t
reqsize)
+{
+ memchunk_t *b = (memchunk_t *)NULL;
+
+ if (m != (apr_shmem_t *)NULL)
+ {
+ apr_lock_acquire(m->lock);
+ if (p != NULL)
+ b = realloc_chunk(m, p, reqsize);
+ else
+ b = alloc_chunk(m, reqsize);
+ apr_lock_release(m->lock);
+ }
+
+ return ((b) ? SHM_ADDR(b->offset) : NULL);
}
-APR_DECLARE(apr_status_t) apr_shm_free(apr_shmem_t *shared, void *entity)
+APR_DECLARE(apr_status_t) apr_shm_free(apr_shmem_t *m, void *entity)
{
- /* Without a memory management scheme within our shared memory, it
- * is impossible to implement free. */
+ if (m != (apr_shmem_t *)NULL)
+ {
+ apr_lock_acquire(m->lock);
+ free_chunk(m, entity);
+ apr_lock_release(m->lock);
+ }
return APR_SUCCESS;
}
-APR_DECLARE(apr_status_t) apr_shm_name_get(apr_shmem_t *c,
+APR_DECLARE(apr_status_t) apr_shm_name_get(apr_shmem_t *m,
apr_shm_name_t **name)
{
#if APR_USES_ANONYMOUS_SHM
@@ -280,7 +691,7 @@
#endif
}
-APR_DECLARE(apr_status_t) apr_shm_name_set(apr_shmem_t *c,
+APR_DECLARE(apr_status_t) apr_shm_name_set(apr_shmem_t *m,
apr_shm_name_t *name)
{
#if APR_USES_ANONYMOUS_SHM
@@ -295,7 +706,7 @@
#endif
}
-APR_DECLARE(apr_status_t) apr_shm_open(apr_shmem_t *c)
+APR_DECLARE(apr_status_t) apr_shm_open(apr_shmem_t *m)
{
#if APR_USES_ANONYMOUS_SHM
@@ -315,16 +726,16 @@
/*
 * Report the number of bytes still available in the shared pool via
 * *size.  Returns APR_SUCCESS when any space remains, otherwise
 * APR_ENOSHMAVAIL (in which case *size is untouched for a NULL handle).
 */
APR_DECLARE(apr_status_t) apr_shm_avail(apr_shmem_t *m, apr_size_t *size)
{
    apr_status_t status = APR_ENOSHMAVAIL;

    if (m != NULL) {
        apr_lock_acquire(m->lock);
        *size = m->offsets->shm_length;
        if (*size > 0)
            status = APR_SUCCESS;
        apr_lock_release(m->lock);
    }

    return status;
}