Module: xenomai-3
Branch: master
Commit: fa8b8c534166276f9a9f9df7ba83b04acddd37e4
URL:    
http://git.xenomai.org/?p=xenomai-3.git;a=commit;h=fa8b8c534166276f9a9f9df7ba83b04acddd37e4

Author: Philippe Gerum <r...@xenomai.org>
Date:   Mon Jun  1 16:52:34 2015 +0200

boilerplate, copperplate: fix syncluster with remote sync (pshared)

---

 include/boilerplate/scope.h       |   26 ++++++++----
 include/copperplate/cluster.h     |    9 ++++-
 include/copperplate/threadobj.h   |    6 +--
 lib/boilerplate/hash.c            |    4 --
 lib/copperplate/cluster.c         |   19 ++++-----
 lib/copperplate/heapobj-pshared.c |   79 +++++++++++++++++++------------------
 lib/copperplate/threadobj.c       |    2 +-
 7 files changed, 80 insertions(+), 65 deletions(-)

diff --git a/include/boilerplate/scope.h b/include/boilerplate/scope.h
index 9e551ae..ae71063 100644
--- a/include/boilerplate/scope.h
+++ b/include/boilerplate/scope.h
@@ -31,10 +31,15 @@ extern void *__main_heap;
 
 int pshared_check(void *heap, void *addr);
 
-#define dref_type(t)           memoff_t
-#define __memoff(base, addr)   ((caddr_t)(addr) - (caddr_t)(base))
-#define __memptr(base, off)    ((caddr_t)(base) + (off))
-#define __memchk(base, addr)   pshared_check(base, addr)
+#define dref_type(t)   memoff_t
+
+#define __memoff(__base, __addr)       ((caddr_t)(__addr) - (caddr_t)(__base))
+#define __memptr(__base, __off)                ((caddr_t)(__base) + (__off))
+#define __memchk(__base, __addr)       pshared_check(__base, __addr)
+
+#define __moff(__p)    __memoff(__main_heap, __p)
+#define __mptr(__off)  __memptr(__main_heap, __off)
+#define __mchk(__p)    __memchk(__main_heap, __p)
 
 #define mutex_scope_attribute  PTHREAD_PROCESS_SHARED
 #define sem_scope_attribute    1
@@ -47,10 +52,15 @@ int pshared_check(void *heap, void *addr);
 
 #define __main_heap    NULL
 
-#define dref_type(t)           __typeof__(t)
-#define __memoff(base, addr)   (addr)
-#define __memptr(base, off)    (off)
-#define __memchk(base, addr)   1
+#define dref_type(t)   __typeof__(t)
+
+#define __memoff(__base, __addr)       (__addr)
+#define __memptr(__base, __off)                (__off)
+#define __memchk(__base, __addr)       1
+
+#define __moff(__p)    (__p)
+#define __mptr(__off)  (__off)
+#define __mchk(__p)    1
 
 #define mutex_scope_attribute  PTHREAD_PROCESS_PRIVATE
 #define sem_scope_attribute    0
diff --git a/include/copperplate/cluster.h b/include/copperplate/cluster.h
index af7dfee..c29d6f2 100644
--- a/include/copperplate/cluster.h
+++ b/include/copperplate/cluster.h
@@ -155,7 +155,14 @@ pid_t pvclusterobj_cnode(const struct pvclusterobj *cobj)
 #endif /* !CONFIG_XENO_PSHARED */
 
 struct syncluster_wait_struct {
-       const char *name;
+       union {
+               struct {
+                       dref_type(char *) name;
+               } shared;
+               struct {
+                       const char *name;
+               } private;
+       };
 };
 
 #ifdef __cplusplus
diff --git a/include/copperplate/threadobj.h b/include/copperplate/threadobj.h
index edc4d6c..4562dcb 100644
--- a/include/copperplate/threadobj.h
+++ b/include/copperplate/threadobj.h
@@ -179,7 +179,7 @@ struct threadobj {
        struct holder wait_link;
        int wait_status;
        int wait_prio;
-       void *wait_union;
+       dref_type(void *) wait_union;
        size_t wait_size;
        timer_t periodic_timer;
 
@@ -496,14 +496,14 @@ static inline int threadobj_get_errno(struct threadobj *thobj)
                struct threadobj *__thobj = threadobj_current();        \
                assert(__thobj != NULL);                                \
                assert(sizeof(typeof(T)) <= __thobj->wait_size);        \
-               __thobj->wait_union;                                    \
+               (void *)__mptr(__thobj->wait_union);                    \
        })
 
 #define threadobj_finish_wait()                do { } while (0)
 
 static inline void *threadobj_get_wait(struct threadobj *thobj)
 {
-       return thobj->wait_union;
+       return __mptr(thobj->wait_union);
 }
 
 static inline const char *threadobj_get_name(struct threadobj *thobj)
diff --git a/lib/boilerplate/hash.c b/lib/boilerplate/hash.c
index 35fcf73..aa35fa4 100644
--- a/lib/boilerplate/hash.c
+++ b/lib/boilerplate/hash.c
@@ -44,10 +44,6 @@
        c -= a; c -= b; c ^= (b>>15);           \
 }
 
-#define __moff(__p)    __memoff(__main_heap, __p)
-#define __mptr(__p)    __memptr(__main_heap, __p)
-#define __mchk(__p)    __memchk(__main_heap, __p)
-
 static inline int store_key(struct hashobj *obj,
                            const void *key, size_t len,
                            const struct hash_operations *hops);
diff --git a/lib/copperplate/cluster.c b/lib/copperplate/cluster.c
index 7353aa8..9355d94 100644
--- a/lib/copperplate/cluster.c
+++ b/lib/copperplate/cluster.c
@@ -255,9 +255,8 @@ redo:
        hobj = hash_search(&main_catalog, name, strlen(name),
                           &hash_operations);
        if (hobj) {
-               d = container_of(hobj, struct syndictionary, hobj);
-               ret = 0;
-               goto out;
+               sc->d = container_of(hobj, struct syndictionary, hobj);
+               return 0;
        }
 
        d = xnmalloc(sizeof(*d));
@@ -277,7 +276,7 @@ redo:
                xnfree(d);
                goto redo;
        }
-out:
+
        sc->d = d;
 
        return syncobj_init(&d->sobj, CLOCK_COPPERPLATE,
@@ -310,7 +309,7 @@ int syncluster_addobj(struct syncluster *sc, const char *name,
         */
        syncobj_for_each_grant_waiter_safe(&sc->d->sobj, thobj, tmp) {
                wait = threadobj_get_wait(thobj);
-               if (*wait->name == *name && strcmp(wait->name, name) == 0)
+               if (strcmp(__mptr(wait->shared.name), name) == 0)
                        syncobj_grant_to(&sc->d->sobj, thobj);
        }
 out:
@@ -366,7 +365,7 @@ int syncluster_findobj(struct syncluster *sc,
                }
                if (wait == NULL) {
                        wait = threadobj_prepare_wait(struct syncluster_wait_struct);
-                       wait->name = name;
+                       wait->shared.name = __moff(xnstrdup(name));
                }
                ret = syncobj_wait_grant(&sc->d->sobj, timeout, &syns);
                if (ret) {
@@ -378,8 +377,10 @@ int syncluster_findobj(struct syncluster *sc,
 
        syncobj_unlock(&sc->d->sobj, &syns);
 out:
-       if (wait)
+       if (wait) {
+               xnfree(__mptr(wait->shared.name));
                threadobj_finish_wait();
+       }
 
        return ret;
 }
@@ -517,7 +518,7 @@ int pvsyncluster_addobj(struct pvsyncluster *sc, const char *name,
         */
        syncobj_for_each_grant_waiter_safe(&sc->sobj, thobj, tmp) {
                wait = threadobj_get_wait(thobj);
-               if (*wait->name == *name && strcmp(wait->name, name) == 0)
+               if (strcmp(wait->private.name, name) == 0)
                        syncobj_grant_to(&sc->sobj, thobj);
        }
 out:
@@ -572,7 +573,7 @@ int pvsyncluster_findobj(struct pvsyncluster *sc,
                }
                if (wait == NULL) {
                        wait = threadobj_prepare_wait(struct syncluster_wait_struct);
-                       wait->name = name;
+                       wait->private.name = name;
                }
                ret = syncobj_wait_grant(&sc->sobj, timeout, &syns);
                if (ret) {
diff --git a/lib/copperplate/heapobj-pshared.c b/lib/copperplate/heapobj-pshared.c
index 27255a3..d7164c5 100644
--- a/lib/copperplate/heapobj-pshared.c
+++ b/lib/copperplate/heapobj-pshared.c
@@ -100,10 +100,10 @@ struct sysgroup *__main_sysgroup;
 
 static struct heapobj main_pool;
 
-#define __moff(h, p)           ((caddr_t)(p) - (caddr_t)(h))
-#define __moff_check(h, p)     ((p) ? __moff(h, p) : 0)
-#define __mref(h, o)           ((void *)((caddr_t)(h) + (o)))
-#define __mref_check(h, o)     ((o) ? __mref(h, o) : NULL)
+#define __shoff(h, p)          ((caddr_t)(p) - (caddr_t)(h))
+#define __shoff_check(h, p)    ((p) ? __shoff(h, p) : 0)
+#define __shref(h, o)          ((void *)((caddr_t)(h) + (o)))
+#define __shref_check(h, o)    ((o) ? __shref(h, o) : NULL)
 
 static inline size_t __align_to(size_t size, size_t al)
 {
@@ -131,13 +131,13 @@ static void init_extent(struct shared_heap *heap, struct shared_extent *extent)
        __holder_init_nocheck(heap, &extent->link);
 
        /* The initial extent starts right after the header. */
-       extent->membase = __moff(heap, extent) + heap->hdrsize;
+       extent->membase = __shoff(heap, extent) + heap->hdrsize;
        lastpgnum = heap->npages - 1;
 
        /* Mark each page as free in the page map. */
-       for (n = 0, freepage = __mref(heap, extent->membase);
+       for (n = 0, freepage = __shref(heap, extent->membase);
             n < lastpgnum; n++, freepage += HOBJ_PAGE_SIZE) {
-               *((memoff_t *)freepage) = __moff(heap, freepage) + HOBJ_PAGE_SIZE;
+               *((memoff_t *)freepage) = __shoff(heap, freepage) + HOBJ_PAGE_SIZE;
                extent->pagemap[n].type = page_free;
                extent->pagemap[n].bcount = 0;
        }
@@ -145,7 +145,7 @@ static void init_extent(struct shared_heap *heap, struct shared_extent *extent)
        *((memoff_t *)freepage) = 0;
        extent->pagemap[lastpgnum].type = page_free;
        extent->pagemap[lastpgnum].bcount = 0;
-       extent->memlim = __moff(heap, freepage) + HOBJ_PAGE_SIZE;
+       extent->memlim = __shoff(heap, freepage) + HOBJ_PAGE_SIZE;
 
        /* The first page starts the free list of a new extent. */
        extent->freelist = extent->membase;
@@ -228,7 +228,7 @@ static caddr_t get_free_range(struct shared_heap *heap, size_t bsize, int log2si
        size_t pnum, pcont, fcont;
 
        __list_for_each_entry(heap, extent, &heap->extents, link) {
-               freepage = __mref_check(heap, extent->freelist);
+               freepage = __shref_check(heap, extent->freelist);
                while (freepage) {
                        headpage = freepage;
                        fcont = 0;
@@ -239,7 +239,7 @@ static caddr_t get_free_range(struct shared_heap *heap, size_t bsize, int log2si
                         */
                        do {
                                lastpage = freepage;
-                               freepage = __mref_check(heap, *((memoff_t *)freepage));
+                               freepage = __shref_check(heap, *((memoff_t *)freepage));
                                fcont += HOBJ_PAGE_SIZE;
                        } while (freepage == lastpage + HOBJ_PAGE_SIZE
                                 && fcont < bsize);
@@ -249,7 +249,7 @@ static caddr_t get_free_range(struct shared_heap *heap, size_t bsize, int log2si
                                 * page list, then proceed to the next
                                 * step.
                                 */
-                               if (__moff(heap, headpage) == extent->freelist)
+                               if (__shoff(heap, headpage) == extent->freelist)
                                        extent->freelist = *((memoff_t *)lastpage);
                                else
                                        *((memoff_t *)freehead) = *((memoff_t *)lastpage);
@@ -278,13 +278,13 @@ splitpage:
                for (block = headpage, eblock =
                     headpage + HOBJ_PAGE_SIZE - bsize; block < eblock;
                     block += bsize)
-                       *((memoff_t *)block) = __moff(heap, block) + bsize;
+                       *((memoff_t *)block) = __shoff(heap, block) + bsize;
 
                *((memoff_t *)eblock) = 0;
        } else
                *((memoff_t *)headpage) = 0;
 
-       pnum = (__moff(heap, headpage) - extent->membase) >> HOBJ_PAGE_SHIFT;
+       pnum = (__shoff(heap, headpage) - extent->membase) >> HOBJ_PAGE_SHIFT;
 
        /*
         * Update the page map.  If log2size is non-zero (i.e. bsize
@@ -351,7 +351,7 @@ static void *alloc_block(struct shared_heap *heap, size_t size)
 
                write_lock_nocancel(&heap->lock);
 
-               block = __mref_check(heap, heap->buckets[ilog].freelist);
+               block = __shref_check(heap, heap->buckets[ilog].freelist);
                if (block == NULL) {
                        block = get_free_range(heap, bsize, log2size);
                        if (block == NULL)
@@ -364,13 +364,13 @@ static void *alloc_block(struct shared_heap *heap, size_t size)
 
                        /* Search for the source extent of block. */
                        __list_for_each_entry(heap, extent, &heap->extents, link) {
-                               if (__moff(heap, block) >= extent->membase &&
-                                   __moff(heap, block) < extent->memlim)
+                               if (__shoff(heap, block) >= extent->membase &&
+                                   __shoff(heap, block) < extent->memlim)
                                        goto found;
                        }
                        assert(0);
                found:
-                       pnum = (__moff(heap, block) - extent->membase) >> HOBJ_PAGE_SHIFT;
+                       pnum = (__shoff(heap, block) - extent->membase) >> HOBJ_PAGE_SHIFT;
                        ++extent->pagemap[pnum].bcount;
                }
 
@@ -408,8 +408,8 @@ static int free_block(struct shared_heap *heap, void *block)
         * originating from.
         */
        __list_for_each_entry(heap, extent, &heap->extents, link) {
-               if (__moff(heap, block) >= extent->membase &&
-                   __moff(heap, block) < extent->memlim)
+               if (__shoff(heap, block) >= extent->membase &&
+                   __shoff(heap, block) < extent->memlim)
                        goto found;
        }
 
@@ -417,8 +417,8 @@ static int free_block(struct shared_heap *heap, void *block)
        goto out;
 found:
        /* Compute the heading page number in the page map. */
-       pnum = (__moff(heap, block) - extent->membase) >> HOBJ_PAGE_SHIFT;
-       boffset = (__moff(heap, block) -
+       pnum = (__shoff(heap, block) - extent->membase) >> HOBJ_PAGE_SHIFT;
+       boffset = (__shoff(heap, block) -
                   (extent->membase + (pnum << HOBJ_PAGE_SHIFT)));
 
        switch (extent->pagemap[pnum].type) {
@@ -439,7 +439,7 @@ found:
                for (freepage = (caddr_t)block,
                     tailpage = (caddr_t)block + bsize - HOBJ_PAGE_SIZE;
                     freepage < tailpage; freepage += HOBJ_PAGE_SIZE)
-                       *((memoff_t *)freepage) = __moff(heap, freepage) + HOBJ_PAGE_SIZE;
+                       *((memoff_t *)freepage) = __shoff(heap, freepage) + HOBJ_PAGE_SIZE;
 
        free_pages:
                /* Mark the released pages as free in the extent's page map. */
@@ -449,16 +449,16 @@ found:
                 * Return the sub-list to the free page list, keeping
                 * an increasing address order to favor coalescence.
                 */
-               for (nextpage = __mref_check(heap, extent->freelist), lastpage = NULL;
+               for (nextpage = __shref_check(heap, extent->freelist), lastpage = NULL;
                     nextpage && nextpage < (caddr_t)block;
-                    lastpage = nextpage, nextpage = __mref_check(heap, *((memoff_t *)nextpage)))
+                    lastpage = nextpage, nextpage = __shref_check(heap, *((memoff_t *)nextpage)))
                  ;     /* Loop */
 
-               *((memoff_t *)tailpage) = __moff_check(heap, nextpage);
+               *((memoff_t *)tailpage) = __shoff_check(heap, nextpage);
                if (lastpage)
-                       *((memoff_t *)lastpage) = __moff(heap, block);
+                       *((memoff_t *)lastpage) = __shoff(heap, block);
                else
-                       extent->freelist = __moff(heap, block);
+                       extent->freelist = __shoff(heap, block);
                break;
 
        default:
@@ -479,7 +479,7 @@ found:
                if (--extent->pagemap[pnum].bcount > 0) {
                        /* Return the block to the bucketed memory space. */
                        *((memoff_t *)block) = heap->buckets[ilog].freelist;
-                       heap->buckets[ilog].freelist = __moff(heap, block);
+                       heap->buckets[ilog].freelist = __shoff(heap, block);
                        ++heap->buckets[ilog].fcount;
                        break;
                }
@@ -495,7 +495,7 @@ found:
                         */
                        goto free_page_list;
 
-       freepage = __mref(heap, extent->membase) + (pnum << HOBJ_PAGE_SHIFT);
+       freepage = __shref(heap, extent->membase) + (pnum << HOBJ_PAGE_SHIFT);
                block = freepage;
                tailpage = freepage;
                nextpage = freepage + HOBJ_PAGE_SIZE;
@@ -522,11 +522,12 @@ found:
                 * comes first.
                 */
                for (tailptr = &heap->buckets[ilog].freelist,
-                            freeptr = __mref_check(heap, *tailptr), xpage = 1;
-                    freeptr && nblocks > 0; freeptr = __mref_check(heap, *((memoff_t *)freeptr))) {
+                            freeptr = __shref_check(heap, *tailptr), xpage = 1;
+                    freeptr && nblocks > 0;
+                    freeptr = __shref_check(heap, *((memoff_t *)freeptr))) {
                        if (freeptr < freepage || freeptr >= nextpage) {
                                if (xpage) { /* Limit random writes */
-                                       *tailptr = __moff(heap, freeptr);
+                                       *tailptr = __shoff(heap, freeptr);
                                        xpage = 0;
                                }
                                tailptr = (memoff_t *)freeptr;
@@ -535,7 +536,7 @@ found:
                                xpage = 1;
                        }
                }
-               *tailptr = __moff_check(heap, freeptr);
+               *tailptr = __shoff_check(heap, freeptr);
                goto free_pages;
        }
 
@@ -558,20 +559,20 @@ static size_t check_block(struct shared_heap *heap, void *block)
         * Find the extent the checked block is originating from.
         */
        __list_for_each_entry(heap, extent, &heap->extents, link) {
-               if (__moff(heap, block) >= extent->membase &&
-                   __moff(heap, block) < extent->memlim)
+               if (__shoff(heap, block) >= extent->membase &&
+                   __shoff(heap, block) < extent->memlim)
                        goto found;
        }
        goto out;
 found:
        /* Compute the heading page number in the page map. */
-       pnum = (__moff(heap, block) - extent->membase) >> HOBJ_PAGE_SHIFT;
+       pnum = (__shoff(heap, block) - extent->membase) >> HOBJ_PAGE_SHIFT;
        ptype = extent->pagemap[pnum].type;
        if (ptype == page_free || ptype == page_cont)
                goto out;
 
        bsize = (1 << ptype);
-       boffset = (__moff(heap, block) -
+       boffset = (__shoff(heap, block) -
                   (extent->membase + (pnum << HOBJ_PAGE_SHIFT)));
        if ((boffset & (bsize - 1)) != 0) /* Not a block start? */
                goto out;
@@ -796,8 +797,8 @@ int pshared_check(void *__heap, void *__addr)
        assert(!list_empty(&heap->extents));
 
        __list_for_each_entry(heap, extent, &heap->extents, link) {
-               if (__moff(heap, __addr) >= extent->membase &&
-                   __moff(heap, __addr) < extent->memlim)
+               if (__shoff(heap, __addr) >= extent->membase &&
+                   __shoff(heap, __addr) < extent->memlim)
                        return 1;
        }
 
diff --git a/lib/copperplate/threadobj.c b/lib/copperplate/threadobj.c
index 5a528fa..1b98fbf 100644
--- a/lib/copperplate/threadobj.c
+++ b/lib/copperplate/threadobj.c
@@ -1064,7 +1064,7 @@ void *__threadobj_alloc(size_t tcb_struct_size,
 
        thobj = p + thobj_offset;
        thobj->core_offset = thobj_offset;
-       thobj->wait_union = p + tcb_struct_size;
+       thobj->wait_union = __moff(p + tcb_struct_size);
        thobj->wait_size = wait_union_size;
 
        return p;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://xenomai.org/mailman/listinfo/xenomai-git

Reply via email to