Module: xenomai-gch
Branch: for-forge
Commit: c05228a2abc2b2b78675676251e70e10fbb05b3d
URL:    http://git.xenomai.org/?p=xenomai-gch.git;a=commit;h=c05228a2abc2b2b78675676251e70e10fbb05b3d

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Mon Nov 21 21:49:45 2011 +0100

cobalt: more mutex and condvars cleanups

---

 include/asm-arm/wrappers.h    |    4 +
 kernel/cobalt/cond.c          |  115 +++++++++++++++++---------------------
 kernel/cobalt/cond.h          |   41 +++++++-------
 kernel/cobalt/internal.h      |    2 -
 kernel/cobalt/mutex.c         |   57 ++++++++-----------
 kernel/cobalt/mutex.h         |   35 +++++++-----
 kernel/cobalt/nucleus/synch.c |    2 +-
 lib/cobalt/cond.c             |  123 ++++++++++++++++++++++------------------
 lib/cobalt/mutex.c            |  113 ++++++++++++++++++-------------------
 9 files changed, 245 insertions(+), 247 deletions(-)

diff --git a/include/asm-arm/wrappers.h b/include/asm-arm/wrappers.h
index 0a12e8d..e1925d1 100644
--- a/include/asm-arm/wrappers.h
+++ b/include/asm-arm/wrappers.h
@@ -32,6 +32,10 @@
 
 #define wrap_strncpy_from_user(dstP, srcP, n)  __strncpy_from_user(dstP, srcP, 
n)
 
+#define __put_user_inatomic __put_user
+
+#define __get_user_inatomic __get_user
+
 #define rthal_irq_desc_status(irq)     (rthal_irq_descp(irq)->status)
 
 #if !defined(CONFIG_GENERIC_HARDIRQS) \
diff --git a/kernel/cobalt/cond.c b/kernel/cobalt/cond.c
index 4b65b3a..f732c1b 100644
--- a/kernel/cobalt/cond.c
+++ b/kernel/cobalt/cond.c
@@ -53,7 +53,8 @@
 
 static pthread_condattr_t default_cond_attr;
 
-static void cond_destroy_internal(cobalt_cond_t * cond, cobalt_kqueues_t *q)
+static inline void
+cond_destroy_internal(cobalt_cond_t *cond, cobalt_kqueues_t *q)
 {
        spl_t s;
 
@@ -94,10 +95,9 @@ static void cond_destroy_internal(cobalt_cond_t * cond, 
cobalt_kqueues_t *q)
  * Specification.</a>
  *
  */
-static int
-pthread_cond_init(pthread_cond_t *cnd, const pthread_condattr_t *attr)
+static inline int
+pthread_cond_init(struct __shadow_cond *cnd, const pthread_condattr_t *attr)
 {
-       struct __shadow_cond *shadow = &((union __xeno_cond *)cnd)->shadow_cond;
        xnflags_t synch_flags = XNSYNCH_PRIO | XNSYNCH_NOPIP;
        struct xnsys_ppd *sys_ppd;
        cobalt_cond_t *cond;
@@ -108,16 +108,16 @@ pthread_cond_init(pthread_cond_t *cnd, const 
pthread_condattr_t *attr)
        if (!attr)
                attr = &default_cond_attr;
 
-       cond = (cobalt_cond_t *) xnmalloc(sizeof(*cond));
+       cond = (cobalt_cond_t *)xnmalloc(sizeof(*cond));
        if (!cond)
-               return ENOMEM;
+               return -ENOMEM;
 
        sys_ppd = xnsys_ppd_get(attr->pshared);
        cond->pending_signals = (unsigned long *)
                xnheap_alloc(&sys_ppd->sem_heap,
                             sizeof(*(cond->pending_signals)));
        if (!cond->pending_signals) {
-               err = EAGAIN;
+               err = -EAGAIN;
                goto err_free_cond;
        }
        *(cond->pending_signals) = 0;
@@ -125,31 +125,31 @@ pthread_cond_init(pthread_cond_t *cnd, const 
pthread_condattr_t *attr)
        xnlock_get_irqsave(&nklock, s);
 
        if (attr->magic != COBALT_COND_ATTR_MAGIC) {
-               err = EINVAL;
+               err = -EINVAL;
                goto err_free_pending_signals;
        }
 
        condq = &cobalt_kqueues(attr->pshared)->condq;
 
-       if (shadow->magic == COBALT_COND_MAGIC) {
+       if (cnd->magic == COBALT_COND_MAGIC) {
                xnholder_t *holder;
                for (holder = getheadq(condq); holder;
                     holder = nextq(condq, holder))
-                       if (holder == &shadow->cond->link) {
+                       if (holder == &cnd->cond->link) {
                                /* cond is already in the queue. */
-                               err = EBUSY;
+                               err = -EBUSY;
                                goto err_free_pending_signals;
                        }
        }
 
-       shadow->attr = *attr;
-       shadow->pending_signals_offset =
+       cnd->attr = *attr;
+       cnd->pending_signals_offset =
                xnheap_mapped_offset(&sys_ppd->sem_heap,
                                     cond->pending_signals);
-       shadow->mutex_datp = (struct mutex_dat *)~0UL;
+       cnd->mutex_datp = (struct mutex_dat *)~0UL;
 
-       shadow->magic = COBALT_COND_MAGIC;
-       shadow->cond = cond;
+       cnd->magic = COBALT_COND_MAGIC;
+       cnd->cond = cond;
 
        cond->magic = COBALT_COND_MAGIC;
        xnsynch_init(&cond->synchbase, synch_flags, NULL);
@@ -195,32 +195,31 @@ pthread_cond_init(pthread_cond_t *cnd, const 
pthread_condattr_t *attr)
  * Specification.</a>
  *
  */
-static int pthread_cond_destroy(pthread_cond_t * cnd)
+static inline int pthread_cond_destroy(struct __shadow_cond *cnd)
 {
-       struct __shadow_cond *shadow = &((union __xeno_cond *)cnd)->shadow_cond;
        cobalt_cond_t *cond;
        spl_t s;
 
        xnlock_get_irqsave(&nklock, s);
 
-       cond = shadow->cond;
-       if (!cobalt_obj_active(shadow, COBALT_COND_MAGIC, struct __shadow_cond)
+       cond = cnd->cond;
+       if (!cobalt_obj_active(cnd, COBALT_COND_MAGIC, struct __shadow_cond)
            || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) 
{
                xnlock_put_irqrestore(&nklock, s);
-               return EINVAL;
+               return -EINVAL;
        }
 
        if (cond->owningq != cobalt_kqueues(cond->attr.pshared)) {
                xnlock_put_irqrestore(&nklock, s);
-               return EPERM;
+               return -EPERM;
        }
 
        if (xnsynch_nsleepers(&cond->synchbase) || cond->mutex) {
                xnlock_put_irqrestore(&nklock, s);
-               return EBUSY;
+               return -EBUSY;
        }
 
-       cobalt_mark_deleted(shadow);
+       cobalt_mark_deleted(cnd);
        cobalt_mark_deleted(cond);
 
        xnlock_put_irqrestore(&nklock, s);
@@ -239,11 +238,8 @@ static inline int 
cobalt_cond_timedwait_prologue(xnthread_t *cur,
        spl_t s;
        int err;
 
-       if (!cond || !mutex)
-               return EINVAL;
-
        if (xnpod_unblockable_p())
-               return EPERM;
+               return -EPERM;
 
        xnlock_get_irqsave(&nklock, s);
 
@@ -349,16 +345,14 @@ static inline int 
cobalt_cond_timedwait_epilogue(xnthread_t *cur,
        return err;
 }
 
-int cobalt_cond_init(union __xeno_cond __user *u_cnd,
+int cobalt_cond_init(struct __shadow_cond __user *u_cnd,
                     const pthread_condattr_t __user *u_attr)
 {
        pthread_condattr_t locattr, *attr;
-       union __xeno_cond cnd;
+       struct __shadow_cond cnd;
        int err;
 
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
+       if (__xn_safe_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
                return -EFAULT;
 
        if (u_attr) {
@@ -371,31 +365,26 @@ int cobalt_cond_init(union __xeno_cond __user *u_cnd,
                attr = NULL;
 
        /* Always use default attribute. */
-       err = pthread_cond_init(&cnd.native_cond, attr);
-
-       if (err)
-               return -err;
+       err = pthread_cond_init(&cnd, attr);
+       if (err < 0)
+               return err;
 
-       return __xn_safe_copy_to_user(&u_cnd->shadow_cond,
-                                     &cnd.shadow_cond, 
sizeof(u_cnd->shadow_cond));
+       return __xn_safe_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
 }
 
-int cobalt_cond_destroy(union __xeno_cond __user *u_cnd)
+int cobalt_cond_destroy(struct __shadow_cond __user *u_cnd)
 {
-       union __xeno_cond cnd;
+       struct __shadow_cond cnd;
        int err;
 
-       if (__xn_safe_copy_from_user(&cnd.shadow_cond,
-                                    &u_cnd->shadow_cond,
-                                    sizeof(cnd.shadow_cond)))
+       if (__xn_safe_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
                return -EFAULT;
 
-       err = pthread_cond_destroy(&cnd.native_cond);
-       if (err)
-               return -err;
+       err = pthread_cond_destroy(&cnd);
+       if (err < 0)
+               return err;
 
-       return __xn_safe_copy_to_user(&u_cnd->shadow_cond,
-                                     &cnd.shadow_cond, 
sizeof(u_cnd->shadow_cond));
+       return __xn_safe_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
 }
 
 struct us_cond_data {
@@ -403,8 +392,8 @@ struct us_cond_data {
 };
 
 /* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */
-int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
-                             union __xeno_mutex __user *u_mx,
+int cobalt_cond_wait_prologue(struct __shadow_cond __user *u_cnd,
+                             struct __shadow_mutex __user *u_mx,
                              int *u_err,
                              unsigned int timed,
                              struct timespec __user *u_ts)
@@ -417,12 +406,12 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user 
*u_cnd,
        struct timespec ts;
        int err, perr = 0;
 
-       __xn_get_user(cnd, &u_cnd->shadow_cond.cond);
-       __xn_get_user(mx, &u_mx->shadow_mutex.mutex);
+       __xn_get_user(cnd, &u_cnd->cond);
+       __xn_get_user(mx, &u_mx->mutex);
 
        if (!cnd->mutex) {
-               __xn_get_user(datp, &u_mx->shadow_mutex.dat);
-               __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
+               __xn_get_user(datp, &u_mx->dat);
+               __xn_put_user(datp, &u_cnd->mutex_datp);
        }
 
        if (timed) {
@@ -444,14 +433,14 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user 
*u_cnd,
 
                if (!cnd->mutex) {
                        datp = (struct mutex_dat *)~0UL;
-                       __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
+                       __xn_put_user(datp, &u_cnd->mutex_datp);
                }
                break;
 
        case -EINTR:
                if (!cnd->mutex) {
                        datp = (struct mutex_dat *)~0UL;
-                       __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
+                       __xn_put_user(datp, &u_cnd->mutex_datp);
                }
 
                perr = err;
@@ -461,7 +450,7 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user 
*u_cnd,
        default:
                if (!cnd->mutex) {
                        datp = (struct mutex_dat *)~0UL;
-                       __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
+                       __xn_put_user(datp, &u_cnd->mutex_datp);
                }
 
                /* Please gcc and handle the case which will never
@@ -475,22 +464,22 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user 
*u_cnd,
        return err == 0 ? perr : err;
 }
 
-int cobalt_cond_wait_epilogue(union __xeno_cond __user *u_cnd,
-                             union __xeno_mutex __user *u_mx)
+int cobalt_cond_wait_epilogue(struct __shadow_cond __user *u_cnd,
+                             struct __shadow_mutex __user *u_mx)
 {
        xnthread_t *cur = xnshadow_thread(current);
        cobalt_cond_t *cnd;
        cobalt_mutex_t *mx;
        int err;
 
-       __xn_get_user(cnd, &u_cnd->shadow_cond.cond);
-       __xn_get_user(mx, &u_mx->shadow_mutex.mutex);
+       __xn_get_user(cnd, &u_cnd->cond);
+       __xn_get_user(mx, &u_mx->mutex);
 
        err = cobalt_cond_timedwait_epilogue(cur, cnd, mx);
 
        if (!cnd->mutex) {
                struct mutex_dat *datp = (struct mutex_dat *)~0UL;
-               __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
+               __xn_put_user(datp, &u_cnd->mutex_datp);
        }
 
        return err;
diff --git a/kernel/cobalt/cond.h b/kernel/cobalt/cond.h
index 4afce07..094502a 100644
--- a/kernel/cobalt/cond.h
+++ b/kernel/cobalt/cond.h
@@ -42,6 +42,8 @@ union __xeno_cond {
        } shadow_cond;
 };
 
+#define COBALT_COND_MAGIC 0x86860505
+
 #if defined(__KERNEL__) || defined(__XENO_SIM__)
 
 #include "internal.h"
@@ -54,13 +56,11 @@ typedef struct cobalt_cond {
        xnsynch_t synchbase;
        xnholder_t link;        /* Link in cobalt_condq */
 
-#define link2cond(laddr)                                                \
-    ((cobalt_cond_t *)(((char *)laddr) - offsetof(cobalt_cond_t, link)))
+#define link2cond(laddr) container_of(laddr, cobalt_cond_t, link)
 
        xnholder_t mutex_link;
 
-#define mutex_link2cond(laddr)                                         \
-    ((cobalt_cond_t *)(((char *)laddr) - offsetof(cobalt_cond_t, mutex_link)))
+#define mutex_link2cond(laddr) container_of(laddr, cobalt_cond_t, mutex_link)
 
        unsigned long *pending_signals;
        pthread_condattr_t attr;
@@ -71,21 +71,18 @@ typedef struct cobalt_cond {
 static inline int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
 {
        unsigned long pending_signals;
-       int need_resched, i;
+       int need_resched, i, sleepers;
 
        pending_signals = *(cond->pending_signals);
 
        switch(pending_signals) {
-       case 0:
-               need_resched = 0;
-               break;
-
        default:
-               for(i = 0, need_resched = 0; i < pending_signals; i++) {
-                       if (xnsynch_wakeup_one_sleeper(&cond->synchbase) == 
NULL)
-                               break;
-                       need_resched = 1;
-               }
+               sleepers = xnsynch_nsleepers(&cond->synchbase);
+               if (pending_signals > sleepers)
+                       pending_signals = sleepers;
+               need_resched = !!pending_signals;
+               for(i = 0; i < pending_signals; i++)
+                       xnsynch_wakeup_one_sleeper(&cond->synchbase);
                *cond->pending_signals = 0;
                break;
 
@@ -93,24 +90,29 @@ static inline int cobalt_cond_deferred_signals(struct 
cobalt_cond *cond)
                need_resched =
                        xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
                *cond->pending_signals = 0;
+               break;
+
+       case 0:
+               need_resched = 0;
+               break;
        }
 
        return need_resched;
 }
 
-int cobalt_cond_init(union __xeno_cond __user *u_cnd,
+int cobalt_cond_init(struct __shadow_cond __user *u_cnd,
                     const pthread_condattr_t __user *u_attr);
 
-int cobalt_cond_destroy(union __xeno_cond __user *u_cnd);
+int cobalt_cond_destroy(struct __shadow_cond __user *u_cnd);
 
-int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
-                             union __xeno_mutex __user *u_mx,
+int cobalt_cond_wait_prologue(struct __shadow_cond __user *u_cnd,
+                             struct __shadow_mutex __user *u_mx,
                              int *u_err,
                              unsigned int timed,
                              struct timespec __user *u_ts);
 
-int cobalt_cond_wait_epilogue(union __xeno_cond __user *u_cnd,
-                             union __xeno_mutex __user *u_mx);
+int cobalt_cond_wait_epilogue(struct __shadow_cond __user *u_cnd,
+                             struct __shadow_mutex __user *u_mx);
 
 void cobalt_condq_cleanup(cobalt_kqueues_t *q);
 
diff --git a/kernel/cobalt/internal.h b/kernel/cobalt/internal.h
index 9c64204..1d5237f 100644
--- a/kernel/cobalt/internal.h
+++ b/kernel/cobalt/internal.h
@@ -36,9 +36,7 @@
 #define COBALT_ANY_MAGIC         COBALT_MAGIC(00)
 #define COBALT_THREAD_MAGIC      COBALT_MAGIC(01)
 #define COBALT_THREAD_ATTR_MAGIC COBALT_MAGIC(02)
-#define COBALT_MUTEX_MAGIC       COBALT_MAGIC(03)
 #define COBALT_MUTEX_ATTR_MAGIC  (COBALT_MAGIC(04) & ((1 << 24) - 1))
-#define COBALT_COND_MAGIC        COBALT_MAGIC(05)
 #define COBALT_COND_ATTR_MAGIC   (COBALT_MAGIC(06) & ((1 << 24) - 1))
 #define COBALT_SEM_MAGIC         COBALT_MAGIC(07)
 #define COBALT_KEY_MAGIC         COBALT_MAGIC(08)
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index 1ce136b..28bcaac 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -115,7 +115,7 @@ static void cobalt_mutex_destroy_inner(cobalt_mutex_t 
*mutex,
        xnfree(mutex);
 }
 
-static int cobalt_mutex_acquire(xnthread_t *cur,
+static inline int cobalt_mutex_acquire(xnthread_t *cur,
                                cobalt_mutex_t *mutex,
                                int timed,
                                xnticks_t abs_to)
@@ -134,8 +134,8 @@ static int cobalt_mutex_acquire(xnthread_t *cur,
        return cobalt_mutex_acquire_unchecked(cur, mutex, timed, abs_to);
 }
 
-static int cobalt_mutex_timedlock_break(cobalt_mutex_t *mutex,
-                                       int timed, xnticks_t abs_to)
+static inline int cobalt_mutex_timedlock_break(cobalt_mutex_t *mutex,
+                                              int timed, xnticks_t abs_to)
 {
        xnthread_t *cur = xnpod_current_thread();
        spl_t s;
@@ -201,14 +201,14 @@ static int cobalt_mutex_timedlock_break(cobalt_mutex_t 
*mutex,
 
 }
 
-int cobalt_mutex_check_init(union __xeno_mutex __user *u_mx)
+int cobalt_mutex_check_init(struct __shadow_mutex __user *u_mx)
 {
        cobalt_mutex_t *mutex;
        xnholder_t *holder;
        xnqueue_t *mutexq;
        spl_t s;
 
-       __xn_get_user(mutex, &u_mx->shadow_mutex.mutex);
+       __xn_get_user(mutex, &u_mx->mutex);
 
        mutexq = &cobalt_kqueues(0)->mutexq;
 
@@ -236,18 +236,16 @@ int cobalt_mutex_check_init(union __xeno_mutex __user 
*u_mx)
        return -EBUSY;
 }
 
-int cobalt_mutex_init(union __xeno_mutex __user *u_mx,
+int cobalt_mutex_init(struct __shadow_mutex __user *u_mx,
                      const pthread_mutexattr_t __user *u_attr)
 {
        pthread_mutexattr_t locattr, *attr;
+       struct __shadow_mutex mx;
        struct mutex_dat *datp;
-       union __xeno_mutex mx;
        cobalt_mutex_t *mutex;
        int err;
 
-       if (__xn_safe_copy_from_user(&mx.shadow_mutex,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(mx.shadow_mutex)))
+       if (__xn_safe_copy_from_user(&mx, u_mx, sizeof(mx)))
                return -EFAULT;
 
        if (u_attr) {
@@ -270,31 +268,25 @@ int cobalt_mutex_init(union __xeno_mutex __user *u_mx,
                return -EAGAIN;
        }
 
-       err = cobalt_mutex_init_inner(&mx.shadow_mutex, mutex, datp, attr);
+       err = cobalt_mutex_init_inner(&mx, mutex, datp, attr);
        if (err) {
                xnfree(mutex);
                xnheap_free(&xnsys_ppd_get(attr->pshared)->sem_heap, datp);
                return err;
        }
 
-       return __xn_safe_copy_to_user(&u_mx->shadow_mutex,
-                                     &mx.shadow_mutex, 
sizeof(u_mx->shadow_mutex));
+       return __xn_safe_copy_to_user(u_mx, &mx, sizeof(*u_mx));
 }
 
-int cobalt_mutex_destroy(union __xeno_mutex __user *u_mx)
+int cobalt_mutex_destroy(struct __shadow_mutex __user *u_mx)
 {
-       struct __shadow_mutex *shadow;
-       union __xeno_mutex mx;
+       struct __shadow_mutex mx;
        cobalt_mutex_t *mutex;
 
-       shadow = &mx.shadow_mutex;
-
-       if (__xn_safe_copy_from_user(shadow,
-                                    &u_mx->shadow_mutex,
-                                    sizeof(*shadow)))
+       if (__xn_safe_copy_from_user(&mx, u_mx, sizeof(mx)))
                return -EFAULT;
 
-       mutex = shadow->mutex;
+       mutex = mx.mutex;
        if (cobalt_kqueues(mutex->attr.pshared) != mutex->owningq)
                return -EPERM;
 
@@ -305,20 +297,19 @@ int cobalt_mutex_destroy(union __xeno_mutex __user *u_mx)
        if (countq(&mutex->conds))
                return -EBUSY;
 
-       cobalt_mark_deleted(shadow);
+       cobalt_mark_deleted(&mx);
        cobalt_mutex_destroy_inner(mutex, mutex->owningq);
 
-       return __xn_safe_copy_to_user(&u_mx->shadow_mutex,
-                                     shadow, sizeof(u_mx->shadow_mutex));
+       return __xn_safe_copy_to_user(u_mx, &mx, sizeof(*u_mx));
 }
 
-int cobalt_mutex_trylock(union __xeno_mutex __user *u_mx)
+int cobalt_mutex_trylock(struct __shadow_mutex __user *u_mx)
 {
        xnthread_t *cur = xnpod_current_thread();
        cobalt_mutex_t *mutex;
        int err;
 
-       __xn_get_user(mutex, &u_mx->shadow_mutex.mutex);
+       __xn_get_user(mutex, &u_mx->mutex);
 
        if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC,
                               struct cobalt_mutex))
@@ -346,26 +337,26 @@ int cobalt_mutex_trylock(union __xeno_mutex __user *u_mx)
        return err;
 }
 
-int cobalt_mutex_lock(union __xeno_mutex __user *u_mx)
+int cobalt_mutex_lock(struct __shadow_mutex __user *u_mx)
 {
        cobalt_mutex_t *mutex;
        int err;
 
-       __xn_get_user(mutex, &u_mx->shadow_mutex.mutex);
+       __xn_get_user(mutex, &u_mx->mutex);
 
        err = cobalt_mutex_timedlock_break(mutex, 0, XN_INFINITE);
 
        return err;
 }
 
-int cobalt_mutex_timedlock(union __xeno_mutex __user *u_mx,
+int cobalt_mutex_timedlock(struct __shadow_mutex __user *u_mx,
                           const struct timespec __user *u_ts)
 {
        cobalt_mutex_t *mutex;
        struct timespec ts;
        int err;
 
-       __xn_get_user(mutex, &u_mx->shadow_mutex.mutex);
+       __xn_get_user(mutex, &u_mx->mutex);
 
        if (__xn_safe_copy_from_user(&ts, u_ts, sizeof(ts)))
                return -EFAULT;
@@ -375,13 +366,13 @@ int cobalt_mutex_timedlock(union __xeno_mutex __user 
*u_mx,
        return err;
 }
 
-int cobalt_mutex_unlock(union __xeno_mutex __user *u_mx)
+int cobalt_mutex_unlock(struct __shadow_mutex __user *u_mx)
 {
        cobalt_mutex_t *mutex;
        int err;
        spl_t s;
 
-       __xn_get_user(mutex, &u_mx->shadow_mutex.mutex);
+       __xn_get_user(mutex, &u_mx->mutex);
 
        xnlock_get_irqsave(&nklock, s);
        err = cobalt_mutex_release(xnpod_current_thread(), mutex);
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index b4b7f88..82bbf05 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -46,6 +46,8 @@ union __xeno_mutex {
        } shadow_mutex;
 };
 
+#define COBALT_MUTEX_MAGIC (0x86860303)
+
 #if defined(__KERNEL__) || defined(__XENO_SIM__)
 
 #include "internal.h"
@@ -57,8 +59,7 @@ typedef struct cobalt_mutex {
        xnsynch_t synchbase;
        xnholder_t link;            /* Link in cobalt_mutexq */
 
-#define link2mutex(laddr)                                               \
-       ((cobalt_mutex_t *)(((char *)laddr) - offsetof(cobalt_mutex_t, link)))
+#define link2mutex(laddr) container_of(laddr, cobalt_mutex_t, link)
 
        xnqueue_t conds;
 
@@ -95,6 +96,7 @@ static inline int cobalt_mutex_acquire_unchecked(xnthread_t 
*cur,
 static inline int cobalt_mutex_release(xnthread_t *cur, cobalt_mutex_t *mutex)
 {
        struct mutex_dat *datp;
+       unsigned long flags;
        xnholder_t *holder;
        int need_resched;
 
@@ -107,33 +109,36 @@ static inline int cobalt_mutex_release(xnthread_t *cur, 
cobalt_mutex_t *mutex)
 #endif /* XENO_DEBUG(POSIX) */
 
        need_resched = 0;
-       for (holder = getheadq(&mutex->conds);
-            holder; holder = nextq(&mutex->conds, holder)) {
-               struct cobalt_cond *cond = mutex_link2cond(holder);
-               need_resched |= cobalt_cond_deferred_signals(cond);
-       }
        datp = container_of(mutex->synchbase.fastlock, struct mutex_dat, owner);
-       datp->flags &= ~COBALT_MUTEX_COND_SIGNAL;
+       flags = datp->flags;
+       if ((flags & COBALT_MUTEX_COND_SIGNAL)) {
+               datp->flags = flags & ~COBALT_MUTEX_COND_SIGNAL;
+               for (holder = getheadq(&mutex->conds);
+                    holder; holder = nextq(&mutex->conds, holder)) {
+                       struct cobalt_cond *cond = mutex_link2cond(holder);
+                       need_resched |= cobalt_cond_deferred_signals(cond);
+               }
+       }
        need_resched |= xnsynch_release(&mutex->synchbase) != NULL;
 
        return need_resched;
 }
 
-int cobalt_mutex_check_init(union __xeno_mutex __user *u_mx);
+int cobalt_mutex_check_init(struct __shadow_mutex __user *u_mx);
 
-int cobalt_mutex_init(union __xeno_mutex __user *u_mx,
+int cobalt_mutex_init(struct __shadow_mutex __user *u_mx,
                      const pthread_mutexattr_t __user *u_attr);
 
-int cobalt_mutex_destroy(union __xeno_mutex __user *u_mx);
+int cobalt_mutex_destroy(struct __shadow_mutex __user *u_mx);
 
-int cobalt_mutex_trylock(union __xeno_mutex __user *u_mx);
+int cobalt_mutex_trylock(struct __shadow_mutex __user *u_mx);
 
-int cobalt_mutex_lock(union __xeno_mutex __user *u_mx);
+int cobalt_mutex_lock(struct __shadow_mutex __user *u_mx);
 
-int cobalt_mutex_timedlock(union __xeno_mutex __user *u_mx,
+int cobalt_mutex_timedlock(struct __shadow_mutex __user *u_mx,
                           const struct timespec __user *u_ts);
 
-int cobalt_mutex_unlock(union __xeno_mutex __user *u_mx);
+int cobalt_mutex_unlock(struct __shadow_mutex __user *u_mx);
 
 void cobalt_mutexq_cleanup(cobalt_kqueues_t *q);
 
diff --git a/kernel/cobalt/nucleus/synch.c b/kernel/cobalt/nucleus/synch.c
index f65b99b..08315d8 100644
--- a/kernel/cobalt/nucleus/synch.c
+++ b/kernel/cobalt/nucleus/synch.c
@@ -645,7 +645,7 @@ void xnsynch_requeue_sleeper(struct xnthread *thread)
 }
 EXPORT_SYMBOL_GPL(xnsynch_requeue_sleeper);
 
-static struct xnthread *
+static inline struct xnthread *
 xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
 {
        xnhandle_t lastownerh, newownerh;
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index 2e9709b..65cb020 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -25,11 +25,9 @@
 
 extern int __cobalt_muxid;
 
-#define COBALT_COND_MAGIC 0x86860505
-
 extern unsigned long xeno_sem_heap[2];
 
-static unsigned long *cond_get_signalsp(struct __shadow_cond *shadow)
+static inline unsigned long *cond_get_signalsp(struct __shadow_cond *shadow)
 {
        if (likely(!shadow->attr.pshared))
                return shadow->pending_signals;
@@ -38,7 +36,8 @@ static unsigned long *cond_get_signalsp(struct __shadow_cond 
*shadow)
                                 + shadow->pending_signals_offset);
 }
 
-static struct mutex_dat *cond_get_mutex_datp(struct __shadow_cond *shadow)
+static inline struct mutex_dat *
+cond_get_mutex_datp(struct __shadow_cond *shadow)
 {
        if (shadow->mutex_datp == (struct mutex_dat *)~0UL)
                return NULL;
@@ -87,29 +86,26 @@ int __wrap_pthread_condattr_setpshared(pthread_condattr_t 
*attr, int pshared)
                                  __cobalt_condattr_setpshared, attr, pshared);
 }
 
-int __wrap_pthread_cond_init(pthread_cond_t * cond,
+int __wrap_pthread_cond_init(pthread_cond_t *cond,
                             const pthread_condattr_t * attr)
 {
-       struct __shadow_cond *shadow =
-               &((union __xeno_cond *)cond)->shadow_cond;
+       struct __shadow_cond *_cnd = &((union __xeno_cond *)cond)->shadow_cond;
        int err;
 
-       err = XENOMAI_SKINCALL2(__cobalt_muxid,
-                                __cobalt_cond_init, shadow, attr);
-       if (!err && !shadow->attr.pshared) {
-               shadow->pending_signals = (unsigned long *)
-                       (xeno_sem_heap[0] + shadow->pending_signals_offset);
+       err = XENOMAI_SKINCALL2(__cobalt_muxid, __cobalt_cond_init, _cnd, attr);
+       if (!err && !_cnd->attr.pshared) {
+               _cnd->pending_signals = (unsigned long *)
+                       (xeno_sem_heap[0] + _cnd->pending_signals_offset);
        }
 
        return -err;
 }
 
-int __wrap_pthread_cond_destroy(pthread_cond_t * cond)
+int __wrap_pthread_cond_destroy(pthread_cond_t *cond)
 {
-       union __xeno_cond *_cond = (union __xeno_cond *)cond;
+       struct __shadow_cond *_cond = &((union __xeno_cond *)cond)->shadow_cond;
 
-       return -XENOMAI_SKINCALL1(__cobalt_muxid,
-                                 __cobalt_cond_destroy, &_cond->shadow_cond);
+       return -XENOMAI_SKINCALL1(__cobalt_muxid, __cobalt_cond_destroy, _cond);
 }
 
 struct cobalt_cond_cleanup_t {
@@ -135,31 +131,39 @@ static void __pthread_cond_cleanup(void *data)
 
 int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
+       struct __shadow_cond *_cnd = &((union __xeno_cond *)cond)->shadow_cond;
+       struct __shadow_mutex *_mx =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        struct cobalt_cond_cleanup_t c = {
-               .cond = &((union __xeno_cond *)cond)->shadow_cond,
-               .mutex = &((union __xeno_mutex *)mutex)->shadow_mutex,
+               .cond = _cnd,
+               .mutex = _mx,
        };
        int err, oldtype;
+       unsigned count;
 
-       if (c.mutex->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
+       if (_mx->magic != COBALT_MUTEX_MAGIC
+           || _cnd->magic != COBALT_COND_MAGIC)
+               return EINVAL;
+
+       if (_mx->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
                xnhandle_t cur = xeno_get_current();
 
                if (cur == XN_NO_HANDLE)
                        return EPERM;
 
-               if (xnsynch_fast_owner_check(mutex_get_ownerp(c.mutex), cur))
+               if (xnsynch_fast_owner_check(mutex_get_ownerp(_mx), cur))
                        return EPERM;
        }
 
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
-       c.count = c.mutex->lockcnt;
+       count = _mx->lockcnt;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 
        err = XENOMAI_SKINCALL5(__cobalt_muxid,
                                 __cobalt_cond_wait_prologue,
-                                c.cond, c.mutex, &c.err, 0, NULL);
+                                _cnd, _mx, &c.err, 0, NULL);
 
        pthread_setcanceltype(oldtype, NULL);
 
@@ -167,74 +171,81 @@ int __wrap_pthread_cond_wait(pthread_cond_t *cond, 
pthread_mutex_t *mutex)
 
        while (err == -EINTR)
                err = XENOMAI_SKINCALL2(__cobalt_muxid,
-                                        __cobalt_cond_wait_epilogue,
-                                       c.cond, c.mutex);
+                                       __cobalt_cond_wait_epilogue, _cnd, _mx);
 
-       c.mutex->lockcnt = c.count;
+       _mx->lockcnt = count;
 
        pthread_testcancel();
 
        return -err ?: -c.err;
 }
 
-int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
-                                 pthread_mutex_t * mutex,
+int __wrap_pthread_cond_timedwait(pthread_cond_t *cond,
+                                 pthread_mutex_t *mutex,
                                  const struct timespec *abstime)
 {
+       struct __shadow_cond *_cnd = &((union __xeno_cond *)cond)->shadow_cond;
+       struct __shadow_mutex *_mx =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        struct cobalt_cond_cleanup_t c = {
-               .cond = &((union __xeno_cond *)cond)->shadow_cond,
-               .mutex = &((union __xeno_mutex *)mutex)->shadow_mutex,
+               .cond = _cnd,
+               .mutex = _mx,
        };
        int err, oldtype;
+       unsigned count;
 
-       if (c.mutex->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
+       if (_mx->magic != COBALT_MUTEX_MAGIC
+           || _cnd->magic != COBALT_COND_MAGIC)
+               return EINVAL;
+
+       if (_mx->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
                xnhandle_t cur = xeno_get_current();
 
                if (cur == XN_NO_HANDLE)
                        return EPERM;
 
-               if (xnsynch_fast_owner_check(mutex_get_ownerp(c.mutex), cur))
+               if (xnsynch_fast_owner_check(mutex_get_ownerp(_mx), cur))
                        return EPERM;
        }
 
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
-       c.count = c.mutex->lockcnt;
+       count = _mx->lockcnt;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 
        err = XENOMAI_SKINCALL5(__cobalt_muxid,
                                __cobalt_cond_wait_prologue,
-                               c.cond, c.mutex, &c.err, 1, abstime);
+                               _cnd, _mx, &c.err, 1, abstime);
        pthread_setcanceltype(oldtype, NULL);
 
        pthread_cleanup_pop(0);
 
        while (err == -EINTR)
                err = XENOMAI_SKINCALL2(__cobalt_muxid,
-                                       __cobalt_cond_wait_epilogue,
-                                       c.cond, c.mutex);
+                                       __cobalt_cond_wait_epilogue, _cnd, _mx);
 
-       c.mutex->lockcnt = c.count;
+       _mx->lockcnt = count;
 
        pthread_testcancel();
 
        return -err ?: -c.err;
 }
 
-int __wrap_pthread_cond_signal(pthread_cond_t * cond)
+int __wrap_pthread_cond_signal(pthread_cond_t *cond)
 {
-       struct __shadow_cond *shadow =
-               &((union __xeno_cond *)cond)->shadow_cond;
-       unsigned long *pending_signals;
+       struct __shadow_cond *_cnd = &((union __xeno_cond *)cond)->shadow_cond;
+       unsigned long pending_signals, *pending_signalsp;
        struct mutex_dat *mutex_datp;
 
-       if (shadow->magic != COBALT_COND_MAGIC)
+       if (_cnd->magic != COBALT_COND_MAGIC)
                return EINVAL;
 
-       mutex_datp = cond_get_mutex_datp(shadow);
+       mutex_datp = cond_get_mutex_datp(_cnd);
        if (mutex_datp) {
-               if ((mutex_datp->flags & COBALT_MUTEX_ERRORCHECK)) {
+               unsigned long flags = mutex_datp->flags;
+
+               if (unlikely(flags & COBALT_MUTEX_ERRORCHECK)) {
                        xnhandle_t cur = xeno_get_current();
 
                        if (cur == XN_NO_HANDLE)
@@ -244,28 +255,30 @@ int __wrap_pthread_cond_signal(pthread_cond_t * cond)
                                return EPERM;
                }
 
-               mutex_datp->flags |= COBALT_MUTEX_COND_SIGNAL;
+               mutex_datp->flags = flags | COBALT_MUTEX_COND_SIGNAL;
 
-               pending_signals = cond_get_signalsp(shadow);
-               if (*pending_signals != ~0UL)
-                       ++(*pending_signals);
+               pending_signalsp = cond_get_signalsp(_cnd);
+               pending_signals = *pending_signalsp;
+               if (pending_signals != ~0UL)
+                       *pending_signalsp = pending_signals + 1;
        }
 
        return 0;
 }
 
-int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
+int __wrap_pthread_cond_broadcast(pthread_cond_t *cond)
 {
-       struct __shadow_cond *shadow =
-               &((union __xeno_cond *)cond)->shadow_cond;
+       struct __shadow_cond *_cnd = &((union __xeno_cond *)cond)->shadow_cond;
        struct mutex_dat *mutex_datp;
 
-       if (shadow->magic != COBALT_COND_MAGIC)
+       if (_cnd->magic != COBALT_COND_MAGIC)
                return EINVAL;
 
-       mutex_datp = cond_get_mutex_datp(shadow);
+       mutex_datp = cond_get_mutex_datp(_cnd);
        if (mutex_datp) {
-               if (unlikely(mutex_datp->flags & COBALT_MUTEX_ERRORCHECK)) {
+               unsigned long flags = mutex_datp->flags ;
+
+               if (unlikely(flags & COBALT_MUTEX_ERRORCHECK)) {
                        xnhandle_t cur = xeno_get_current();
 
                        if (cur == XN_NO_HANDLE)
@@ -275,9 +288,9 @@ int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
                                return EPERM;
                }
 
-               mutex_datp->flags |= COBALT_MUTEX_COND_SIGNAL;
+               mutex_datp->flags = flags | COBALT_MUTEX_COND_SIGNAL;
 
-               *cond_get_signalsp(shadow) = ~0UL;
+               *cond_get_signalsp(_cnd) = ~0UL;
        }
 
        return 0;
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index b615ec1..9ecc5fa 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -26,8 +26,6 @@
 
 extern int __cobalt_muxid;
 
-#define COBALT_MUTEX_MAGIC (0x86860303)
-
 int __wrap_pthread_mutexattr_init(pthread_mutexattr_t *attr)
 {
        return -XENOMAI_SKINCALL1(__cobalt_muxid, __cobalt_mutexattr_init, attr);
@@ -82,55 +80,54 @@ int __wrap_pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
 int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
                              const pthread_mutexattr_t *attr)
 {
-       union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       struct __shadow_mutex *_mutex =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        int err;
 
-       if (shadow->magic == COBALT_MUTEX_MAGIC) {
+       if (_mutex->magic == COBALT_MUTEX_MAGIC) {
                err = -XENOMAI_SKINCALL1(__cobalt_muxid,
-                                        __cobalt_check_init,shadow);
+                                        __cobalt_check_init,_mutex);
 
                if (err)
                        return err;
        }
 
-       err = -XENOMAI_SKINCALL2(__cobalt_muxid,__cobalt_mutex_init,shadow,attr);
+       err = -XENOMAI_SKINCALL2(__cobalt_muxid,__cobalt_mutex_init,_mutex,attr);
 
-       if (!shadow->attr.pshared)
-               shadow->dat = (struct mutex_dat *)
-                       (xeno_sem_heap[0] + shadow->dat_offset);
+       if (!_mutex->attr.pshared)
+               _mutex->dat = (struct mutex_dat *)
+                       (xeno_sem_heap[0] + _mutex->dat_offset);
 
        return err;
 }
 
 int __wrap_pthread_mutex_destroy(pthread_mutex_t *mutex)
 {
-       union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       struct __shadow_mutex *_mutex =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        int err;
 
-       if (shadow->magic != COBALT_MUTEX_MAGIC)
+       if (_mutex->magic != COBALT_MUTEX_MAGIC)
                return EINVAL;
 
-       err = XENOMAI_SKINCALL1(__cobalt_muxid, __cobalt_mutex_destroy, shadow);
+       err = XENOMAI_SKINCALL1(__cobalt_muxid, __cobalt_mutex_destroy, _mutex);
 
        return -err;
 }
 
 int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
 {
-       union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
-       int err;
-
+       struct __shadow_mutex *_mutex =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        unsigned long status;
        xnhandle_t cur;
+       int err;
 
        cur = xeno_get_current();
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
-       if (shadow->magic != COBALT_MUTEX_MAGIC)
+       if (_mutex->magic != COBALT_MUTEX_MAGIC)
                return EINVAL;
 
        /*
@@ -140,19 +137,19 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
         */
        status = xeno_get_current_mode();
        if (likely(!(status & (XNRELAX|XNOTHER)))) {
-               err = xnsynch_fast_acquire(mutex_get_ownerp(shadow), cur);
+               err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
                if (likely(!err)) {
-                       shadow->lockcnt = 1;
+                       _mutex->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(mutex_get_ownerp(shadow), cur);
+               err = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
                if (!err)
                        err = -EBUSY;
        }
 
        if (err == -EBUSY)
-               switch(shadow->attr.type) {
+               switch(_mutex->attr.type) {
                case PTHREAD_MUTEX_NORMAL:
                        break;
 
@@ -160,18 +157,18 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
                        return EDEADLK;
 
                case PTHREAD_MUTEX_RECURSIVE:
-                       if (shadow->lockcnt == UINT_MAX)
+                       if (_mutex->lockcnt == UINT_MAX)
                                return EAGAIN;
-                       ++shadow->lockcnt;
+                       ++_mutex->lockcnt;
                        return 0;
                }
 
        do {
-               err = XENOMAI_SKINCALL1(__cobalt_muxid,__cobalt_mutex_lock,shadow);
+               err = XENOMAI_SKINCALL1(__cobalt_muxid,__cobalt_mutex_lock,_mutex);
        } while (err == -EINTR);
 
        if (!err)
-               shadow->lockcnt = 1;
+               _mutex->lockcnt = 1;
 
        return -err;
 }
@@ -179,8 +176,8 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
 int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
                                   const struct timespec *to)
 {
-       union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       struct __shadow_mutex *_mutex =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        unsigned long status;
        xnhandle_t cur;
        int err;
@@ -189,25 +186,25 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
-       if (shadow->magic != COBALT_MUTEX_MAGIC)
+       if (_mutex->magic != COBALT_MUTEX_MAGIC)
                return EINVAL;
 
        /* See __wrap_pthread_mutex_lock() */
        status = xeno_get_current_mode();
        if (likely(!(status & (XNRELAX|XNOTHER)))) {
-               err = xnsynch_fast_acquire(mutex_get_ownerp(shadow), cur);
+               err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
                if (likely(!err)) {
-                       shadow->lockcnt = 1;
+                       _mutex->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(mutex_get_ownerp(shadow), cur);
+               err = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
                if (!err)
                        err = -EBUSY;
        }
 
        if (err == -EBUSY)
-               switch(shadow->attr.type) {
+               switch(_mutex->attr.type) {
                case PTHREAD_MUTEX_NORMAL:
                        break;
 
@@ -215,27 +212,27 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
                        return EDEADLK;
 
                case PTHREAD_MUTEX_RECURSIVE:
-                       if (shadow->lockcnt == UINT_MAX)
+                       if (_mutex->lockcnt == UINT_MAX)
                                return EAGAIN;
 
-                       ++shadow->lockcnt;
+                       ++_mutex->lockcnt;
                        return 0;
                }
 
        do {
                err = XENOMAI_SKINCALL2(__cobalt_muxid,
-                                       __cobalt_mutex_timedlock, shadow, to);
+                                       __cobalt_mutex_timedlock, _mutex, to);
        } while (err == -EINTR);
 
        if (!err)
-               shadow->lockcnt = 1;
+               _mutex->lockcnt = 1;
        return -err;
 }
 
 int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
 {
-       union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       struct __shadow_mutex *_mutex =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        unsigned long status;
        xnhandle_t cur;
        int err;
@@ -244,29 +241,29 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
-       if (unlikely(shadow->magic != COBALT_MUTEX_MAGIC))
+       if (unlikely(_mutex->magic != COBALT_MUTEX_MAGIC))
                return EINVAL;
 
        status = xeno_get_current_mode();
        if (likely(!(status & (XNRELAX|XNOTHER)))) {
-               err = xnsynch_fast_acquire(mutex_get_ownerp(shadow), cur);
+               err = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
                if (likely(!err)) {
-                       shadow->lockcnt = 1;
+                       _mutex->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(mutex_get_ownerp(shadow), cur);
+               err = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
                if (err < 0)
                        goto do_syscall;
 
                err = -EBUSY;
        }
 
-       if (err == -EBUSY && shadow->attr.type == PTHREAD_MUTEX_RECURSIVE) {
-               if (shadow->lockcnt == UINT_MAX)
+       if (err == -EBUSY && _mutex->attr.type == PTHREAD_MUTEX_RECURSIVE) {
+               if (_mutex->lockcnt == UINT_MAX)
                        return EAGAIN;
 
-               ++shadow->lockcnt;
+               ++_mutex->lockcnt;
                return 0;
        } else
                err = -EBUSY;
@@ -276,11 +273,11 @@ do_syscall:
 
        do {
                err = XENOMAI_SKINCALL1(__cobalt_muxid,
-                                       __cobalt_mutex_trylock, shadow);
+                                       __cobalt_mutex_trylock, _mutex);
        } while (err == -EINTR);
 
        if (!err)
-               shadow->lockcnt = 1;
+               _mutex->lockcnt = 1;
 
   out:
        return -err;
@@ -288,32 +285,32 @@ do_syscall:
 
 int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
-       union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
-       struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
+       struct __shadow_mutex *_mutex =
+               &((union __xeno_mutex *)mutex)->shadow_mutex;
        struct mutex_dat *datp = NULL;
        xnhandle_t cur = XN_NO_HANDLE;
        int err, check;
 
-       if (unlikely(shadow->magic != COBALT_MUTEX_MAGIC))
+       if (unlikely(_mutex->magic != COBALT_MUTEX_MAGIC))
                return EINVAL;
 
-       if ((check = shadow->attr.type == PTHREAD_MUTEX_ERRORCHECK)) {
+       if ((check = _mutex->attr.type == PTHREAD_MUTEX_ERRORCHECK)) {
                cur = xeno_get_current();
                if (cur == XN_NO_HANDLE)
                        return EPERM;
 
-               datp = mutex_get_datp(shadow);
+               datp = mutex_get_datp(_mutex);
                if (xnsynch_fast_owner_check(&datp->owner, cur) != 0)
                        return EPERM;
        }
 
-       if (shadow->lockcnt > 1) {
-               --shadow->lockcnt;
+       if (_mutex->lockcnt > 1) {
+               --_mutex->lockcnt;
                return 0;
        }
 
        if (!check)
-               datp = mutex_get_datp(shadow);
+               datp = mutex_get_datp(_mutex);
 
        if ((datp->flags & COBALT_MUTEX_COND_SIGNAL))
                goto do_syscall;
@@ -331,7 +328,7 @@ do_syscall:
 
        do {
                err = XENOMAI_SKINCALL1(__cobalt_muxid,
-                                       __cobalt_mutex_unlock, shadow);
+                                       __cobalt_mutex_unlock, _mutex);
        } while (err == -EINTR);
 
        return -err;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to