Module: xenomai-gch
Branch: for-forge
Commit: 27c7ddde131120c3610a9b0d189216ec9d5322be
URL:    http://git.xenomai.org/?p=xenomai-gch.git;a=commit;h=27c7ddde131120c3610a9b0d189216ec9d5322be

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Wed Nov 16 22:08:02 2011 +0100

cobalt: reduce the number of run-time checks

Some checks were redundant, being made both in kernel-space and
user-space; they are now done only in user-space. Other checks, which
turned out to be rather expensive, are now done only for ERRORCHECK
mutexes instead of all kinds of mutexes.
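
For illustration, here is a minimal sketch of the user-space pattern this
change adopts (based on the patched lib/cobalt code below; cond_precheck()
is a hypothetical helper name and headers are omitted, while
mutex_get_ownerp(), xeno_get_current() and xnsynch_fast_owner_check() come
from the patch itself): the owner check is gated on the ERRORCHECK mutex
type instead of being performed unconditionally.

    /*
     * Sketch only: validate the caller against the mutex owner word,
     * but only for PTHREAD_MUTEX_ERRORCHECK mutexes.  Other mutex
     * types skip the run-time check entirely.
     */
    static int cond_precheck(struct __shadow_mutex *shadow)
    {
            xnhandle_t cur;

            if (shadow->attr.type != PTHREAD_MUTEX_ERRORCHECK)
                    return 0;       /* no check for other mutex types */

            cur = xeno_get_current();
            if (cur == XN_NO_HANDLE)
                    return EPERM;   /* caller is not a Xenomai thread */

            /* the caller must hold the mutex to wait on the condvar */
            if (xnsynch_fast_owner_check(mutex_get_ownerp(shadow), cur))
                    return EPERM;

            return 0;
    }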

---

 kernel/cobalt/cond.c  |   15 +++++--
 kernel/cobalt/mutex.c |    9 +---
 kernel/cobalt/mutex.h |   27 ++++++++----
 lib/cobalt/cond.c     |  105 ++++++++++++++++++++++++++++--------------------
 lib/cobalt/mutex.c    |   53 +++++++++++--------------
 5 files changed, 114 insertions(+), 95 deletions(-)

diff --git a/kernel/cobalt/cond.c b/kernel/cobalt/cond.c
index 8a08d65..4b65b3a 100644
--- a/kernel/cobalt/cond.c
+++ b/kernel/cobalt/cond.c
@@ -436,11 +436,6 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
                                                     mx, timed,
                                                     XN_INFINITE);
 
-       if (!cnd->mutex) {
-               datp = (struct mutex_dat *)~0UL;
-               __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
-       }
-
        switch(err) {
        case 0:
        case -ETIMEDOUT:
@@ -454,11 +449,21 @@ int cobalt_cond_wait_prologue(union __xeno_cond __user *u_cnd,
                break;
 
        case -EINTR:
+               if (!cnd->mutex) {
+                       datp = (struct mutex_dat *)~0UL;
+                       __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
+               }
+
                perr = err;
                d.err = 0;      /* epilogue should return 0. */
                break;
 
        default:
+               if (!cnd->mutex) {
+                       datp = (struct mutex_dat *)~0UL;
+                       __xn_put_user(datp, &u_cnd->shadow_cond.mutex_datp);
+               }
+
                /* Please gcc and handle the case which will never
                   happen */
                d.err = EINVAL;
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index 81e2ce7..a7bdec0 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -84,7 +84,8 @@ static int cobalt_mutex_init_inner(struct __shadow_mutex *shadow,
 
        mutex->magic = COBALT_MUTEX_MAGIC;
        xnsynch_init(&mutex->synchbase, synch_flags, &datp->owner);
-       datp->flags = 0;
+       datp->flags = (attr->type == PTHREAD_MUTEX_ERRORCHECK
+                      ? COBALT_MUTEX_ERRORCHECK : 0);
        inith(&mutex->link);
        mutex->attr = *attr;
        mutex->owningq = kq;
@@ -119,9 +120,6 @@ static int cobalt_mutex_acquire(xnthread_t *cur,
                                int timed,
                                xnticks_t abs_to)
 {
-       if (xnpod_unblockable_p())
-               return -EPERM;
-
        if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
                return -EINVAL;
 
@@ -380,9 +378,6 @@ int cobalt_mutex_unlock(union __xeno_mutex __user *u_mx)
        int err;
        spl_t s;
 
-       if (xnpod_root_p())
-               return -EPERM;
-
        __xn_get_user(mutex, &u_mx->shadow_mutex.mutex);
 
        xnlock_get_irqsave(&nklock, s);
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index e9a4673..b4b7f88 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -26,9 +26,10 @@ struct cobalt_mutex;
 
 struct mutex_dat {
        xnarch_atomic_t owner;
-       unsigned flags;
+       unsigned long flags;
 
 #define COBALT_MUTEX_COND_SIGNAL 0x00000001
+#define COBALT_MUTEX_ERRORCHECK  0x00000002
 };
 
 union __xeno_mutex {
@@ -74,9 +75,6 @@ static inline int cobalt_mutex_acquire_unchecked(xnthread_t *cur,
                                                 xnticks_t abs_to)
 
 {
-       if (xnsynch_owner_check(&mutex->synchbase, cur) == 0)
-               return -EBUSY;
-
        if (timed)
                xnsynch_acquire(&mutex->synchbase, abs_to, XN_REALTIME);
        else
@@ -108,9 +106,6 @@ static inline int cobalt_mutex_release(xnthread_t *cur, cobalt_mutex_t *mutex)
                return -EPERM;
 #endif /* XENO_DEBUG(POSIX) */
 
-       if (xnsynch_owner_check(&mutex->synchbase, cur) != 0)
-               return -EPERM;
-
        need_resched = 0;
        for (holder = getheadq(&mutex->conds);
             holder; holder = nextq(&mutex->conds, holder)) {
@@ -122,8 +117,6 @@ static inline int cobalt_mutex_release(xnthread_t *cur, cobalt_mutex_t *mutex)
        need_resched |= xnsynch_release(&mutex->synchbase) != NULL;
 
        return need_resched;
-       /* Do not reschedule here, releasing the mutex and suspension must be
-          done atomically in pthread_cond_*wait. */
 }
 
 int cobalt_mutex_check_init(union __xeno_mutex __user *u_mx);
@@ -147,6 +140,22 @@ void cobalt_mutexq_cleanup(cobalt_kqueues_t *q);
 void cobalt_mutex_pkg_init(void);
 
 void cobalt_mutex_pkg_cleanup(void);
+#else /* ! __KERNEL__ */
+
+extern unsigned long xeno_sem_heap[2];
+
+static inline struct mutex_dat *mutex_get_datp(struct __shadow_mutex *shadow)
+{
+       if (likely(!shadow->attr.pshared))
+               return shadow->dat;
+
+       return (struct mutex_dat *)(xeno_sem_heap[1] + shadow->dat_offset);
+}
+
+static inline xnarch_atomic_t *mutex_get_ownerp(struct __shadow_mutex *shadow)
+{
+       return &mutex_get_datp(shadow)->owner;
+}
 #endif /* __KERNEL__ */
 
 #endif /* !_POSIX_MUTEX_H */
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index d70f538..2e9709b 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -29,7 +29,7 @@ extern int __cobalt_muxid;
 
 extern unsigned long xeno_sem_heap[2];
 
-static unsigned long *get_signalsp(struct __shadow_cond *shadow)
+static unsigned long *cond_get_signalsp(struct __shadow_cond *shadow)
 {
        if (likely(!shadow->attr.pshared))
                return shadow->pending_signals;
@@ -38,7 +38,7 @@ static struct mutex_dat *get_mutex_datp(struct __shadow_cond *shadow)
                                 + shadow->pending_signals_offset);
 }
 
-static struct mutex_dat *get_mutex_datp(struct __shadow_cond *shadow)
+static struct mutex_dat *cond_get_mutex_datp(struct __shadow_cond *shadow)
 {
        if (shadow->mutex_datp == (struct mutex_dat *)~0UL)
                return NULL;
@@ -113,45 +113,53 @@ int __wrap_pthread_cond_destroy(pthread_cond_t * cond)
 }
 
 struct cobalt_cond_cleanup_t {
-       union __xeno_cond *cond;
-       union __xeno_mutex *mutex;
+       struct __shadow_cond *cond;
+       struct __shadow_mutex *mutex;
        unsigned count;
        int err;
 };
 
 static void __pthread_cond_cleanup(void *data)
 {
-       struct cobalt_cond_cleanup_t *c = (struct cobalt_cond_cleanup_t *) data;
+       struct cobalt_cond_cleanup_t *c = (struct cobalt_cond_cleanup_t *)data;
        int err;
 
        do {
                err = XENOMAI_SKINCALL2(__cobalt_muxid,
                                        __cobalt_cond_wait_epilogue,
-                                       &c->cond->shadow_cond,
-                                       &c->mutex->shadow_mutex);
+                                       c->cond, c->mutex);
        } while (err == -EINTR);
 
-       c->mutex->shadow_mutex.lockcnt = c->count;
+       c->mutex->lockcnt = c->count;
 }
 
 int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
        struct cobalt_cond_cleanup_t c = {
-               .cond = (union __xeno_cond *)cond,
-               .mutex = (union __xeno_mutex *)mutex,
+               .cond = &((union __xeno_cond *)cond)->shadow_cond,
+               .mutex = &((union __xeno_mutex *)mutex)->shadow_mutex,
        };
        int err, oldtype;
 
+       if (c.mutex->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
+               xnhandle_t cur = xeno_get_current();
+
+               if (cur == XN_NO_HANDLE)
+                       return EPERM;
+
+               if (xnsynch_fast_owner_check(mutex_get_ownerp(c.mutex), cur))
+                       return EPERM;
+       }
+
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
-       c.count = c.mutex->shadow_mutex.lockcnt;
+       c.count = c.mutex->lockcnt;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 
        err = XENOMAI_SKINCALL5(__cobalt_muxid,
                                 __cobalt_cond_wait_prologue,
-                                &c.cond->shadow_cond,
-                                &c.mutex->shadow_mutex, &c.err, 0, NULL);
+                                c.cond, c.mutex, &c.err, 0, NULL);
 
        pthread_setcanceltype(oldtype, NULL);
 
@@ -160,10 +168,9 @@ int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
        while (err == -EINTR)
                err = XENOMAI_SKINCALL2(__cobalt_muxid,
                                         __cobalt_cond_wait_epilogue,
-                                        &c.cond->shadow_cond,
-                                        &c.mutex->shadow_mutex);
+                                       c.cond, c.mutex);
 
-       c.mutex->shadow_mutex.lockcnt = c.count;
+       c.mutex->lockcnt = c.count;
 
        pthread_testcancel();
 
@@ -175,21 +182,30 @@ int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
                                  const struct timespec *abstime)
 {
        struct cobalt_cond_cleanup_t c = {
-               .cond = (union __xeno_cond *)cond,
-               .mutex = (union __xeno_mutex *)mutex,
+               .cond = &((union __xeno_cond *)cond)->shadow_cond,
+               .mutex = &((union __xeno_mutex *)mutex)->shadow_mutex,
        };
        int err, oldtype;
 
+       if (c.mutex->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
+               xnhandle_t cur = xeno_get_current();
+
+               if (cur == XN_NO_HANDLE)
+                       return EPERM;
+
+               if (xnsynch_fast_owner_check(mutex_get_ownerp(c.mutex), cur))
+                       return EPERM;
+       }
+
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
-       c.count = c.mutex->shadow_mutex.lockcnt;
+       c.count = c.mutex->lockcnt;
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
 
        err = XENOMAI_SKINCALL5(__cobalt_muxid,
                                __cobalt_cond_wait_prologue,
-                               &c.cond->shadow_cond,
-                               &c.mutex->shadow_mutex, &c.err, 1, abstime);
+                               c.cond, c.mutex, &c.err, 1, abstime);
        pthread_setcanceltype(oldtype, NULL);
 
        pthread_cleanup_pop(0);
@@ -197,10 +213,9 @@ int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
        while (err == -EINTR)
                err = XENOMAI_SKINCALL2(__cobalt_muxid,
                                        __cobalt_cond_wait_epilogue,
-                                       &c.cond->shadow_cond,
-                                       &c.mutex->shadow_mutex);
+                                       c.cond, c.mutex);
 
-       c.mutex->shadow_mutex.lockcnt = c.count;
+       c.mutex->lockcnt = c.count;
 
        pthread_testcancel();
 
@@ -213,23 +228,25 @@ int __wrap_pthread_cond_signal(pthread_cond_t * cond)
                &((union __xeno_cond *)cond)->shadow_cond;
        unsigned long *pending_signals;
        struct mutex_dat *mutex_datp;
-       xnhandle_t cur;
-
-       cur = xeno_get_current();
-       if (cur == XN_NO_HANDLE)
-               return EPERM;
 
        if (shadow->magic != COBALT_COND_MAGIC)
                return EINVAL;
 
-       mutex_datp = get_mutex_datp(shadow);
+       mutex_datp = cond_get_mutex_datp(shadow);
        if (mutex_datp) {
-               if (xnsynch_fast_owner_check(&mutex_datp->owner, cur) < 0)
-                       return EPERM;
+               if ((mutex_datp->flags & COBALT_MUTEX_ERRORCHECK)) {
+                       xnhandle_t cur = xeno_get_current();
+
+                       if (cur == XN_NO_HANDLE)
+                               return EPERM;
+
+               if (xnsynch_fast_owner_check(&mutex_datp->owner, cur) < 0)
+                               return EPERM;
+               }
 
                mutex_datp->flags |= COBALT_MUTEX_COND_SIGNAL;
 
-               pending_signals = get_signalsp(shadow);
+               pending_signals = cond_get_signalsp(shadow);
                if (*pending_signals != ~0UL)
                        ++(*pending_signals);
        }
@@ -241,26 +258,26 @@ int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
 {
        struct __shadow_cond *shadow =
                &((union __xeno_cond *)cond)->shadow_cond;
-       unsigned long *pending_signals;
        struct mutex_dat *mutex_datp;
-       xnhandle_t cur;
-
-       cur = xeno_get_current();
-       if (cur == XN_NO_HANDLE)
-               return EPERM;
 
        if (shadow->magic != COBALT_COND_MAGIC)
                return EINVAL;
 
-       mutex_datp = get_mutex_datp(shadow);
+       mutex_datp = cond_get_mutex_datp(shadow);
        if (mutex_datp) {
-               if (xnsynch_fast_owner_check(&mutex_datp->owner, cur) < 0)
-                       return EPERM;
+               if (unlikely(mutex_datp->flags & COBALT_MUTEX_ERRORCHECK)) {
+                       xnhandle_t cur = xeno_get_current();
+
+                       if (cur == XN_NO_HANDLE)
+                               return EPERM;
+
+               if (xnsynch_fast_owner_check(&mutex_datp->owner, cur) < 0)
+                               return EPERM;
+               }
 
                mutex_datp->flags |= COBALT_MUTEX_COND_SIGNAL;
 
-               pending_signals = get_signalsp(shadow);
-               *get_signalsp(shadow) = ~0UL;
+               *cond_get_signalsp(shadow) = ~0UL;
        }
 
        return 0;
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index e2ea7a0..b615ec1 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -28,21 +28,6 @@ extern int __cobalt_muxid;
 
 #define COBALT_MUTEX_MAGIC (0x86860303)
 
-extern unsigned long xeno_sem_heap[2];
-
-static struct mutex_dat *get_datp(struct __shadow_mutex *shadow)
-{
-       if (likely(!shadow->attr.pshared))
-               return shadow->dat;
-
-       return (struct mutex_dat *)(xeno_sem_heap[1] + shadow->dat_offset);
-}
-
-static xnarch_atomic_t *get_ownerp(struct __shadow_mutex *shadow)
-{
-       return &get_datp(shadow)->owner;
-}
-
 int __wrap_pthread_mutexattr_init(pthread_mutexattr_t *attr)
 {
        return -XENOMAI_SKINCALL1(__cobalt_muxid, __cobalt_mutexattr_init, attr);
@@ -155,13 +140,13 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
         */
        status = xeno_get_current_mode();
        if (likely(!(status & (XNRELAX|XNOTHER)))) {
-               err = xnsynch_fast_acquire(get_ownerp(shadow), cur);
+               err = xnsynch_fast_acquire(mutex_get_ownerp(shadow), cur);
                if (likely(!err)) {
                        shadow->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(get_ownerp(shadow), cur);
+               err = xnsynch_fast_owner_check(mutex_get_ownerp(shadow), cur);
                if (!err)
                        err = -EBUSY;
        }
@@ -210,13 +195,13 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
        /* See __wrap_pthread_mutex_lock() */
        status = xeno_get_current_mode();
        if (likely(!(status & (XNRELAX|XNOTHER)))) {
-               err = xnsynch_fast_acquire(get_ownerp(shadow), cur);
+               err = xnsynch_fast_acquire(mutex_get_ownerp(shadow), cur);
                if (likely(!err)) {
                        shadow->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(get_ownerp(shadow), cur);
+               err = xnsynch_fast_owner_check(mutex_get_ownerp(shadow), cur);
                if (!err)
                        err = -EBUSY;
        }
@@ -264,13 +249,13 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
 
        status = xeno_get_current_mode();
        if (likely(!(status & (XNRELAX|XNOTHER)))) {
-               err = xnsynch_fast_acquire(get_ownerp(shadow), cur);
+               err = xnsynch_fast_acquire(mutex_get_ownerp(shadow), cur);
                if (likely(!err)) {
                        shadow->lockcnt = 1;
                        return 0;
                }
        } else {
-               err = xnsynch_fast_owner_check(get_ownerp(shadow), cur);
+               err = xnsynch_fast_owner_check(mutex_get_ownerp(shadow), cur);
                if (err < 0)
                        goto do_syscall;
 
@@ -305,32 +290,40 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
 {
        union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
-       struct mutex_dat *datp;
-       xnhandle_t cur;
-       int err;
+       struct mutex_dat *datp = NULL;
+       xnhandle_t cur = XN_NO_HANDLE;
+       int err, check;
 
        if (unlikely(shadow->magic != COBALT_MUTEX_MAGIC))
                return EINVAL;
 
-       cur = xeno_get_current();
-       if (cur == XN_NO_HANDLE)
-               return EPERM;
+       if ((check = shadow->attr.type == PTHREAD_MUTEX_ERRORCHECK)) {
+               cur = xeno_get_current();
+               if (cur == XN_NO_HANDLE)
+                       return EPERM;
 
-       datp = get_datp(shadow);
-       if (xnsynch_fast_owner_check(&datp->owner, cur) != 0)
-               return EPERM;
+               datp = mutex_get_datp(shadow);
+               if (xnsynch_fast_owner_check(&datp->owner, cur) != 0)
+                       return EPERM;
+       }
 
        if (shadow->lockcnt > 1) {
                --shadow->lockcnt;
                return 0;
        }
 
+       if (!check)
+               datp = mutex_get_datp(shadow);
+
        if ((datp->flags & COBALT_MUTEX_COND_SIGNAL))
                goto do_syscall;
 
        if (unlikely(xeno_get_current_mode() & XNOTHER))
                goto do_syscall;
 
+       if (!check)
+               cur = xeno_get_current();
+
        if (likely(xnsynch_fast_release(&datp->owner, cur)))
                return 0;
 

