Module: xenomai-gch
Branch: for-forge
Commit: 84ddf649a06e8c0cdcf96c204eb6102405ce57ec
URL:    http://git.xenomai.org/?p=xenomai-gch.git;a=commit;h=84ddf649a06e8c0cdcf96c204eb6102405ce57ec

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Mon Nov 14 21:36:07 2011 +0100

cobalt: remove cb_lock

---

 kernel/cobalt/cb_lock.h |   61 --------------------------------------------
 kernel/cobalt/cond.c    |   10 -------
 kernel/cobalt/mutex.c   |   65 ++++++----------------------------------------
 kernel/cobalt/mutex.h   |    2 -
 kernel/cobalt/syscall.c |    8 +++---
 lib/cobalt/cond.c       |   11 --------
 lib/cobalt/mutex.c      |   45 +-------------------------------
 7 files changed, 15 insertions(+), 187 deletions(-)
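
Notes:

The deleted cb_lock.h implemented a small lockless reader/writer protocol on an
xnarch_atomic_t word embedded in struct __shadow_mutex: the lock/unlock/wait
paths took the read side, while pthread_mutex_init()/pthread_mutex_destroy()
took the write side to fence off concurrent users of the same shadow mutex.
A minimal sketch of the caller pattern this patch removes, condensed from the
deleted code (the function name do_locked_op is made up for illustration):

	/* Read-side bracket around a fast-path mutex operation, as the
	 * removed callers in lib/cobalt/mutex.c and kernel/cobalt/mutex.c
	 * did it. */
	static int do_locked_op(struct __shadow_mutex *shadow)
	{
		DECLARE_CB_LOCK_FLAGS(s);

		if (unlikely(cb_try_read_lock(&shadow->lock, s)))
			return EINVAL;	/* init/destroy holds the write side */

		/* ... operate on the shadow mutex ... */

		cb_read_unlock(&shadow->lock, s);

		return 0;
	}

With the lock word gone from struct __shadow_mutex, the mutex syscalls copy the
shadow only up to the owner member (offsetof(struct __shadow_mutex, owner)
instead of offsetof(struct __shadow_mutex, lock)), see kernel/cobalt/syscall.c
below.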

diff --git a/kernel/cobalt/cb_lock.h b/kernel/cobalt/cb_lock.h
deleted file mode 100644
index 6eae926..0000000
--- a/kernel/cobalt/cb_lock.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef CB_LOCK_H
-#define CB_LOCK_H
-
-#include <asm/xenomai/atomic.h>
-#include <nucleus/compiler.h>
-#include <nucleus/types.h>
-
-#if !defined(__KERNEL__) && !defined(__XENO_SIM__)
-typedef void xnthread_t;
-#endif /* __KERNEL__ */
-
-#define __CLAIMED_BIT          XN_HANDLE_SPARE3
-
-static  __inline__ int __cb_try_read_lock(xnarch_atomic_t *lock)
-{
-       unsigned val = xnarch_atomic_get(lock);
-       while (likely(val != -1)) {
-               unsigned old = xnarch_atomic_cmpxchg(lock, val, val + 1);
-               if (likely(old == val))
-                       return 0;
-               val = old;
-       }
-       return -EBUSY;
-}
-
-static __inline__ void __cb_read_unlock(xnarch_atomic_t *lock)
-{
-       unsigned old, val = xnarch_atomic_get(lock);
-       while (likely(val != -1)) {
-               old = xnarch_atomic_cmpxchg(lock, val, val - 1);
-               if (likely(old == val))
-                       return;
-               val = old;
-       }
-}
-
-static __inline__ int __cb_try_write_lock(xnarch_atomic_t *lock)
-{
-       unsigned old = xnarch_atomic_cmpxchg(lock, 0, -1);
-       if (unlikely(old))
-               return -EBUSY;
-       return 0;
-}
-
-static __inline__ void __cb_force_write_lock(xnarch_atomic_t *lock)
-{
-       xnarch_atomic_set(lock, -1);
-}
-
-static __inline__ void __cb_write_unlock(xnarch_atomic_t *lock)
-{
-       xnarch_atomic_set(lock, 0);
-}
-#define DECLARE_CB_LOCK_FLAGS(name) struct { } name __attribute__((unused))
-#define cb_try_read_lock(lock, flags) __cb_try_read_lock(lock)
-#define cb_read_unlock(lock, flags) __cb_read_unlock(lock)
-#define cb_try_write_lock(lock, flags) __cb_try_write_lock(lock)
-#define cb_force_write_lock(lock, flags) __cb_force_write_lock(lock)
-#define cb_write_unlock(lock, flags) __cb_write_unlock(lock)
-
-#endif /* CB_LOCK_H */
diff --git a/kernel/cobalt/cond.c b/kernel/cobalt/cond.c
index 479646d..6f16481 100644
--- a/kernel/cobalt/cond.c
+++ b/kernel/cobalt/cond.c
@@ -418,9 +418,6 @@ int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t * mx)
        unsigned count;
        int err;
 
-       if (unlikely(cb_try_read_lock(&mutex->lock, s)))
-               return EINVAL;
-
        err = cobalt_cond_timedwait_prologue(cur, cond, mutex,
                                            &count, 0, XN_INFINITE);
 
@@ -429,8 +426,6 @@ int pthread_cond_wait(pthread_cond_t * cnd, pthread_mutex_t * mx)
                                                               mutex, count))
                        ;
 
-       cb_read_unlock(&mutex->lock, s);
-
        return err != EINTR ? err : 0;
 }
 
@@ -484,9 +479,6 @@ int pthread_cond_timedwait(pthread_cond_t * cnd,
        unsigned count;
        int err;
 
-       if (unlikely(cb_try_read_lock(&mutex->lock, s)))
-               return EINVAL;
-
        err = cobalt_cond_timedwait_prologue(cur, cond, mutex, &count, 1,
                                            ts2ns(abstime) + 1);
 
@@ -495,8 +487,6 @@ int pthread_cond_timedwait(pthread_cond_t * cnd,
                                                               mutex, count))
                        ;
 
-       cb_read_unlock(&mutex->lock, s);
-
        return err != EINTR ? err : 0;
 }
 
diff --git a/kernel/cobalt/mutex.c b/kernel/cobalt/mutex.c
index def5576..320bcf2 100644
--- a/kernel/cobalt/mutex.c
+++ b/kernel/cobalt/mutex.c
@@ -103,7 +103,6 @@ int cobalt_mutex_init_internal(struct __shadow_mutex *shadow,
        shadow->magic = COBALT_MUTEX_MAGIC;
        shadow->mutex = mutex;
        shadow->lockcnt = 0;
-       xnarch_atomic_set(&shadow->lock, -1);
 
        shadow->attr = *attr;
        shadow->owner_offset = xnheap_mapped_offset(&sys_ppd->sem_heap, ownerp);
@@ -155,7 +154,6 @@ int pthread_mutex_init(pthread_mutex_t *mx, const pthread_mutexattr_t *attr)
 {
        struct __shadow_mutex *shadow =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       DECLARE_CB_LOCK_FLAGS(s);
        cobalt_mutex_t *mutex;
        xnarch_atomic_t *ownerp = NULL;
        int err;
@@ -163,16 +161,10 @@ int pthread_mutex_init(pthread_mutex_t *mx, const pthread_mutexattr_t *attr)
        if (!attr)
                attr = &cobalt_default_mutex_attr;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               goto checked;
-
        err = cobalt_mutex_check_init(shadow, attr);
-       if (err) {
-               cb_read_unlock(&shadow->lock, s);
+       if (err)
                return -err;
-       }
 
-  checked:
        mutex = (cobalt_mutex_t *) xnmalloc(sizeof(*mutex));
        if (!mutex)
                return ENOMEM;
@@ -185,10 +177,7 @@ int pthread_mutex_init(pthread_mutex_t *mx, const pthread_mutexattr_t *attr)
                return EAGAIN;
        }
 
-       cb_force_write_lock(&shadow->lock, s);
        err = cobalt_mutex_init_internal(shadow, mutex, ownerp, attr);
-       cb_write_unlock(&shadow->lock, s);
-
        if (err) {
                xnfree(mutex);
                xnheap_free(&xnsys_ppd_get(attr->pshared)->sem_heap, ownerp);
@@ -238,33 +227,22 @@ int pthread_mutex_destroy(pthread_mutex_t * mx)
 {
        struct __shadow_mutex *shadow =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       DECLARE_CB_LOCK_FLAGS(s);
        cobalt_mutex_t *mutex;
 
-       if (unlikely(cb_try_write_lock(&shadow->lock, s)))
-               return EBUSY;
-
        mutex = shadow->mutex;
        if (!cobalt_obj_active(shadow, COBALT_MUTEX_MAGIC, struct __shadow_mutex)
-           || !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex)) {
-               cb_write_unlock(&shadow->lock, s);
+           || !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
                return EINVAL;
-       }
 
-       if (cobalt_kqueues(mutex->attr.pshared) != mutex->owningq) {
-               cb_write_unlock(&shadow->lock, s);
+       if (cobalt_kqueues(mutex->attr.pshared) != mutex->owningq)
                return EPERM;
-       }
 
        if (xnsynch_fast_owner_check(mutex->synchbase.fastlock,
-                                    XN_NO_HANDLE) != 0) {
-               cb_write_unlock(&shadow->lock, s);
+                                    XN_NO_HANDLE) != 0)
                return EBUSY;
-       }
 
        cobalt_mark_deleted(shadow);
        cobalt_mark_deleted(mutex);
-       cb_write_unlock(&shadow->lock, s);
 
        cobalt_mutex_destroy_internal(mutex, cobalt_kqueues(mutex->attr.pshared));
 
@@ -285,7 +263,7 @@ int cobalt_mutex_timedlock_break(struct __shadow_mutex *shadow,
 
        err = cobalt_mutex_timedlock_internal(cur, shadow, 1, timed, abs_to);
        if (err != -EBUSY)
-               goto unlock_and_return;
+               goto out;
 
        mutex = shadow->mutex;
 
@@ -338,7 +316,7 @@ int cobalt_mutex_timedlock_break(struct __shadow_mutex *shadow,
                err = 0;
        }
 
-  unlock_and_return:
+  out:
        return err;
 
 }
@@ -377,27 +355,23 @@ int pthread_mutex_trylock(pthread_mutex_t *mx)
            &((union __xeno_mutex *)mx)->shadow_mutex;
        xnthread_t *cur = xnpod_current_thread();
        cobalt_mutex_t *mutex = shadow->mutex;
-       DECLARE_CB_LOCK_FLAGS(s);
        int err;
 
        if (xnpod_unblockable_p())
                return EPERM;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        if (!cobalt_obj_active(shadow, COBALT_MUTEX_MAGIC,
                              struct __shadow_mutex)
            || !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC,
                                 struct cobalt_mutex)) {
                err = EINVAL;
-               goto unlock_and_return;
+               goto out;
        }
 
 #if XENO_DEBUG(POSIX)
        if (mutex->owningq != cobalt_kqueues(mutex->attr.pshared)) {
                err = EPERM;
-               goto unlock_and_return;
+               goto out;
        }
 #endif /* XENO_DEBUG(POSIX) */
 
@@ -418,9 +392,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mx)
                }
        }
 
-  unlock_and_return:
-       cb_read_unlock(&shadow->lock, s);
-
+  out:
        return err;
 }
 
@@ -464,18 +436,12 @@ int pthread_mutex_lock(pthread_mutex_t * mx)
 {
        struct __shadow_mutex *shadow =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       DECLARE_CB_LOCK_FLAGS(s);
        int err;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        do {
                err = cobalt_mutex_timedlock_break(shadow, 0, XN_INFINITE);
        } while (err == -EINTR);
 
-       cb_read_unlock(&shadow->lock, s);
-
        return -err;
 }
 
@@ -517,19 +483,13 @@ int pthread_mutex_timedlock(pthread_mutex_t * mx, const struct timespec *to)
 {
        struct __shadow_mutex *shadow =
            &((union __xeno_mutex *)mx)->shadow_mutex;
-       DECLARE_CB_LOCK_FLAGS(s);
        int err;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        do {
                err = cobalt_mutex_timedlock_break(shadow, 1,
                                                   ts2ns(to) + 1);
        } while (err == -EINTR);
 
-       cb_read_unlock(&shadow->lock, s);
-
        return -err;
 }
 
@@ -569,16 +529,12 @@ int pthread_mutex_unlock(pthread_mutex_t * mx)
        struct __shadow_mutex *shadow =
            &((union __xeno_mutex *)mx)->shadow_mutex;
        xnthread_t *cur = xnpod_current_thread();
-       DECLARE_CB_LOCK_FLAGS(s);
        cobalt_mutex_t *mutex;
        int err;
 
        if (xnpod_root_p() || xnpod_interrupt_p())
                return EPERM;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        mutex = shadow->mutex;
 
        if (!cobalt_obj_active(shadow,
@@ -595,7 +551,6 @@ int pthread_mutex_unlock(pthread_mutex_t * mx)
        if (shadow->lockcnt > 1) {
                /* Mutex is recursive */
                --shadow->lockcnt;
-               cb_read_unlock(&shadow->lock, s);
                return 0;
        }
 
@@ -603,8 +558,6 @@ int pthread_mutex_unlock(pthread_mutex_t * mx)
                xnpod_schedule();
 
   out:
-       cb_read_unlock(&shadow->lock, s);
-
        return err;
 }
 
diff --git a/kernel/cobalt/mutex.h b/kernel/cobalt/mutex.h
index dcdb48d..82b8c34 100644
--- a/kernel/cobalt/mutex.h
+++ b/kernel/cobalt/mutex.h
@@ -30,7 +30,6 @@ union __xeno_mutex {
                unsigned magic;
                unsigned lockcnt;
                struct cobalt_mutex *mutex;
-               xnarch_atomic_t lock;
                union {
                        unsigned owner_offset;
                        xnarch_atomic_t *owner;
@@ -46,7 +45,6 @@ union __xeno_mutex {
 #include "internal.h"
 #include "thread.h"
 #include "cond.h"
-#include "cb_lock.h"
 
 typedef struct cobalt_mutex {
        unsigned magic;
diff --git a/kernel/cobalt/syscall.c b/kernel/cobalt/syscall.c
index 5f9b129..6ca6979 100644
--- a/kernel/cobalt/syscall.c
+++ b/kernel/cobalt/syscall.c
@@ -1160,7 +1160,7 @@ static int __pthread_mutex_trylock(union __xeno_mutex __user *u_mx)
 
        if (__xn_safe_copy_from_user(&mx.shadow_mutex,
                                     &u_mx->shadow_mutex,
-                                    offsetof(struct __shadow_mutex, lock)))
+                                    offsetof(struct __shadow_mutex, owner)))
                return -EFAULT;
 
        shadow = &mx.shadow_mutex;
@@ -1217,7 +1217,7 @@ static int __pthread_mutex_lock(union __xeno_mutex __user *u_mx)
 
        if (__xn_safe_copy_from_user(&mx.shadow_mutex,
                                     &u_mx->shadow_mutex,
-                                    offsetof(struct __shadow_mutex, lock)))
+                                    offsetof(struct __shadow_mutex, owner)))
                return -EFAULT;
 
        shadow = &mx.shadow_mutex;
@@ -1272,7 +1272,7 @@ static int __pthread_mutex_unlock(union __xeno_mutex __user *u_mx)
 
        if (__xn_safe_copy_from_user(&mx.shadow_mutex,
                                     &u_mx->shadow_mutex,
-                                    offsetof(struct __shadow_mutex, lock)))
+                                    offsetof(struct __shadow_mutex, owner)))
                return -EFAULT;
 
        xnlock_get_irqsave(&nklock, s);
@@ -1544,7 +1544,7 @@ static int __pthread_cond_wait_epilogue(union __xeno_cond __user *u_cnd,
 
        if (__xn_safe_copy_from_user(&mx.shadow_mutex,
                                     &u_mx->shadow_mutex,
-                                    offsetof(struct __shadow_mutex, lock)
+                                    offsetof(struct __shadow_mutex, owner)
                                     ))
                return -EFAULT;
 
diff --git a/lib/cobalt/cond.c b/lib/cobalt/cond.c
index 6521238..a1c31a8 100644
--- a/lib/cobalt/cond.c
+++ b/lib/cobalt/cond.c
@@ -22,7 +22,6 @@
 #include <cobalt/syscall.h>
 #include <kernel/cobalt/mutex.h>
 #include <kernel/cobalt/cond.h>
-#include <kernel/cobalt/cb_lock.h>
 #include <asm-generic/bits/current.h>
 
 extern int __cobalt_muxid;
@@ -143,9 +142,6 @@ int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
        };
        int err, oldtype;
 
-       if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
-               return EINVAL;
-
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -166,8 +162,6 @@ int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
                                         &c.mutex->shadow_mutex,
                                         c.count);
 
-       cb_read_unlock(&c.mutex->shadow_mutex.lock, s);
-
        pthread_testcancel();
 
        return err ?: c.err;
@@ -183,9 +177,6 @@ int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
        };
        int err, oldtype;
 
-       if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
-               return EINVAL;
-
        pthread_cleanup_push(&__pthread_cond_cleanup, &c);
 
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
@@ -205,8 +196,6 @@ int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
                                         &c.mutex->shadow_mutex,
                                         c.count);
 
-       cb_read_unlock(&c.mutex->shadow_mutex.lock, s);
-
        pthread_testcancel();
 
        return err ?: c.err;
diff --git a/lib/cobalt/mutex.c b/lib/cobalt/mutex.c
index f2ae5ab..908bdeb 100644
--- a/lib/cobalt/mutex.c
+++ b/lib/cobalt/mutex.c
@@ -22,7 +22,6 @@
 #include <nucleus/synch.h>
 #include <cobalt/syscall.h>
 #include <kernel/cobalt/mutex.h>
-#include <kernel/cobalt/cb_lock.h>
 #include <asm-generic/bits/current.h>
 
 extern int __cobalt_muxid;
@@ -97,18 +96,10 @@ int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
        int err;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               goto checked;
-
        err = -XENOMAI_SKINCALL2(__cobalt_muxid,__cobalt_check_init,shadow,attr);
 
-       if (err) {
-               cb_read_unlock(&shadow->lock, s);
+       if (err)
                return err;
-       }
-
-  checked:
-       cb_force_write_lock(&shadow->lock, s);
 
        err = -XENOMAI_SKINCALL2(__cobalt_muxid,__cobalt_mutex_init,shadow,attr);
 
@@ -116,8 +107,6 @@ int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
                shadow->owner = (xnarch_atomic_t *)
                        (xeno_sem_heap[0] + shadow->owner_offset);
 
-       cb_write_unlock(&shadow->lock, s);
-
        return err;
 }
 
@@ -127,13 +116,8 @@ int __wrap_pthread_mutex_destroy(pthread_mutex_t *mutex)
        struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
        int err;
 
-       if (unlikely(cb_try_write_lock(&shadow->lock, s)))
-               return EINVAL;
-
        err = -XENOMAI_SKINCALL1(__cobalt_muxid, __cobalt_mutex_destroy, shadow);
 
-       cb_write_unlock(&shadow->lock, s);
-
        return err;
 }
 
@@ -152,9 +136,6 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
 
        status = xeno_get_current_mode();
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        if (shadow->magic != COBALT_MUTEX_MAGIC) {
                err = -EINVAL;
                goto out;
@@ -170,7 +151,6 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
 
                if (likely(!err)) {
                        shadow->lockcnt = 1;
-                       cb_read_unlock(&shadow->lock, s);
                        return 0;
                }
 
@@ -199,8 +179,6 @@ int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
        } while (err == -EINTR);
 
   out:
-       cb_read_unlock(&shadow->lock, s);
-
        return -err;
 }
 
@@ -219,9 +197,6 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
 
        status = xeno_get_current_mode();
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        if (shadow->magic != COBALT_MUTEX_MAGIC) {
                err = -EINVAL;
                goto out;
@@ -233,7 +208,6 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
 
                if (likely(!err)) {
                        shadow->lockcnt = 1;
-                       cb_read_unlock(&shadow->lock, s);
                        return 0;
                }
 
@@ -263,8 +237,6 @@ int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
        } while (err == -EINTR);
 
   out:
-       cb_read_unlock(&shadow->lock, s);
-
        return -err;
 }
 
@@ -280,9 +252,6 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        if (unlikely(shadow->magic != COBALT_MUTEX_MAGIC)) {
                err = EINVAL;
                goto out;
@@ -310,7 +279,6 @@ int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
 
        if (likely(!err)) {
                shadow->lockcnt = 1;
-               cb_read_unlock(&shadow->lock, s);
                return 0;
        }
 
@@ -325,8 +293,6 @@ do_syscall:
        } while (err == EINTR);
 
   out:
-       cb_read_unlock(&shadow->lock, s);
-
        return err;
 }
 
@@ -342,9 +308,6 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
        if (cur == XN_NO_HANDLE)
                return EPERM;
 
-       if (unlikely(cb_try_read_lock(&shadow->lock, s)))
-               return EINVAL;
-
        if (unlikely(shadow->magic != COBALT_MUTEX_MAGIC)) {
                err = -EINVAL;
                goto out_err;
@@ -367,11 +330,9 @@ int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
        if (unlikely(xnsynch_fast_check_spares(ownerp, COBALT_MUTEX_COND_SIGNAL)))
                goto do_syscall;
 
-       if (likely(xnsynch_fast_release(ownerp, cur))) {
+       if (likely(xnsynch_fast_release(ownerp, cur)))
          out:
-               cb_read_unlock(&shadow->lock, s);
                return 0;
-       }
 
 do_syscall:
 
@@ -381,7 +342,5 @@ do_syscall:
        } while (err == -EINTR);
 
   out_err:
-       cb_read_unlock(&shadow->lock, s);
-
        return -err;
 }


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git
