On Wed, Jul 06, 2011 at 10:05:00PM +0000, Thordur Bjornsson wrote:
I really need to hear back from you guys with reports: what you tested
on, which filesystems you used, and a short description of what you
toyed with.
ciao, thib.
> Hi,
>
>
> This diff introduces rrwlocks, i.e. recursive/re-entrant rwlocks. They
> are needed because the locking must be done in the VFS layer.
>
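> To illustrate, here is a minimal sketch of the new interface (the lock
> and function names are made up for the example; this assumes process
> context, since rrw_enter() tracks ownership via curproc, and plain
> RW_WRITE acquisitions cannot fail, so return values are ignored):
>
>	#include <sys/rwlock.h>
>
>	struct rrwlock nlock;
>
>	void
>	example(void)
>	{
>		rrw_init(&nlock, "example");
>
>		rrw_enter(&nlock, RW_WRITE);	/* first acquisition, wcnt = 1 */
>		rrw_enter(&nlock, RW_WRITE);	/* same proc recurses, wcnt = 2 */
>		rrw_exit(&nlock);		/* wcnt = 1, lock still held */
>		rrw_exit(&nlock);		/* wcnt = 0, lock released */
>	}
>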
> This diff also removes the lockmgr() implementation (lockmgr() itself
> stays as a thin wrapper around the new rrwlocks); the vnode locks were
> its only users, as it was the only locking primitive we had that
> supported recursion.
>
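> The surviving lockmgr() flags simply map onto rwlock flags, roughly
> (see the new lockmgr() body in the diff below):
>
>	LK_SHARED		-> RW_READ
>	LK_EXCLUSIVE, LK_DRAIN	-> RW_WRITE
>	LK_NOWAIT		-> RW_NOSLEEP
>	LK_RECURSEFAIL		-> RW_RECURSEFAIL
>	LK_RELEASE		-> rrw_exit()
>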
> I'd love to hear test reports from people using this diff and playing
> with their media (msdos/ext2fs/udf/cd9660/mfs/ufs).
>
> ciao, thib.
>
> diff --git a/kern/kern_lock.c b/kern/kern_lock.c
> index 320d2f3..97fdbda 100644
> --- a/kern/kern_lock.c
> +++ b/kern/kern_lock.c
> @@ -43,299 +43,60 @@
>
> #include <machine/cpu.h>
>
> -/*
> - * Locking primitives implementation.
> - * Locks provide shared/exclusive synchronization.
> - */
> -
> -/*
> - * Acquire a resource.
> - */
> -#define ACQUIRE(lkp, error, extflags, drain, wanted) \
> -do { \
> - for (error = 0; wanted; ) { \
> - if ((drain)) \
> - (lkp)->lk_flags |= LK_WAITDRAIN; \
> - else \
> - (lkp)->lk_waitcount++; \
> - /* XXX Cast away volatile. */ \
> - error = tsleep((drain) ? \
> - (void *)&(lkp)->lk_flags : (void *)(lkp), \
> - (lkp)->lk_prio, (lkp)->lk_wmesg, (lkp)->lk_timo); \
> - if ((drain) == 0) \
> - (lkp)->lk_waitcount--; \
> - if (error) \
> - break; \
> - } \
> -} while (0)
> -
> -#define SETHOLDER(lkp, pid, cpu_id)					\
> -	(lkp)->lk_lockholder = (pid)
> +#ifdef MP_LOCKDEBUG
> +/* CPU-dependent timing, needs this to be settable from ddb. */
> +int __mp_lock_spinout = 200000000;
> +#endif
>
> -#define WEHOLDIT(lkp, pid, cpu_id)					\
> -	((lkp)->lk_lockholder == (pid))
>
> -/*
> - * Initialize a lock; required before use.
> - */
> void
> lockinit(struct lock *lkp, int prio, char *wmesg, int timo, int flags)
> {
>
> + KASSERT(flags == 0);
> +
> bzero(lkp, sizeof(struct lock));
> - lkp->lk_flags = flags & LK_EXTFLG_MASK;
> - lkp->lk_lockholder = LK_NOPROC;
> - lkp->lk_prio = prio;
> - lkp->lk_timo = timo;
> - lkp->lk_wmesg = wmesg; /* just a name for spin locks */
> + rrw_init(&lkp->lk_lck, wmesg);
> }
>
> -/*
> - * Determine the status of a lock.
> - */
> int
> lockstatus(struct lock *lkp)
> {
> - int lock_type = 0;
> -
> - if (lkp->lk_exclusivecount != 0)
> - lock_type = LK_EXCLUSIVE;
> - else if (lkp->lk_sharecount != 0)
> - lock_type = LK_SHARED;
> - return (lock_type);
> + return (rrw_status(&lkp->lk_lck));
> }
>
> -/*
> - * Set, change, or release a lock.
> - *
> - * Shared requests increment the shared count. Exclusive requests set the
> - * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
> - * accepted shared locks and shared-to-exclusive upgrades to go away.
> - */
> int
> -lockmgr(__volatile struct lock *lkp, u_int flags, void *notused)
> +lockmgr(struct lock *lkp, u_int flags, void *notused)
> {
> - int error;
> - pid_t pid;
> - int extflags;
> - cpuid_t cpu_id;
> - struct proc *p = curproc;
> + int rwflags;
>
> - error = 0;
> - extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
> + rwflags = 0;
>
> -#ifdef DIAGNOSTIC
> - if (p == NULL)
> - panic("lockmgr: process context required");
> -#endif
> - /* Process context required. */
> - pid = p->p_pid;
> - cpu_id = cpu_number();
> -
> - /*
> - * Once a lock has drained, the LK_DRAINING flag is set and an
> - * exclusive lock is returned. The only valid operation thereafter
> - * is a single release of that exclusive lock. This final release
> - * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
> - * further requests of any sort will result in a panic. The bits
> - * selected for these two flags are chosen so that they will be set
> - * in memory that is freed (freed memory is filled with 0xdeadbeef).
> - */
> - if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
> -#ifdef DIAGNOSTIC
> - if (lkp->lk_flags & LK_DRAINED)
> - panic("lockmgr: using decommissioned lock");
> - if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
> - WEHOLDIT(lkp, pid, cpu_id) == 0)
> - panic("lockmgr: non-release on draining lock: %d",
> - flags & LK_TYPE_MASK);
> -#endif /* DIAGNOSTIC */
> - lkp->lk_flags &= ~LK_DRAINING;
> - lkp->lk_flags |= LK_DRAINED;
> - }
> + KASSERT(!((flags & (LK_SHARED|LK_EXCLUSIVE)) ==
> + (LK_SHARED|LK_EXCLUSIVE)));
> + KASSERT(!((flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
> + (LK_CANRECURSE|LK_RECURSEFAIL)));
> + KASSERT((flags & LK_RELEASE) ||
> + (flags & (LK_SHARED|LK_EXCLUSIVE|LK_DRAIN)));
>
> - /*
> - * Check if the caller is asking us to be schizophrenic.
> - */
> - if ((lkp->lk_flags & (LK_CANRECURSE|LK_RECURSEFAIL)) ==
> - (LK_CANRECURSE|LK_RECURSEFAIL))
> - panic("lockmgr: make up your mind");
>
> - switch (flags & LK_TYPE_MASK) {
> -
> - case LK_SHARED:
> - if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
> - /*
> - * If just polling, check to see if we will block.
> - */
> - if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
> - (LK_HAVE_EXCL | LK_WANT_EXCL))) {
> - error = EBUSY;
> - break;
> - }
> - /*
> - * Wait for exclusive locks and upgrades to clear.
> - */
> - ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
> - (LK_HAVE_EXCL | LK_WANT_EXCL));
> - if (error)
> - break;
> - lkp->lk_sharecount++;
> - break;
> - }
> - /*
> - * We hold an exclusive lock, so downgrade it to shared.
> - * An alternative would be to fail with EDEADLK.
> - */
> - lkp->lk_sharecount++;
> -
> - if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
> - lkp->lk_exclusivecount == 0)
> - panic("lockmgr: not holding exclusive lock");
> - lkp->lk_sharecount += lkp->lk_exclusivecount;
> - lkp->lk_exclusivecount = 0;
> - lkp->lk_flags &= ~LK_HAVE_EXCL;
> - SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
> - if (lkp->lk_waitcount)
> - wakeup((void *)(lkp));
> - break;
> -
> - case LK_EXCLUSIVE:
> - if (WEHOLDIT(lkp, pid, cpu_id)) {
> - /*
> - * Recursive lock.
> - */
> - if ((extflags & LK_CANRECURSE) == 0) {
> - if (extflags & LK_RECURSEFAIL) {
> - error = EDEADLK;
> - break;
> - } else
> -					panic("lockmgr: locking against myself");
> - }
> - lkp->lk_exclusivecount++;
> - break;
> - }
> - /*
> - * If we are just polling, check to see if we will sleep.
> - */
> - if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
> - (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
> - lkp->lk_sharecount != 0)) {
> - error = EBUSY;
> - break;
> - }
> - /*
> - * Try to acquire the want_exclusive flag.
> - */
> - ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
> - (LK_HAVE_EXCL | LK_WANT_EXCL));
> - if (error)
> - break;
> - lkp->lk_flags |= LK_WANT_EXCL;
> - /*
> - * Wait for shared locks and upgrades to finish.
> - */
> - ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
> - lkp->lk_flags &= ~LK_WANT_EXCL;
> - if (error)
> - break;
> - lkp->lk_flags |= LK_HAVE_EXCL;
> - SETHOLDER(lkp, pid, cpu_id);
> - if (lkp->lk_exclusivecount != 0)
> - panic("lockmgr: non-zero exclusive count");
> - lkp->lk_exclusivecount = 1;
> - break;
> -
> - case LK_RELEASE:
> - if (lkp->lk_exclusivecount != 0) {
> - if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
> - panic("lockmgr: pid %d, not exclusive lock "
> - "holder %d unlocking",
> - pid, lkp->lk_lockholder);
> - }
> - lkp->lk_exclusivecount--;
> - if (lkp->lk_exclusivecount == 0) {
> - lkp->lk_flags &= ~LK_HAVE_EXCL;
> - SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
> - }
> - } else if (lkp->lk_sharecount != 0) {
> - lkp->lk_sharecount--;
> - }
> -#ifdef DIAGNOSTIC
> - else
> - panic("lockmgr: release of unlocked lock!");
> -#endif
> - if (lkp->lk_waitcount)
> - wakeup((void *)(lkp));
> - break;
> -
> - case LK_DRAIN:
> - /*
> - * Check that we do not already hold the lock, as it can
> - * never drain if we do. Unfortunately, we have no way to
> - * check for holding a shared lock, but at least we can
> - * check for an exclusive one.
> - */
> - if (WEHOLDIT(lkp, pid, cpu_id))
> - panic("lockmgr: draining against myself");
> - /*
> - * If we are just polling, check to see if we will sleep.
> - */
> - if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
> - (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
> - lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
> - error = EBUSY;
> - break;
> - }
> - ACQUIRE(lkp, error, extflags, 1,
> - ((lkp->lk_flags &
> - (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
> - lkp->lk_sharecount != 0 ||
> - lkp->lk_waitcount != 0));
> - if (error)
> - break;
> - lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
> - SETHOLDER(lkp, pid, cpu_id);
> - lkp->lk_exclusivecount = 1;
> - break;
> -
> - default:
> - panic("lockmgr: unknown locktype request %d",
> - flags & LK_TYPE_MASK);
> - /* NOTREACHED */
> - }
> - if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
> - ((lkp->lk_flags &
> - (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
> - lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
> - lkp->lk_flags &= ~LK_WAITDRAIN;
> - wakeup((void *)&lkp->lk_flags);
> + if (flags & LK_RELEASE) {
> + rrw_exit(&lkp->lk_lck);
> + return (0);
> }
> - return (error);
> -}
>
> -#ifdef DIAGNOSTIC
> -/*
> - * Print out information about state of a lock. Used by VOP_PRINT
> - * routines to display status about contained locks.
> - */
> -void
> -lockmgr_printinfo(__volatile struct lock *lkp)
> -{
> + if (flags & LK_SHARED)
> + rwflags |= RW_READ;
> + if (flags & (LK_EXCLUSIVE|LK_DRAIN))
> + rwflags |= RW_WRITE;
>
> - if (lkp->lk_sharecount)
> - printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
> - lkp->lk_sharecount);
> - else if (lkp->lk_flags & LK_HAVE_EXCL) {
> - printf(" lock type %s: EXCL (count %d) by ",
> - lkp->lk_wmesg, lkp->lk_exclusivecount);
> - printf("pid %d", lkp->lk_lockholder);
> - } else
> - printf(" not locked");
> - if (lkp->lk_waitcount > 0)
> - printf(" with %d pending", lkp->lk_waitcount);
> + if (flags & LK_RECURSEFAIL)
> + rwflags |= RW_RECURSEFAIL;
> + if (flags & LK_NOWAIT)
> + rwflags |= RW_NOSLEEP;
> +
> + return (rrw_enter(&lkp->lk_lck, rwflags));
> }
> -#endif /* DIAGNOSTIC */
>
> #if defined(MULTIPROCESSOR)
> /*
> @@ -343,7 +104,7 @@ lockmgr_printinfo(__volatile struct lock *lkp)
> * so that they show up in profiles.
> */
>
> -struct __mp_lock kernel_lock;
> +struct __mp_lock kernel_lock;
>
> void
> _kernel_lock_init(void)
> @@ -385,10 +146,4 @@ _kernel_proc_unlock(struct proc *p)
> {
> __mp_unlock(&kernel_lock);
> }
> -
> -#ifdef MP_LOCKDEBUG
> -/* CPU-dependent timing, needs this to be settable from ddb. */
> -int __mp_lock_spinout = 200000000;
> -#endif
> -
> #endif /* MULTIPROCESSOR */
> diff --git a/kern/kern_rwlock.c b/kern/kern_rwlock.c
> index d22ae3a..a85f8b1 100644
> --- a/kern/kern_rwlock.c
> +++ b/kern/kern_rwlock.c
> @@ -2,27 +2,19 @@
>
> /*
> * Copyright (c) 2002, 2003 Artur Grabowski <[email protected]>
> - * All rights reserved.
> + * Copyright (c) 2011 Thordur Bjornsson <[email protected]>
> *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions
> - * are met:
> + * Permission to use, copy, modify, and distribute this software for any
> + * purpose with or without fee is hereby granted, provided that the above
> + * copyright notice and this permission notice appear in all copies.
> *
> - * 1. Redistributions of source code must retain the above copyright
> - * notice, this list of conditions and the following disclaimer.
> - * 2. The name of the author may not be used to endorse or promote products
> - * derived from this software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
> - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
> - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
> - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
> - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
> - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
> - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
> - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
> - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
> - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
> + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
> + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
> + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
> + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
> + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
> + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
> */
>
> #include <sys/param.h>
> @@ -69,6 +61,9 @@ static const struct rwlock_op {
> 0,
> PLOCK
> },
> + { /* Sparse Entry. */
> + 0,
> + },
> { /* RW_DOWNGRADE */
> RWLOCK_READ_INCR - RWLOCK_WRLOCK,
> 0,
> @@ -191,7 +186,7 @@ rw_enter(struct rwlock *rwl, int flags)
> unsigned long inc, o;
> int error;
>
> - op = &rw_ops[flags & RW_OPMASK];
> + op = &rw_ops[(flags & RW_OPMASK) - 1];
>
> inc = op->inc + RW_PROC(curproc) * op->proc_mult;
> retry:
> @@ -258,6 +253,13 @@ rw_exit(struct rwlock *rwl)
> wakeup(rwl);
> }
>
> +int
> +rw_status(struct rwlock *rwl)
> +{
> +
> + return (rwl->rwl_owner != 0L);
> +}
> +
> #ifdef DIAGNOSTIC
> void
> rw_assert_wrlock(struct rwlock *rwl)
> @@ -283,3 +285,55 @@ rw_assert_unlocked(struct rwlock *rwl)
> panic("%s: lock held", rwl->rwl_name);
> }
> #endif
> +
> +/* Recursive rwlocks. */
> +void
> +rrw_init(struct rrwlock *rrwl, char *name)
> +{
> + bzero(rrwl, sizeof(struct rrwlock));
> + rw_init(&rrwl->rrwl_lock, name);
> +}
> +
> +int
> +rrw_enter(struct rrwlock *rrwl, int flags)
> +{
> + int rv;
> +
> + if (RWLOCK_OWNER(&rrwl->rrwl_lock) ==
> + (struct proc *)RW_PROC(curproc)) {
> + if (flags & RW_RECURSEFAIL)
> + return (EDEADLK);
> + else {
> + rrwl->rrwl_wcnt++;
> + return (0);
> + }
> + }
> +
> + rv = rw_enter(&rrwl->rrwl_lock, flags);
> + if (rv == 0)
> + rrwl->rrwl_wcnt = 1;
> +
> + return (rv);
> +}
> +
> +void
> +rrw_exit(struct rrwlock *rrwl)
> +{
> +
> + if (RWLOCK_OWNER(&rrwl->rrwl_lock) ==
> + (struct proc *)RW_PROC(curproc)) {
> + KASSERT(rrwl->rrwl_wcnt > 0);
> + rrwl->rrwl_wcnt--;
> + if (rrwl->rrwl_wcnt != 0)
> + return;
> + }
> +
> + rw_exit(&rrwl->rrwl_lock);
> +}
> +
> +int
> +rrw_status(struct rrwlock *rrwl)
> +{
> +
> + return (rw_status(&rrwl->rrwl_lock));
> +}
> diff --git a/sys/lock.h b/sys/lock.h
> index 54dbf3b..13ddeb3 100644
> --- a/sys/lock.h
> +++ b/sys/lock.h
> @@ -42,6 +42,8 @@
> #include <machine/lock.h>
> #endif
>
> +#include <sys/rwlock.h>
> +
> struct simplelock {
> };
>
> @@ -53,123 +55,27 @@ typedef struct simplelock *simple_lock_t;
> #define simple_lock_try(lkp) (1) /* always succeeds */
> #define simple_unlock(lkp)
> #define simple_lock_assert(lkp)
> -
> -static __inline void simple_lock_init(struct simplelock *lkp)
> -{
> -}
> -
> +#define simple_lock_init(lkp)
> #endif /* _KERNEL */
>
> -typedef struct lock lock_data_t;
> -typedef struct lock *lock_t;
> -
> -/*
> - * The general lock structure. Provides for multiple shared locks,
> - * upgrading from shared to exclusive, and sleeping until the lock
> - * can be gained. The simple locks are defined in <machine/param.h>.
> - */
> struct lock {
> - u_int lk_flags; /* see below */
> - int lk_sharecount; /* # of accepted shared locks */
> - int lk_waitcount; /* # of processes sleeping for lock */
> - int lk_exclusivecount; /* # of recursive exclusive locks */
> -
> - /*
> - * This is the sleep message for sleep locks, and a simple name
> - * for spin locks.
> - */
> - char *lk_wmesg; /* resource sleeping (for tsleep) */
> -
> - /* pid of exclusive lock holder */
> - pid_t lk_lockholder;
> -
> - /* priority at which to sleep */
> - int lk_prio;
> -
> - /* maximum sleep time (for tsleep) */
> - int lk_timo;
> + struct rrwlock lk_lck;
> };
>
> -/*
> - * Lock request types:
> - * LK_SHARED - get one of many possible shared locks. If a process
> - * holding an exclusive lock requests a shared lock, the exclusive
> - * lock(s) will be downgraded to shared locks.
> - * LK_EXCLUSIVE - stop further shared locks, when they are cleared,
> - * grant a pending upgrade if it exists, then grant an exclusive
> - * lock. Only one exclusive lock may exist at a time, except that
> - * a process holding an exclusive lock may get additional exclusive
> - * locks if it explicitly sets the LK_CANRECURSE flag in the lock
> - * request, or if the LK_CANRECUSE flag was set when the lock was
> - * initialized.
> - * LK_RELEASE - release one instance of a lock.
> - * LK_DRAIN - wait for all activity on the lock to end, then mark it
> - * decommissioned. This feature is used before freeing a lock that
> - * is part of a piece of memory that is about to be freed.
> - *
> - * These are flags that are passed to the lockmgr routine.
> - */
> -#define LK_TYPE_MASK 0x0000000f /* type of lock sought */
> -#define LK_SHARED 0x00000001 /* shared lock */
> -#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
> -#define LK_RELEASE 0x00000006 /* release any type of lock */
> -#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
> -/*
> - * External lock flags.
> - *
> - * The first three flags may be set in lock_init to set their mode permanently,
> - * or passed in as arguments to the lock manager.
> - */
> -#define LK_EXTFLG_MASK 0x00200070 /* mask of external flags */
> -#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
> -#define LK_CANRECURSE	0x00000040	/* allow recursive exclusive lock */
> -#define LK_RECURSEFAIL	0x00200000	/* fail if recursive exclusive lock */
> -/*
> - * Internal lock flags.
> - *
> - * These flags are used internally to the lock manager.
> - */
> -#define LK_WANT_EXCL 0x00002000 /* exclusive lock sought */
> -#define LK_HAVE_EXCL 0x00004000 /* exclusive lock obtained */
> -#define LK_WAITDRAIN 0x00008000 /* process waiting for lock to drain */
> -#define LK_DRAINING 0x00040000 /* lock is being drained */
> -#define LK_DRAINED 0x00080000 /* lock has been decommissioned */
> -/*
> - * Control flags
> - *
> - * Non-persistent external flags.
> - */
> -#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
> -
> -/*
> - * Lock return status.
> - *
> - * Successfully obtained locks return 0. Locks will always succeed
> - * unless one of the following is true:
> - * LK_NOWAIT is set and a sleep would be required (returns EBUSY).
> - * PCATCH is set in lock priority and a signal arrives (returns
> - * either EINTR or ERESTART if system calls is to be restarted).
> - * Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
> - * A failed lock attempt always returns a non-zero error value. No lock
> - * is held after an error return.
> - */
> -
> -/*
> - * Indicator that no process holds exclusive lock
> - */
> -#define LK_KERNPROC ((pid_t) -2)
> -#define LK_NOPROC ((pid_t) -1)
> -#define LK_NOCPU ((cpuid_t) -1)
> -
> -void lockinit(struct lock *, int prio, char *wmesg, int timo,
> - int flags);
> -int lockmgr(__volatile struct lock *, u_int flags, void *);
> -void lockmgr_printinfo(__volatile struct lock *);
> +#define LK_SHARED 0x01 /* shared lock */
> +#define LK_EXCLUSIVE 0x02 /* exclusive lock */
> +#define LK_TYPE_MASK 0x03 /* type of lock sought */
> +#define LK_DRAIN 0x04 /* wait for all lock activity to end */
> +#define LK_RELEASE 0x08 /* release any type of lock */
> +#define LK_NOWAIT 0x10 /* do not sleep to await lock */
> +#define LK_CANRECURSE 0x20 /* allow recursive exclusive lock */
> +#define LK_RECURSEFAIL 0x40 /* fail if recursive exclusive lock */
> +#define LK_RETRY 0x80 /* vn_lock: retry until locked */
> +
> +void lockinit(struct lock *, int, char *, int, int);
> +int lockmgr(struct lock *, u_int flags, void *);
> int lockstatus(struct lock *);
>
> -int spinlock_release_all(__volatile struct lock *);
> -void spinlock_acquire_count(__volatile struct lock *, int);
> -
> -#define LOCK_ASSERT(x) /* nothing */
> +#define lockmgr_printinfo(lkp)
>
> #endif /* !_LOCK_H_ */
> diff --git a/sys/rwlock.h b/sys/rwlock.h
> index 5629ad2..969250d 100644
> --- a/sys/rwlock.h
> +++ b/sys/rwlock.h
> @@ -1,27 +1,18 @@
> /* $OpenBSD$ */
> /*
> * Copyright (c) 2002 Artur Grabowski <[email protected]>
> - * All rights reserved.
> *
> - * Redistribution and use in source and binary forms, with or without
> - * modification, are permitted provided that the following conditions
> - * are met:
> + * Permission to use, copy, modify, and distribute this software for any
> + * purpose with or without fee is hereby granted, provided that the above
> + * copyright notice and this permission notice appear in all copies.
> *
> - * 1. Redistributions of source code must retain the above copyright
> - * notice, this list of conditions and the following disclaimer.
> - * 2. The name of the author may not be used to endorse or promote products
> - * derived from this software without specific prior written permission.
> - *
> - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
> - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
> - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
> - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
> - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
> - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
> PROFITS;
> - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
> - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
> - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
> - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
> + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
> + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
> + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
> + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
> + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
> + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
> */
>
> /*
> @@ -43,7 +34,7 @@
> * optimized by machine dependent code when __HAVE_MD_RWLOCK is defined.
> *
> * MD code that defines __HAVE_MD_RWLOCK and implement four functions:
> - *
> + *
> * void rw_enter_read(struct rwlock *)
> * atomically test for RWLOCK_WRLOCK and if not set, increment the lock
> * by RWLOCK_READ_INCR. While RWLOCK_WRLOCK is set, loop into rw_enter_wait.
> @@ -68,7 +59,6 @@
> #ifndef SYS_RWLOCK_H
> #define SYS_RWLOCK_H
>
> -
> struct proc;
>
> struct rwlock {
> @@ -107,17 +97,31 @@ void rw_assert_unlocked(struct rwlock *);
>
> int rw_enter(struct rwlock *, int);
> void rw_exit(struct rwlock *);
> -#define RW_WRITE 0x00UL /* exclusive lock */
> -#define RW_READ 0x01UL /* shared lock */
> -#define RW_DOWNGRADE 0x02UL /* downgrade exclusive to shared */
> -#define RW_OPMASK 0x03UL
> +int rw_status(struct rwlock *);
> +
> +#define RW_WRITE 0x0001UL /* exclusive lock */
> +#define RW_READ 0x0002UL /* shared lock */
> +#define RW_DOWNGRADE 0x0004UL /* downgrade exclusive to shared */
> +#define RW_OPMASK 0x0007UL
>
> -#define RW_INTR 0x10UL /* interruptible sleep */
> -#define RW_SLEEPFAIL 0x20UL /* fail if we slept for the lock */
> -#define RW_NOSLEEP 0x40UL /* don't wait for the lock */
> +#define RW_INTR 0x0010UL /* interruptible sleep */
> +#define RW_SLEEPFAIL 0x0020UL /* fail if we slept for the lock */
> +#define RW_NOSLEEP 0x0040UL /* don't wait for the lock */
> +#define	RW_RECURSEFAIL	0x0080UL	/* Fail on recursion for RRW locks. */
>
> #ifndef rw_cas
> int rw_cas(volatile unsigned long *, unsigned long, unsigned long);
> #endif
>
> +/* Recursive rwlocks. */
> +struct rrwlock {
> + struct rwlock rrwl_lock;
> + uint32_t rrwl_wcnt; /* # writers. */
> +};
> +
> +void rrw_init(struct rrwlock *, char *);
> +int rrw_enter(struct rrwlock *, int);
> +void rrw_exit(struct rrwlock *);
> +int rrw_status(struct rrwlock *);
> +
> #endif