Hi,

Andrew Doran <a...@netbsd.org> writes:

> Hi,
>
> This also happened the last time I touched rw_downgrade(), and I backed out
> the change then, but both times I don't see the bug.  I have some questions:
>
> - Are you running DIAGNOSTIC and/or LOCKDEBUG?  I would be very interested
>   to see what happens with a LOCKDEBUG kernel here.

I will enable LOCKDEBUG and DIAGNOSTIC soon.
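
For reference, here is roughly how I plan to do it (a minimal sketch,
assuming a GENERIC-derived config; MYKERNEL is just a placeholder name):

  # added to the kernel config, e.g. sys/arch/amd64/conf/MYKERNEL
  options         DIAGNOSTIC      # inexpensive kernel consistency checks
  options         LOCKDEBUG       # expensive locking checks/support

  # then rebuild the kernel from the src tree, e.g. with build.sh
  ./build.sh -U -u kernel=MYKERNEL

I will follow up with the results from that kernel.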

> - Do you have an ATI Radeon graphics chip?
> - Are you using ZFS?

My GPU is the one integrated in my Intel CPU (Kaby Lake Refresh).
And I do not use ZFS at all; all partitions are FFSv2 with WAPBL.

> Thanks,
> Andrew
>
>
> On Mon, Jan 20, 2020 at 12:41:37PM +0900, Ryo ONODERA wrote:
>> Hi,
>> 
>> After this commit, the kernel stalls just before the root file system
>> is found on my NetBSD/amd64 laptop.
>> 
>> Reverting
>> src/sys/kern/kern_rwlock.c to r1.60
>> and
>> src/sys/sys/rwlock.h to r1.12
>> in the latest -current tree gives me a kernel that works as it did
>> before.
>> 
>> On another laptop, the problematic kernel also stalls before root file
>> system detection, just like on mine.
>> 
>> It may be a universal problem.
>> 
>> Could you take a look at this problem?
>> 
>> Thank you.
>> 
>> "Andrew Doran" <a...@netbsd.org> writes:
>> 
>> > Module Name:       src
>> > Committed By:      ad
>> > Date:              Sun Jan 19 18:34:24 UTC 2020
>> >
>> > Modified Files:
>> >    src/sys/kern: kern_rwlock.c
>> >    src/sys/sys: rwlock.h
>> >
>> > Log Message:
>> > Tidy rwlocks a bit, no functional change intended.  Mainly:
>> >
>> > - rw_downgrade(): do it in a for () loop like all the others.
>> > - Explicitly carry around RW_NODEBUG - don't be lazy.
>> > - Remove pointless macros.
>> > - Don't make assertions conditional on LOCKDEBUG, there's no reason.
>> > - Make space for a new flag bit (not added yet).
>> >
>> >
>> > To generate a diff of this commit:
>> > cvs rdiff -u -r1.60 -r1.61 src/sys/kern/kern_rwlock.c
>> > cvs rdiff -u -r1.12 -r1.13 src/sys/sys/rwlock.h
>> >
>> > Please note that diffs are not public domain; they are subject to the
>> > copyright notices on the relevant files.
>> >
>> > Modified files:
>> >
>> > Index: src/sys/kern/kern_rwlock.c
>> > diff -u src/sys/kern/kern_rwlock.c:1.60 src/sys/kern/kern_rwlock.c:1.61
>> > --- src/sys/kern/kern_rwlock.c:1.60        Sun Jan 12 18:37:10 2020
>> > +++ src/sys/kern/kern_rwlock.c     Sun Jan 19 18:34:24 2020
>> > @@ -1,4 +1,4 @@
>> > -/*        $NetBSD: kern_rwlock.c,v 1.60 2020/01/12 18:37:10 ad Exp $      */
>> > +/*        $NetBSD: kern_rwlock.c,v 1.61 2020/01/19 18:34:24 ad Exp $      */
>> >  
>> >  /*-
>> >   * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2019, 2020
>> > @@ -39,7 +39,9 @@
>> >   */
>> >  
>> >  #include <sys/cdefs.h>
>> > -__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.60 2020/01/12 18:37:10 ad Exp $");
>> > +__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.61 2020/01/19 18:34:24 ad Exp $");
>> > +
>> > +#include "opt_lockdebug.h"
>> >  
>> >  #define   __RWLOCK_PRIVATE
>> >  
>> > @@ -63,58 +65,32 @@ __KERNEL_RCSID(0, "$NetBSD: kern_rwlock.
>> >   * LOCKDEBUG
>> >   */
>> >  
>> > -#if defined(LOCKDEBUG)
>> > -
>> > -#define   RW_WANTLOCK(rw, op)                                             \
>> > -  LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw),                        \
>> > -      (uintptr_t)__builtin_return_address(0), op == RW_READER);
>> > -#define   RW_LOCKED(rw, op)                                               \
>> > -  LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL,                    \
>> > -      (uintptr_t)__builtin_return_address(0), op == RW_READER);
>> > -#define   RW_UNLOCKED(rw, op)                                             \
>> > -  LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw),                        \
>> > -      (uintptr_t)__builtin_return_address(0), op == RW_READER);
>> > -#define   RW_DASSERT(rw, cond)                                            \
>> > -do {                                                                      \
>> > -  if (__predict_false(!(cond)))                                   \
>> > -          rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
>> > -} while (/* CONSTCOND */ 0);
>> > -
>> > -#else     /* LOCKDEBUG */
>> > -
>> > -#define   RW_WANTLOCK(rw, op)     /* nothing */
>> > -#define   RW_LOCKED(rw, op)       /* nothing */
>> > -#define   RW_UNLOCKED(rw, op)     /* nothing */
>> > -#define   RW_DASSERT(rw, cond)    /* nothing */
>> > +#define   RW_DEBUG_P(rw)          (((rw)->rw_owner & RW_NODEBUG) == 0)
>> >  
>> > -#endif    /* LOCKDEBUG */
>> > +#define   RW_WANTLOCK(rw, op) \
>> > +    LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw), \
>> > +        (uintptr_t)__builtin_return_address(0), op == RW_READER);
>> > +#define   RW_LOCKED(rw, op) \
>> > +    LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL, \
>> > +        (uintptr_t)__builtin_return_address(0), op == RW_READER);
>> > +#define   RW_UNLOCKED(rw, op) \
>> > +    LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw), \
>> > +        (uintptr_t)__builtin_return_address(0), op == RW_READER);
>> >  
>> >  /*
>> >   * DIAGNOSTIC
>> >   */
>> >  
>> >  #if defined(DIAGNOSTIC)
>> > -
>> > -#define   RW_ASSERT(rw, cond)                                             \
>> > -do {                                                                      \
>> > -  if (__predict_false(!(cond)))                                   \
>> > +#define   RW_ASSERT(rw, cond) \
>> > +do { \
>> > +  if (__predict_false(!(cond))) \
>> >            rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
>> >  } while (/* CONSTCOND */ 0)
>> > -
>> >  #else
>> > -
>> >  #define   RW_ASSERT(rw, cond)     /* nothing */
>> > -
>> >  #endif    /* DIAGNOSTIC */
>> >  
>> > -#define   RW_SETDEBUG(rw, on)             ((rw)->rw_owner |= (on) ? 0 : RW_NODEBUG)
>> > -#define   RW_DEBUG_P(rw)                  (((rw)->rw_owner & RW_NODEBUG) == 0)
>> > -#if defined(LOCKDEBUG)
>> > -#define   RW_INHERITDEBUG(n, o)           (n) |= (o) & RW_NODEBUG
>> > -#else /* defined(LOCKDEBUG) */
>> > -#define   RW_INHERITDEBUG(n, o)           /* nothing */
>> > -#endif /* defined(LOCKDEBUG) */
>> > -
>> >  /*
>> >   * Memory barriers.
>> >   */
>> > @@ -128,29 +104,6 @@ do {                                                  \
>> >  #define   RW_MEMBAR_PRODUCER()            membar_producer()
>> >  #endif
>> >  
>> > -static void       rw_abort(const char *, size_t, krwlock_t *, const char *);
>> > -static void       rw_dump(const volatile void *, lockop_printer_t);
>> > -static lwp_t      *rw_owner(wchan_t);
>> > -
>> > -static inline uintptr_t
>> > -rw_cas(krwlock_t *rw, uintptr_t o, uintptr_t n)
>> > -{
>> > -
>> > -  RW_INHERITDEBUG(n, o);
>> > -  return (uintptr_t)atomic_cas_ptr((volatile void *)&rw->rw_owner,
>> > -      (void *)o, (void *)n);
>> > -}
>> > -
>> > -static inline void
>> > -rw_swap(krwlock_t *rw, uintptr_t o, uintptr_t n)
>> > -{
>> > -
>> > -  RW_INHERITDEBUG(n, o);
>> > -  n = (uintptr_t)atomic_swap_ptr((volatile void *)&rw->rw_owner,
>> > -      (void *)n);
>> > -  RW_DASSERT(rw, n == o);
>> > -}
>> > -
>> >  /*
>> >   * For platforms that do not provide stubs, or for the LOCKDEBUG case.
>> >   */
>> > @@ -164,6 +117,10 @@ __strong_alias(rw_exit,rw_vector_exit);
>> >  __strong_alias(rw_tryenter,rw_vector_tryenter);
>> >  #endif
>> >  
>> > +static void       rw_abort(const char *, size_t, krwlock_t *, const char *);
>> > +static void       rw_dump(const volatile void *, lockop_printer_t);
>> > +static lwp_t      *rw_owner(wchan_t);
>> > +
>> >  lockops_t rwlock_lockops = {
>> >    .lo_name = "Reader / writer lock",
>> >    .lo_type = LOCKOPS_SLEEP,
>> > @@ -179,6 +136,37 @@ syncobj_t rw_syncobj = {
>> >  };
>> >  
>> >  /*
>> > + * rw_cas:
>> > + *
>> > + *        Do an atomic compare-and-swap on the lock word.
>> > + */
>> > +static inline uintptr_t
>> > +rw_cas(krwlock_t *rw, uintptr_t o, uintptr_t n)
>> > +{
>> > +
>> > +  return (uintptr_t)atomic_cas_ptr((volatile void *)&rw->rw_owner,
>> > +      (void *)o, (void *)n);
>> > +}
>> > +
>> > +/*
>> > + * rw_swap:
>> > + *
>> > + *        Do an atomic swap of the lock word.  This is used only when it's
>> > + *        known that the lock word is set up such that it can't be changed
>> > + *        behind us (assert this), so there's no point considering the result.
>> > + */
>> > +static inline void
>> > +rw_swap(krwlock_t *rw, uintptr_t o, uintptr_t n)
>> > +{
>> > +
>> > +  n = (uintptr_t)atomic_swap_ptr((volatile void *)&rw->rw_owner,
>> > +      (void *)n);
>> > +
>> > +  RW_ASSERT(rw, n == o);
>> > +  RW_ASSERT(rw, (o & RW_HAS_WAITERS) != 0);
>> > +}
>> > +
>> > +/*
>> >   * rw_dump:
>> >   *
>> >   *        Dump the contents of a rwlock structure.
>> > @@ -214,16 +202,14 @@ rw_abort(const char *func, size_t line, 
>> >   *
>> >   *        Initialize a rwlock for use.
>> >   */
>> > -void _rw_init(krwlock_t *, uintptr_t);
>> >  void
>> >  _rw_init(krwlock_t *rw, uintptr_t return_address)
>> >  {
>> > -  bool dodebug;
>> >  
>> > -  memset(rw, 0, sizeof(*rw));
>> > -
>> > -  dodebug = LOCKDEBUG_ALLOC(rw, &rwlock_lockops, return_address);
>> > -  RW_SETDEBUG(rw, dodebug);
>> > +  if (LOCKDEBUG_ALLOC(rw, &rwlock_lockops, return_address))
>> > +          rw->rw_owner = 0;
>> > +  else
>> > +          rw->rw_owner = RW_NODEBUG;
>> >  }
>> >  
>> >  void
>> > @@ -243,7 +229,7 @@ rw_destroy(krwlock_t *rw)
>> >  {
>> >  
>> >    RW_ASSERT(rw, (rw->rw_owner & ~RW_NODEBUG) == 0);
>> > -  LOCKDEBUG_FREE(RW_DEBUG_P(rw), rw);
>> > +  LOCKDEBUG_FREE((rw->rw_owner & RW_NODEBUG) == 0, rw);
>> >  }
>> >  
>> >  /*
>> > @@ -327,7 +313,7 @@ rw_vector_enter(krwlock_t *rw, const krw
>> >            need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
>> >            queue = TS_READER_Q;
>> >    } else {
>> > -          RW_DASSERT(rw, op == RW_WRITER);
>> > +          RW_ASSERT(rw, op == RW_WRITER);
>> >            incr = curthread | RW_WRITE_LOCKED;
>> >            set_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
>> >            need_wait = RW_WRITE_LOCKED | RW_THREAD;
>> > @@ -430,7 +416,7 @@ rw_vector_enter(krwlock_t *rw, const krw
>> >          (uintptr_t)__builtin_return_address(0)));
>> >    LOCKSTAT_EXIT(lsflag);
>> >  
>> > -  RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
>> > +  RW_ASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
>> >        (op == RW_READER && RW_COUNT(rw) != 0));
>> >    RW_LOCKED(rw, op);
>> >  }
>> > @@ -448,7 +434,8 @@ rw_vector_exit(krwlock_t *rw)
>> >    int rcnt, wcnt;
>> >    lwp_t *l;
>> >  
>> > -  curthread = (uintptr_t)curlwp;
>> > +  l = curlwp;
>> > +  curthread = (uintptr_t)l;
>> >    RW_ASSERT(rw, curthread != 0);
>> >  
>> >    /*
>> > @@ -491,8 +478,8 @@ rw_vector_exit(krwlock_t *rw)
>> >     */
>> >    ts = turnstile_lookup(rw);
>> >    owner = rw->rw_owner;
>> > -  RW_DASSERT(rw, ts != NULL);
>> > -  RW_DASSERT(rw, (owner & RW_HAS_WAITERS) != 0);
>> > +  RW_ASSERT(rw, ts != NULL);
>> > +  RW_ASSERT(rw, (owner & RW_HAS_WAITERS) != 0);
>> >  
>> >    wcnt = TS_WAITERS(ts, TS_WRITER_Q);
>> >    rcnt = TS_WAITERS(ts, TS_READER_Q);
>> > @@ -509,31 +496,35 @@ rw_vector_exit(krwlock_t *rw)
>> >     * do the work of acquiring the lock in rw_vector_enter().
>> >     */
>> >    if (rcnt == 0 || decr == RW_READ_INCR) {
>> > -          RW_DASSERT(rw, wcnt != 0);
>> > -          RW_DASSERT(rw, (owner & RW_WRITE_WANTED) != 0);
>> > +          RW_ASSERT(rw, wcnt != 0);
>> > +          RW_ASSERT(rw, (owner & RW_WRITE_WANTED) != 0);
>> >  
>> >            if (rcnt != 0) {
>> >                    /* Give the lock to the longest waiting writer. */
>> >                    l = TS_FIRST(ts, TS_WRITER_Q);
>> > -                  newown = (uintptr_t)l | RW_WRITE_LOCKED | RW_HAS_WAITERS;
>> > +                  newown = (uintptr_t)l | (owner & RW_NODEBUG);
>> > +                  newown |= RW_WRITE_LOCKED | RW_HAS_WAITERS;
>> >                    if (wcnt > 1)
>> >                            newown |= RW_WRITE_WANTED;
>> >                    rw_swap(rw, owner, newown);
>> >                    turnstile_wakeup(ts, TS_WRITER_Q, 1, l);
>> >            } else {
>> >                    /* Wake all writers and let them fight it out. */
>> > -                  rw_swap(rw, owner, RW_WRITE_WANTED);
>> > +                  newown = owner & RW_NODEBUG;
>> > +                  newown |= RW_WRITE_WANTED;
>> > +                  rw_swap(rw, owner, newown);
>> >                    turnstile_wakeup(ts, TS_WRITER_Q, wcnt, NULL);
>> >            }
>> >    } else {
>> > -          RW_DASSERT(rw, rcnt != 0);
>> > +          RW_ASSERT(rw, rcnt != 0);
>> >  
>> >            /*
>> >             * Give the lock to all blocked readers.  If there
>> >             * is a writer waiting, new readers that arrive
>> >             * after the release will be blocked out.
>> >             */
>> > -          newown = rcnt << RW_READ_COUNT_SHIFT;
>> > +          newown = owner & RW_NODEBUG;
>> > +          newown += rcnt << RW_READ_COUNT_SHIFT;
>> >            if (wcnt != 0)
>> >                    newown |= RW_HAS_WAITERS | RW_WRITE_WANTED;
>> >                    
>> > @@ -552,8 +543,10 @@ int
>> >  rw_vector_tryenter(krwlock_t *rw, const krw_t op)
>> >  {
>> >    uintptr_t curthread, owner, incr, need_wait, next;
>> > +  lwp_t *l;
>> >  
>> > -  curthread = (uintptr_t)curlwp;
>> > +  l = curlwp;
>> > +  curthread = (uintptr_t)l;
>> >  
>> >    RW_ASSERT(rw, curthread != 0);
>> >  
>> > @@ -561,7 +554,7 @@ rw_vector_tryenter(krwlock_t *rw, const 
>> >            incr = RW_READ_INCR;
>> >            need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
>> >    } else {
>> > -          RW_DASSERT(rw, op == RW_WRITER);
>> > +          RW_ASSERT(rw, op == RW_WRITER);
>> >            incr = curthread | RW_WRITE_LOCKED;
>> >            need_wait = RW_WRITE_LOCKED | RW_THREAD;
>> >    }
>> > @@ -572,24 +565,23 @@ rw_vector_tryenter(krwlock_t *rw, const 
>> >            next = rw_cas(rw, owner, owner + incr);
>> >            if (__predict_true(next == owner)) {
>> >                    /* Got it! */
>> > -                  RW_MEMBAR_ENTER();
>> >                    break;
>> >            }
>> >    }
>> >  
>> >    RW_WANTLOCK(rw, op);
>> >    RW_LOCKED(rw, op);
>> > -  RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
>> > +  RW_ASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
>> >        (op == RW_READER && RW_COUNT(rw) != 0));
>> >  
>> > +  RW_MEMBAR_ENTER();
>> >    return 1;
>> >  }
>> >  
>> >  /*
>> >   * rw_downgrade:
>> >   *
>> > - *        Downgrade a write lock to a read lock.  Optimise memory accesses for
>> > - *        the uncontended case.
>> > + *        Downgrade a write lock to a read lock.
>> >   */
>> >  void
>> >  rw_downgrade(krwlock_t *rw)
>> > @@ -597,55 +589,64 @@ rw_downgrade(krwlock_t *rw)
>> >    uintptr_t owner, curthread, newown, next;
>> >    turnstile_t *ts;
>> >    int rcnt, wcnt;
>> > +  lwp_t *l;
>> >  
>> > -  curthread = (uintptr_t)curlwp;
>> > +  l = curlwp;
>> > +  curthread = (uintptr_t)l;
>> >    RW_ASSERT(rw, curthread != 0);
>> > -  RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) != 0);
>> > +  RW_ASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) != 0);
>> >    RW_ASSERT(rw, RW_OWNER(rw) == curthread);
>> >    RW_UNLOCKED(rw, RW_WRITER);
>> >  #if !defined(DIAGNOSTIC)
>> >    __USE(curthread);
>> >  #endif
>> >  
>> > -  /*
>> > -   * If there are no waiters, so we can do this the easy way.
>> > -   * Try swapping us down to one read hold.  If it fails, the
>> > -   * lock condition has changed and we most likely now have
>> > -   * waiters.
>> > -   */
>> >    RW_MEMBAR_PRODUCER();
>> > -  owner = curthread | RW_WRITE_LOCKED;
>> > -  next = rw_cas(rw, owner, RW_READ_INCR);
>> > -  if (__predict_true(next == owner)) {
>> > -          RW_LOCKED(rw, RW_READER);
>> > -          RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
>> > -          RW_DASSERT(rw, RW_COUNT(rw) != 0);
>> > -          return;
>> > -  }
>> >  
>> > -  /*
>> > -   * Grab the turnstile chain lock.  This gets the interlock
>> > -   * on the sleep queue.  Once we have that, we can adjust the
>> > -   * waiter bits.
>> > -   */
>> > -  for (;;) {
>> > -          owner = next;
>> > +  for (owner = rw->rw_owner;; owner = next) {
>> > +          /*
>> > +           * If there are no waiters we can do this the easy way.  Try
>> > +           * swapping us down to one read hold.  If it fails, the lock
>> > +           * condition has changed and we most likely now have
>> > +           * waiters.
>> > +           */
>> > +          if ((owner & RW_HAS_WAITERS) == 0) {
>> > +                  newown = (owner & RW_NODEBUG);
>> > +                  next = rw_cas(rw, owner, newown + RW_READ_INCR);
>> > +                  if (__predict_true(next == owner)) {
>> > +                          RW_LOCKED(rw, RW_READER);
>> > +                          RW_ASSERT(rw,
>> > +                              (rw->rw_owner & RW_WRITE_LOCKED) == 0);
>> > +                          RW_ASSERT(rw, RW_COUNT(rw) != 0);
>> > +                          return;
>> > +                  }
>> > +                  continue;
>> > +          }
>> > +
>> > +          /*
>> > +           * Grab the turnstile chain lock.  This gets the interlock
>> > +           * on the sleep queue.  Once we have that, we can adjust the
>> > +           * waiter bits.
>> > +           */
>> >            ts = turnstile_lookup(rw);
>> > -          RW_DASSERT(rw, ts != NULL);
>> > +          RW_ASSERT(rw, ts != NULL);
>> >  
>> >            rcnt = TS_WAITERS(ts, TS_READER_Q);
>> >            wcnt = TS_WAITERS(ts, TS_WRITER_Q);
>> >  
>> > -          /*
>> > -           * If there are no readers, just preserve the waiters
>> > -           * bits, swap us down to one read hold and return.
>> > -           */
>> >            if (rcnt == 0) {
>> > -                  RW_DASSERT(rw, wcnt != 0);
>> > -                  RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_WANTED) != 0);
>> > -                  RW_DASSERT(rw, (rw->rw_owner & RW_HAS_WAITERS) != 0);
>> > -
>> > -                  newown = RW_READ_INCR | RW_HAS_WAITERS | RW_WRITE_WANTED;
>> > +                  /*
>> > +                   * If there are no readers, just preserve the
>> > +                   * waiters bits, swap us down to one read hold and
>> > +                   * return.
>> > +                   */
>> > +                  RW_ASSERT(rw, wcnt != 0);
>> > +                  RW_ASSERT(rw, (rw->rw_owner & RW_WRITE_WANTED) != 0);
>> > +                  RW_ASSERT(rw, (rw->rw_owner & RW_HAS_WAITERS) != 0);
>> > +
>> > +                  newown = owner & RW_NODEBUG;
>> > +                  newown = RW_READ_INCR | RW_HAS_WAITERS |
>> > +                      RW_WRITE_WANTED;
>> >                    next = rw_cas(rw, owner, newown);
>> >                    turnstile_exit(rw);
>> >                    if (__predict_true(next == owner))
>> > @@ -653,11 +654,12 @@ rw_downgrade(krwlock_t *rw)
>> >            } else {
>> >                    /*
>> >                     * Give the lock to all blocked readers.  We may
>> > -                   * retain one read hold if downgrading.  If there
>> > -                   * is a writer waiting, new readers will be blocked
>> > +                   * retain one read hold if downgrading.  If there is
>> > +                   * a writer waiting, new readers will be blocked
>> >                     * out.
>> >                     */
>> > -                  newown = (rcnt << RW_READ_COUNT_SHIFT) + RW_READ_INCR;
>> > +                  newown = owner & RW_NODEBUG;
>> > +                  newown += (rcnt << RW_READ_COUNT_SHIFT) + RW_READ_INCR;
>> >                    if (wcnt != 0)
>> >                            newown |= RW_HAS_WAITERS | RW_WRITE_WANTED;
>> >  
>> > @@ -673,22 +675,24 @@ rw_downgrade(krwlock_t *rw)
>> >  
>> >    RW_WANTLOCK(rw, RW_READER);
>> >    RW_LOCKED(rw, RW_READER);
>> > -  RW_DASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
>> > -  RW_DASSERT(rw, RW_COUNT(rw) != 0);
>> > +  RW_ASSERT(rw, (rw->rw_owner & RW_WRITE_LOCKED) == 0);
>> > +  RW_ASSERT(rw, RW_COUNT(rw) != 0);
>> >  }
>> >  
>> >  /*
>> >   * rw_tryupgrade:
>> >   *
>> >   *        Try to upgrade a read lock to a write lock.  We must be the only
>> > - *        reader.  Optimise memory accesses for the uncontended case.
>> > + *        reader.
>> >   */
>> >  int
>> >  rw_tryupgrade(krwlock_t *rw)
>> >  {
>> >    uintptr_t owner, curthread, newown, next;
>> > +  struct lwp *l;
>> >  
>> > -  curthread = (uintptr_t)curlwp;
>> > +  l = curlwp;
>> > +  curthread = (uintptr_t)l;
>> >    RW_ASSERT(rw, curthread != 0);
>> >    RW_ASSERT(rw, rw_read_held(rw));
>> >  
>> > @@ -709,8 +713,8 @@ rw_tryupgrade(krwlock_t *rw)
>> >    RW_UNLOCKED(rw, RW_READER);
>> >    RW_WANTLOCK(rw, RW_WRITER);
>> >    RW_LOCKED(rw, RW_WRITER);
>> > -  RW_DASSERT(rw, rw->rw_owner & RW_WRITE_LOCKED);
>> > -  RW_DASSERT(rw, RW_OWNER(rw) == curthread);
>> > +  RW_ASSERT(rw, rw->rw_owner & RW_WRITE_LOCKED);
>> > +  RW_ASSERT(rw, RW_OWNER(rw) == curthread);
>> >  
>> >    return 1;
>> >  }
>> >
>> > Index: src/sys/sys/rwlock.h
>> > diff -u src/sys/sys/rwlock.h:1.12 src/sys/sys/rwlock.h:1.13
>> > --- src/sys/sys/rwlock.h:1.12      Wed Jan  1 21:34:39 2020
>> > +++ src/sys/sys/rwlock.h   Sun Jan 19 18:34:24 2020
>> > @@ -1,7 +1,7 @@
>> > -/*        $NetBSD: rwlock.h,v 1.12 2020/01/01 21:34:39 ad Exp $   */
>> > +/*        $NetBSD: rwlock.h,v 1.13 2020/01/19 18:34:24 ad Exp $   */
>> >  
>> >  /*-
>> > - * Copyright (c) 2002, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
>> > + * Copyright (c) 2002, 2006, 2007, 2008, 2019, 2020 The NetBSD 
>> > Foundation, Inc.
>> >   * All rights reserved.
>> >   *
>> >   * This code is derived from software contributed to The NetBSD Foundation
>> > @@ -45,10 +45,6 @@
>> >   *        rw_tryenter()
>> >   */
>> >  
>> > -#if defined(_KERNEL_OPT)
>> > -#include "opt_lockdebug.h"
>> > -#endif
>> > -
>> >  #if !defined(_KERNEL)
>> >  #include <sys/types.h>
>> >  #include <sys/inttypes.h>
>> > @@ -75,13 +71,9 @@ typedef struct krwlock krwlock_t;
>> >  #define   RW_HAS_WAITERS          0x01UL  /* lock has waiters */
>> >  #define   RW_WRITE_WANTED         0x02UL  /* >= 1 waiter is a writer */
>> >  #define   RW_WRITE_LOCKED         0x04UL  /* lock is currently write locked */
>> > -#if defined(LOCKDEBUG)
>> > -#define   RW_NODEBUG              0x08UL  /* LOCKDEBUG disabled */
>> > -#else
>> > -#define   RW_NODEBUG              0x00UL  /* do nothing */
>> > -#endif    /* LOCKDEBUG */
>> > +#define   RW_NODEBUG              0x10UL  /* LOCKDEBUG disabled */
>> >  
>> > -#define   RW_READ_COUNT_SHIFT     4
>> > +#define   RW_READ_COUNT_SHIFT     5
>> >  #define   RW_READ_INCR            (1UL << RW_READ_COUNT_SHIFT)
>> >  #define   RW_THREAD               ((uintptr_t)-RW_READ_INCR)
>> >  #define   RW_OWNER(rw)            ((rw)->rw_owner & RW_THREAD)
>> > @@ -91,6 +83,7 @@ typedef struct krwlock krwlock_t;
>> >  void      rw_vector_enter(krwlock_t *, const krw_t);
>> >  void      rw_vector_exit(krwlock_t *);
>> >  int       rw_vector_tryenter(krwlock_t *, const krw_t);
>> > +void      _rw_init(krwlock_t *, uintptr_t);
>> >  #endif    /* __RWLOCK_PRIVATE */
>> >  
>> >  struct krwlock {
>> >
>> 
>> -- 
>> Ryo ONODERA // r...@tetera.org
>> PGP fingerprint = 82A2 DC91 76E0 A10A 8ABB  FD1B F404 27FA C7D1 15F3

-- 
Ryo ONODERA // r...@tetera.org
PGP fingerprint = 82A2 DC91 76E0 A10A 8ABB  FD1B F404 27FA C7D1 15F3
