Hi Jason,

[auto build test ERROR on tip/locking/core]
[also build test ERROR on v4.8-rc1 next-20160809]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jason-Low/locking-mutex-Prevent-lock-starvation-when-spinning-is-enabled/20160811-034327
config: x86_64-randconfig-x013-201632 (attached as .config)
compiler: gcc-6 (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All errors/warnings (new ones prefixed by >>):

   kernel/locking/mutex.c: In function 'do_yield_to_waiter':
>> kernel/locking/mutex.c:441:10: error: 'struct mutex' has no member named 'yield_to_waiter'
     if (lock->yield_to_waiter != true)
             ^~
   kernel/locking/mutex.c:442:7: error: 'struct mutex' has no member named 'yield_to_waiter'
      lock->yield_to_waiter = true;
          ^~
   kernel/locking/mutex.c: In function 'clear_yield_to_waiter':
   kernel/locking/mutex.c:447:6: error: 'struct mutex' has no member named 'yield_to_waiter'
     lock->yield_to_waiter = false;
         ^~
   kernel/locking/mutex.c: In function 'need_yield_to_waiter':
   kernel/locking/mutex.c:452:13: error: 'struct mutex' has no member named 'yield_to_waiter'
     return lock->yield_to_waiter;
                ^~
   kernel/locking/mutex.c: In function '__mutex_lock_common':
>> kernel/locking/mutex.c:607:3: warning: this 'if' clause does not guard... [-Wmisleading-indentation]
      if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
      ^~
   kernel/locking/mutex.c:613:4: note: ...this statement, but the latter is misleadingly indented as if it is guarded by the 'if'
       break;
       ^~~~~
   kernel/locking/mutex.c: In function 'need_yield_to_waiter':
>> kernel/locking/mutex.c:453:1: warning: control reaches end of non-void function [-Wreturn-type]
    }
    ^
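
Note on the errors above: the three "no member named 'yield_to_waiter'" errors share one root cause. In this randconfig, struct mutex is built without the new field, while the helpers in mutex.c still dereference it. The -Wreturn-type warning at line 453 looks like a follow-on of the same error, since the failed member access invalidates the return statement. Below is a minimal sketch of one way to keep the two sides consistent; the CONFIG_MUTEX_SPIN_ON_OWNER guard is an assumption, not necessarily what the patch uses. The point is only that the field and its helpers need the same guard, with no-op stubs for configs that lack the field.

    /* Illustrative sketch only, not the submitted patch. */

    /* include/linux/mutex.h */
    struct mutex {
            atomic_t                count;
            spinlock_t              wait_lock;
            struct list_head        wait_list;
    #ifdef CONFIG_MUTEX_SPIN_ON_OWNER       /* assumed guard; must match mutex.c */
            bool                    yield_to_waiter;
    #endif
            /* ... remaining members unchanged ... */
    };

    /* kernel/locking/mutex.c */
    #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    static inline bool need_yield_to_waiter(struct mutex *lock)
    {
            return lock->yield_to_waiter;
    }
    #else
    static inline bool need_yield_to_waiter(struct mutex *lock)
    {
            return false;   /* no optimistic spinners to yield to in this config */
    }
    #endif
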

vim +441 kernel/locking/mutex.c

   435  {
   436          *wakeups += 1;
   437  
   438          if (*wakeups < MUTEX_WAKEUP_THRESHOLD)
   439                  return;
   440  
 > 441          if (lock->yield_to_waiter != true)
   442                  lock->yield_to_waiter = true;
   443  }
   444  
   445  static inline void clear_yield_to_waiter(struct mutex *lock)
   446  {
 > 447          lock->yield_to_waiter = false;
   448  }
   449  
   450  static inline bool need_yield_to_waiter(struct mutex *lock)
   451  {
   452          return lock->yield_to_waiter;
 > 453  }
   454  #endif
   455  
   456  __visible __used noinline
   457  void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
   458  
   459  /**
   460   * mutex_unlock - release the mutex
   461   * @lock: the mutex to be released
   462   *
   463   * Unlock a mutex that has been locked by this task previously.
   464   *
   465   * This function must not be used in interrupt context. Unlocking
   466   * of a not locked mutex is not allowed.
   467   *
   468   * This function is similar to (but not equivalent to) up().
   469   */
   470  void __sched mutex_unlock(struct mutex *lock)
   471  {
   472          /*
   473           * The unlocking fastpath is the 0->1 transition from 'locked'
   474           * into 'unlocked' state:
   475           */
   476  #ifndef CONFIG_DEBUG_MUTEXES
   477          /*
   478           * When debugging is enabled we must not clear the owner before time,
   479           * the slow path will always be taken, and that clears the owner field
   480           * after verifying that it was indeed current.
   481           */
   482          mutex_clear_owner(lock);
   483  #endif
   484          __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
   485  }
   486  
   487  EXPORT_SYMBOL(mutex_unlock);
   488  
   489  /**
   490   * ww_mutex_unlock - release the w/w mutex
   491   * @lock: the mutex to be released
   492   *
   493   * Unlock a mutex that has been locked by this task previously with any of the
   494   * ww_mutex_lock* functions (with or without an acquire context). It is
   495   * forbidden to release the locks after releasing the acquire context.
   496   *
   497   * This function must not be used in interrupt context. Unlocking
   498   * of a unlocked mutex is not allowed.
   499   */
   500  void __sched ww_mutex_unlock(struct ww_mutex *lock)
   501  {
   502          /*
   503           * The unlocking fastpath is the 0->1 transition from 'locked'
   504           * into 'unlocked' state:
   505           */
   506          if (lock->ctx) {
   507  #ifdef CONFIG_DEBUG_MUTEXES
   508                  DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
   509  #endif
   510                  if (lock->ctx->acquired > 0)
   511                          lock->ctx->acquired--;
   512                  lock->ctx = NULL;
   513          }
   514  
   515  #ifndef CONFIG_DEBUG_MUTEXES
   516          /*
   517           * When debugging is enabled we must not clear the owner before time,
   518           * the slow path will always be taken, and that clears the owner field
   519           * after verifying that it was indeed current.
   520           */
   521          mutex_clear_owner(&lock->base);
   522  #endif
   523          __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
   524  }
   525  EXPORT_SYMBOL(ww_mutex_unlock);
   526  
   527  static inline int __sched
   528  __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
   529  {
   530          struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
   531          struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
   532  
   533          if (!hold_ctx)
   534                  return 0;
   535  
   536          if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
   537              (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
   538  #ifdef CONFIG_DEBUG_MUTEXES
   539                  DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
   540                  ctx->contending_lock = ww;
   541  #endif
   542                  return -EDEADLK;
   543          }
   544  
   545          return 0;
   546  }
   547  
   548  /*
   549   * Lock a mutex (possibly interruptible), slowpath:
   550   */
   551  static __always_inline int __sched
   552  __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
   553                      struct lockdep_map *nest_lock, unsigned long ip,
   554                      struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
   555  {
   556          struct task_struct *task = current;
   557          struct mutex_waiter waiter;
   558          unsigned long flags;
   559          int ret;
   560          int wakeups = 0;
   561  
   562          if (use_ww_ctx) {
   563                  struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
   564                  if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
   565                          return -EALREADY;
   566          }
   567  
   568          preempt_disable();
   569          mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
   570  
   571          if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
   572                  /* got the lock, yay! */
   573                  preempt_enable();
   574                  return 0;
   575          }
   576  
   577          spin_lock_mutex(&lock->wait_lock, flags);
   578  
   579          /*
   580           * Once more, try to acquire the lock. Only try-lock the mutex if
   581           * it is unlocked to reduce unnecessary xchg() operations.
   582           */
   583          if (!need_yield_to_waiter(lock) && !mutex_is_locked(lock) &&
   584              (atomic_xchg_acquire(&lock->count, 0) == 1))
   585                  goto skip_wait;
   586  
   587          debug_mutex_lock_common(lock, &waiter);
   588          debug_mutex_add_waiter(lock, &waiter, task);
   589  
   590          /* add waiting tasks to the end of the waitqueue (FIFO): */
   591          list_add_tail(&waiter.list, &lock->wait_list);
   592          waiter.task = task;
   593  
   594          lock_contended(&lock->dep_map, ip);
   595  
   596          for (;;) {
   597                  /*
   598                   * Lets try to take the lock again - this is needed even if
   599                   * we get here for the first time (shortly after failing to
   600                   * acquire the lock), to make sure that we get a wakeup once
   601                   * it's unlocked. Later on, if we sleep, this is the
   602                   * operation that gives us the lock. We xchg it to -1, so
   603                   * that when we release the lock, we properly wake up the
   604                   * other waiters. We only attempt the xchg if the count is
   605                   * non-negative in order to avoid unnecessary xchg operations:
   606                   */
 > 607                  if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
   608                      atomic_read(&lock->count) >= 0 &&
   609                      (atomic_xchg_acquire(&lock->count, -1) == 1))
   610                          if (wakeups > 1)
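
Note on the -Wmisleading-indentation warning at line 607: the outer if is unbraced, so it guards only the first statement after it, while the break at line 613 is indented as if it were also guarded. Bracing the outer condition, roughly as sketched below, makes the intended scope explicit and silences the warning. The excerpt stops at line 610, so the clear_yield_to_waiter() call in the inner branch is only an assumption about what that code does.

        for (;;) {
                /* ... */
                if ((!need_yield_to_waiter(lock) || wakeups > 1) &&
                    atomic_read(&lock->count) >= 0 &&
                    (atomic_xchg_acquire(&lock->count, -1) == 1)) {
                        if (wakeups > 1)
                                clear_yield_to_waiter(lock);    /* assumed body; not visible in the excerpt */
                        break;
                }
                /* ... rest of the wait loop unchanged ... */
        }
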

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

