On Wed, Dec 11, 2013 at 08:06:37PM -0500, Paul Gortmaker wrote:
> +/*
> + * Event API
> + */
> +#define __swait_event(wq, condition)                                 \
> +do {                                                                 \
> +     DEFINE_SWAITER(__wait);                                         \
> +                                                                     \
> +     for (;;) {                                                      \
> +             swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE);      \
> +             if (condition)                                          \
> +                     break;                                          \
> +             schedule();                                             \
> +     }                                                               \
> +     swait_finish(&wq, &__wait);                                     \
> +} while (0)
> +
> +#define __swait_event_interruptible(wq, condition, ret)              \
> +do {                                                                 \
> +     DEFINE_SWAITER(__wait);                                         \
> +                                                                     \
> +     for (;;) {                                                      \
> +             swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE);        \
> +             if (condition)                                          \
> +                     break;                                          \
> +             if (signal_pending(current)) {                          \
> +                     ret = -ERESTARTSYS;                             \
> +                     break;                                          \
> +             }                                                       \
> +             schedule();                                             \
> +     }                                                               \
> +     swait_finish(&wq, &__wait);                                     \
> +} while (0)
> +
> +#define __swait_event_interruptible_timeout(wq, condition, ret)      \
> +do {                                                                 \
> +     DEFINE_SWAITER(__wait);                                         \
> +                                                                     \
> +     for (;;) {                                                      \
> +             swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE);        \
> +             if (condition)                                          \
> +                     break;                                          \
> +             if (signal_pending(current)) {                          \
> +                     ret = -ERESTARTSYS;                             \
> +                     break;                                          \
> +             }                                                       \
> +             ret = schedule_timeout(ret);                            \
> +             if (!ret)                                               \
> +                     break;                                          \
> +     }                                                               \
> +     swait_finish(&wq, &__wait);                                     \
> +} while (0)

Urgh, please have a look at ___wait_event(); we just killed all of this
pointless replication for the normal waitqueues, so please don't add
more of it. These three macros are the same loop with two knobs: the
task state and what to do around schedule().
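
Something along these lines is what I mean -- a sketch only, reusing
the DEFINE_SWAITER()/swait_prepare()/swait_finish() names from this
patch; the exact factoring is illustrative, not a finished proposal:

#define ___swait_event(wq, condition, state, ret, cmd)                  \
({                                                                      \
	DEFINE_SWAITER(__wait);                                         \
	long __ret = ret;                                               \
                                                                        \
	for (;;) {                                                      \
		swait_prepare(&wq, &__wait, state);                     \
		if (condition)                                          \
			break;                                          \
		/* constant-folded away for uninterruptible waits */    \
		if (state == TASK_INTERRUPTIBLE &&                      \
		    signal_pending(current)) {                          \
			__ret = -ERESTARTSYS;                           \
			break;                                          \
		}                                                       \
		cmd;                                                    \
	}                                                               \
	swait_finish(&wq, &__wait);                                     \
	__ret;                                                          \
})

Then the three variants above collapse to:

#define __swait_event(wq, condition)                                    \
	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,    \
			     schedule())

#define __swait_event_interruptible(wq, condition, ret)                 \
	ret = ___swait_event(wq, condition, TASK_INTERRUPTIBLE, ret,    \
			     schedule())

#define __swait_event_interruptible_timeout(wq, condition, ret)         \
	ret = ___swait_event(wq, condition, TASK_INTERRUPTIBLE, ret,    \
			     __ret = schedule_timeout(__ret);           \
			     if (!__ret)                                \
				     break)

and any future variant is a one-liner instead of yet another copy of
the loop.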


> +unsigned int
> +__swake_up_locked(struct swait_queue_head *head, unsigned int state,
> +               unsigned int num)
> +{
> +     struct swaiter *curr, *next;
> +     int woken = 0;
> +
> +     list_for_each_entry_safe(curr, next, &head->task_list, node) {
> +             if (wake_up_state(curr->task, state)) {
> +                     __swait_dequeue(curr);
> +                     /*
> +                      * The waiting task can free the waiter as
> +                      * soon as curr->task = NULL is written,
> +                      * without taking any locks. A memory barrier
> +                      * is required here to prevent the following
> +                      * store to curr->task from getting ahead of
> +                      * the dequeue operation.
> +                      */
> +                     smp_wmb();
> +                     curr->task = NULL;
> +                     if (++woken == num)
> +                             break;
> +             }
> +     }
> +     return woken;
> +}
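
(On the barrier pairing here: I assume the waiter side re-reads ->task
and, if it is still set, dequeues itself under the lock -- something
like the sketch below; this is my guess at the shape of swait_finish(),
not code quoted from the patch:

static inline void
swait_finish(struct swait_queue_head *head, struct swaiter *w)
{
	__set_current_state(TASK_RUNNING);
	/*
	 * ->task == NULL means the waker already dequeued us; the
	 * smp_wmb() in __swake_up_locked() orders the dequeue before
	 * that store, so the stack-based waiter is safe to reuse.
	 * Otherwise we must remove ourselves under the lock, and
	 * re-check ->task there to close the race with a concurrent
	 * wakeup.
	 */
	if (w->task) {
		unsigned long flags;

		raw_spin_lock_irqsave(&head->lock, flags);
		if (w->task)
			__swait_dequeue(w);
		raw_spin_unlock_irqrestore(&head->lock, flags);
	}
}

If that is the intent, it would be worth spelling it out in the
comment.)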
> +
> +unsigned int
> +__swake_up(struct swait_queue_head *head, unsigned int state, unsigned int num)
> +{
> +     unsigned long flags;
> +     int woken;
> +
> +     if (!swaitqueue_active(head))
> +             return 0;
> +
> +     raw_spin_lock_irqsave(&head->lock, flags);
> +     woken = __swake_up_locked(head, state, num);
> +     raw_spin_unlock_irqrestore(&head->lock, flags);
> +     return woken;
> +}
> +EXPORT_SYMBOL(__swake_up);

Urgh, fail. Do not put unbounded loops under a raw spinlock; this walks
the entire waiter list with interrupts disabled, for however long that
list happens to be.

I think I posted a patch a while back to cure this.
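
The idea there was to wake in bounded batches and drop the lock (and
re-enable interrupts) in between. Roughly -- a from-memory sketch,
where SWAKE_BATCH and the exact loop shape are illustrative, not the
actual patch:

#define SWAKE_BATCH	20	/* max wakeups per lock hold */

unsigned int
__swake_up(struct swait_queue_head *head, unsigned int state,
	   unsigned int num)
{
	unsigned long flags;
	unsigned int woken = 0, chunk;

	/* num == 0 means wake everybody, as in the ++woken == num test */
	while (swaitqueue_active(head)) {
		chunk = num ? min_t(unsigned int, num - woken, SWAKE_BATCH)
			    : SWAKE_BATCH;

		raw_spin_lock_irqsave(&head->lock, flags);
		chunk = __swake_up_locked(head, state, chunk);
		raw_spin_unlock_irqrestore(&head->lock, flags);

		if (!chunk)	/* nobody left in a wakeable state */
			break;
		woken += chunk;
		if (num && woken >= num)
			break;
	}
	return woken;
}

That keeps the irq-off section O(SWAKE_BATCH) no matter how many tasks
are queued.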

