Re: [PATCH] sched: Enhance readability of iterating wake_list

2017-02-10 Thread Byungchul Park
On Fri, Feb 10, 2017 at 08:55:23AM +0100, Peter Zijlstra wrote:
> On Fri, Feb 10, 2017 at 01:09:31PM +0900, Byungchul Park wrote:
> > +#define for_each_wake_list(task, node) \
> > +   for ((task) = llist_entry((node), struct task_struct, wake_entry); \
> > +node; (node) = llist_next(node), \
> > +(task) = llist_entry((node), struct task_struct, wake_entry))
> > +
> 
> How about you make that llist_for_each(pos, member) or similar and
> replace all while (foo) { foo = llist_next(foo); } instances?
> 
> Because most llist_next() users have the same pattern.

I did what you recommended, like the following.

Would it be better if I split the patch into several ones?
Or keep it one patch?

-8<-
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 864e673..7d5286b 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -70,21 +70,10 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
list = llist_del_all(&wait_list->list);
 
/* We first reverse the list to preserve FIFO ordering and fairness */
-
-   while (list) {
-   struct llist_node *t = list;
-   list = llist_next(list);
-
-   t->next = reverse;
-   reverse = t;
-   }
+   reverse = llist_reverse_order(list);
 
/* Then do the wakeups */
-
-   while (reverse) {
-   cl = container_of(reverse, struct closure, list);
-   reverse = llist_next(reverse);
-
+   llist_for_each_entry(cl, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 36c13e4..c82243a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -359,11 +359,9 @@ static int release_stripe_list(struct r5conf *conf,
 
head = llist_del_all(&conf->released_stripes);
head = llist_reverse_order(head);
-   while (head) {
+   llist_for_each_entry(sh, head, release_list) {
int hash;
 
-   sh = llist_entry(head, struct stripe_head, release_list);
-   head = llist_next(head);
/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */
smp_mb();
clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310c..280b912 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -501,9 +501,7 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
 
mutex_lock(&vq->mutex);
llnode = llist_del_all(&vs->vs_event_list);
-   while (llnode) {
-   evt = llist_entry(llnode, struct vhost_scsi_evt, list);
-   llnode = llist_next(llnode);
+   llist_for_each_entry(evt, llnode, list) {
vhost_scsi_do_evt_work(vs, evt);
vhost_scsi_free_evt(vs, evt);
}
@@ -529,10 +527,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work 
*work)
 
bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
llnode = llist_del_all(&vs->vs_completion_list);
-   while (llnode) {
-   cmd = llist_entry(llnode, struct vhost_scsi_cmd,
-tvc_completion_list);
-   llnode = llist_next(llnode);
+   llist_for_each_entry(cmd, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
 
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
diff --git a/fs/file_table.c b/fs/file_table.c
index 6d982b5..8800153 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -232,11 +232,10 @@ static void delayed_fput(struct work_struct *unused)
 {
struct llist_node *node = llist_del_all(&delayed_fput_list);
struct llist_node *next;
+   struct file *f;
 
-   for (; node; node = next) {
-   next = llist_next(node);
-   __fput(llist_entry(node, struct file, f_u.fu_llist));
-   }
+   llist_for_each_entry(f, node, f_u.fu_llist)
+   __fput(f);
 }
 
 static void fput(struct callback_head *work)
@@ -310,7 +309,7 @@ void put_filp(struct file *file)
 }
 
 void __init files_init(void)
-{ 
+{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
@@ -329,4 +328,4 @@ void __init files_maxfiles_init(void)
n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-} 
+}
diff --git a/fs/namespace.c b/fs/namespace.c
index b5b1259..0caf954 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1083,11 +1083,10 @@ static void delayed_mntput(struct work_struct *unused)
 {
struct llist_node *node = llist_del_all(&delayed_mntput_list);
struct llist_node *next;
+   struct mount *m;
 
-   for (; node; node = next) {
-   next = 

Re: [PATCH] sched: Enhance readability of iterating wake_list

2017-02-10 Thread Byungchul Park
On Fri, Feb 10, 2017 at 08:55:23AM +0100, Peter Zijlstra wrote:
> On Fri, Feb 10, 2017 at 01:09:31PM +0900, Byungchul Park wrote:
> > +#define for_each_wake_list(task, node) \
> > +   for ((task) = llist_entry((node), struct task_struct, wake_entry); \
> > +node; (node) = llist_next(node), \
> > +(task) = llist_entry((node), struct task_struct, wake_entry))
> > +
> 
> How about you make that llist_for_each(pos, member) or similar and
> replace all while (foo) { foo = llist_next(foo); } instances?
> 
> Because most llist_next() users have the same pattern.

I did what you recommended, like the following.

Would it be better if I split the patch into several ones?
Or keep it one patch?

-8<-
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 864e673..7d5286b 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -70,21 +70,10 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
list = llist_del_all(&wait_list->list);
 
/* We first reverse the list to preserve FIFO ordering and fairness */
-
-   while (list) {
-   struct llist_node *t = list;
-   list = llist_next(list);
-
-   t->next = reverse;
-   reverse = t;
-   }
+   reverse = llist_reverse_order(list);
 
/* Then do the wakeups */
-
-   while (reverse) {
-   cl = container_of(reverse, struct closure, list);
-   reverse = llist_next(reverse);
-
+   llist_for_each_entry(cl, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 36c13e4..c82243a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -359,11 +359,9 @@ static int release_stripe_list(struct r5conf *conf,
 
head = llist_del_all(&conf->released_stripes);
head = llist_reverse_order(head);
-   while (head) {
+   llist_for_each_entry(sh, head, release_list) {
int hash;
 
-   sh = llist_entry(head, struct stripe_head, release_list);
-   head = llist_next(head);
/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */
smp_mb();
clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310c..280b912 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -501,9 +501,7 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
 
mutex_lock(&vq->mutex);
llnode = llist_del_all(&vs->vs_event_list);
-   while (llnode) {
-   evt = llist_entry(llnode, struct vhost_scsi_evt, list);
-   llnode = llist_next(llnode);
+   llist_for_each_entry(evt, llnode, list) {
vhost_scsi_do_evt_work(vs, evt);
vhost_scsi_free_evt(vs, evt);
}
@@ -529,10 +527,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work 
*work)
 
bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
llnode = llist_del_all(&vs->vs_completion_list);
-   while (llnode) {
-   cmd = llist_entry(llnode, struct vhost_scsi_cmd,
-tvc_completion_list);
-   llnode = llist_next(llnode);
+   llist_for_each_entry(cmd, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
 
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
diff --git a/fs/file_table.c b/fs/file_table.c
index 6d982b5..8800153 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -232,11 +232,10 @@ static void delayed_fput(struct work_struct *unused)
 {
struct llist_node *node = llist_del_all(&delayed_fput_list);
struct llist_node *next;
+   struct file *f;
 
-   for (; node; node = next) {
-   next = llist_next(node);
-   __fput(llist_entry(node, struct file, f_u.fu_llist));
-   }
+   llist_for_each_entry(f, node, f_u.fu_llist)
+   __fput(f);
 }
 
 static void fput(struct callback_head *work)
@@ -310,7 +309,7 @@ void put_filp(struct file *file)
 }
 
 void __init files_init(void)
-{ 
+{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
@@ -329,4 +328,4 @@ void __init files_maxfiles_init(void)
n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-} 
+}
diff --git a/fs/namespace.c b/fs/namespace.c
index b5b1259..0caf954 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1083,11 +1083,10 @@ static void delayed_mntput(struct work_struct *unused)
 {
struct llist_node *node = llist_del_all(&delayed_mntput_list);
struct llist_node *next;
+   struct mount *m;
 
-   for (; node; node = next) {
-   next = 

Re: [PATCH] sched: Enhance readability of iterating wake_list

2017-02-10 Thread Byungchul Park
On Fri, Feb 10, 2017 at 08:55:23AM +0100, Peter Zijlstra wrote:
> On Fri, Feb 10, 2017 at 01:09:31PM +0900, Byungchul Park wrote:
> > +#define for_each_wake_list(task, node) \
> > +   for ((task) = llist_entry((node), struct task_struct, wake_entry); \
> > +node; (node) = llist_next(node), \
> > +(task) = llist_entry((node), struct task_struct, wake_entry))
> > +
> 
> How about you make that llist_for_each(pos, member) or similar and
> replace all while (foo) { foo = llist_next(foo); } instances?
> 
> Because most llist_next() users have the same pattern.

Yes, it would be better. I will do it.

Thank you.


Re: [PATCH] sched: Enhance readability of iterating wake_list

2017-02-10 Thread Byungchul Park
On Fri, Feb 10, 2017 at 08:55:23AM +0100, Peter Zijlstra wrote:
> On Fri, Feb 10, 2017 at 01:09:31PM +0900, Byungchul Park wrote:
> > +#define for_each_wake_list(task, node) \
> > +   for ((task) = llist_entry((node), struct task_struct, wake_entry); \
> > +node; (node) = llist_next(node), \
> > +(task) = llist_entry((node), struct task_struct, wake_entry))
> > +
> 
> How about you make that llist_for_each(pos, member) or similar and
> replace all while (foo) { foo = llist_next(foo); } instances?
> 
> Because most llist_next() users have the same pattern.

Yes, it would be better. I will do it.

Thank you.


Re: [PATCH] sched: Enhance readability of iterating wake_list

2017-02-09 Thread Peter Zijlstra
On Fri, Feb 10, 2017 at 01:09:31PM +0900, Byungchul Park wrote:
> +#define for_each_wake_list(task, node) \
> + for ((task) = llist_entry((node), struct task_struct, wake_entry); \
> +  node; (node) = llist_next(node), \
> +  (task) = llist_entry((node), struct task_struct, wake_entry))
> +

How about you make that llist_for_each(pos, member) or similar and
replace all while (foo) { foo = llist_next(foo); } instances?

Because most llist_next() users have the same pattern.


Re: [PATCH] sched: Enhance readability of iterating wake_list

2017-02-09 Thread Peter Zijlstra
On Fri, Feb 10, 2017 at 01:09:31PM +0900, Byungchul Park wrote:
> +#define for_each_wake_list(task, node) \
> + for ((task) = llist_entry((node), struct task_struct, wake_entry); \
> +  node; (node) = llist_next(node), \
> +  (task) = llist_entry((node), struct task_struct, wake_entry))
> +

How about you make that llist_for_each(pos, member) or similar and
replace all while (foo) { foo = llist_next(foo); } instances?

Because most llist_next() users have the same pattern.