4.12-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Nikolay Borisov <[email protected]>

commit 3e8f399da490e6ac20a3cfd6aa404c9aa961a9a2 upstream.

Currently the writeback statistics code uses percpu counters to hold
various statistics.  Furthermore we have two families of functions - those
which disable local irq and those which don't and whose names begin
with a double underscore.  However, both families end up calling
__add_wb_stat, which in turn calls percpu_counter_add_batch, which is
already irq-safe.
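
For reference, the pre-patch helpers in include/linux/backing-dev.h (visible
in the removed lines of the hunk below) look roughly like this, with the
irq-save/restore wrapper layered on top of an already irq-safe percpu update:

static inline void __add_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item, s64 amount)
{
        /* percpu_counter_add_batch() is itself irq-safe */
        percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void __inc_wb_stat(struct bdi_writeback *wb,
                                 enum wb_stat_item item)
{
        __add_wb_stat(wb, item, 1);
}

static inline void inc_wb_stat(struct bdi_writeback *wb,
                               enum wb_stat_item item)
{
        unsigned long flags;

        /* redundant: the underlying counter update is already irq-safe */
        local_irq_save(flags);
        __inc_wb_stat(wb, item);
        local_irq_restore(flags);
}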

Exploiting this fact allows us to eliminate the __wb_* functions, since
they don't add any protection beyond what we already have.
Furthermore, refactor the wb_* functions to call __add_wb_stat directly,
without the irq-disabling dance.  This will likely result in better
runtime for code which modifies the stat counters.
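
After the change the wrappers reduce to plain calls into __add_wb_stat(),
as in this sketch of the resulting helpers (matching the added lines in the
backing-dev.h hunk below):

static inline void inc_wb_stat(struct bdi_writeback *wb,
                               enum wb_stat_item item)
{
        __add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb,
                               enum wb_stat_item item)
{
        __add_wb_stat(wb, item, -1);
}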

While at it, also document why percpu_counter_add_batch is in fact
preempt- and irq-safe, since at least three people have been confused by it.
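
For context, percpu_counter_add_batch() at the time has roughly the following
shape (paraphrased from lib/percpu_counter.c; details may differ slightly
between kernel versions): the fast path is a this_cpu_add() under
preempt_disable(), and the slow path takes fbc->lock with
raw_spin_lock_irqsave(), which is where the irq-safety comes from:

void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                unsigned long flags;

                /* slow path: fold into the global count under an irq-safe lock */
                raw_spin_lock_irqsave(&fbc->lock, flags);
                fbc->count += count;
                __this_cpu_sub(*fbc->counters, count - amount);
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        } else {
                /* fast path: this_cpu_add() is irq-safe by definition */
                this_cpu_add(*fbc->counters, amount);
        }
        preempt_enable();
}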

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Nikolay Borisov <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Cc: Josef Bacik <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Jeff Layton <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Mel Gorman <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 fs/fs-writeback.c           |    8 ++++----
 include/linux/backing-dev.h |   24 ++----------------------
 lib/percpu_counter.c        |    7 +++++++
 mm/page-writeback.c         |   10 +++++-----
 4 files changed, 18 insertions(+), 31 deletions(-)

--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -380,8 +380,8 @@ static void inode_switch_wbs_work_fn(str
                struct page *page = radix_tree_deref_slot_protected(slot,
                                                        &mapping->tree_lock);
                if (likely(page) && PageDirty(page)) {
-                       __dec_wb_stat(old_wb, WB_RECLAIMABLE);
-                       __inc_wb_stat(new_wb, WB_RECLAIMABLE);
+                       dec_wb_stat(old_wb, WB_RECLAIMABLE);
+                       inc_wb_stat(new_wb, WB_RECLAIMABLE);
                }
        }
 
@@ -391,8 +391,8 @@ static void inode_switch_wbs_work_fn(str
                                                        &mapping->tree_lock);
                if (likely(page)) {
                        WARN_ON_ONCE(!PageWriteback(page));
-                       __dec_wb_stat(old_wb, WB_WRITEBACK);
-                       __inc_wb_stat(new_wb, WB_WRITEBACK);
+                       dec_wb_stat(old_wb, WB_WRITEBACK);
+                       inc_wb_stat(new_wb, WB_WRITEBACK);
                }
        }
 
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -69,34 +69,14 @@ static inline void __add_wb_stat(struct
        percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
 }
 
-static inline void __inc_wb_stat(struct bdi_writeback *wb,
-                                enum wb_stat_item item)
-{
-       __add_wb_stat(wb, item, 1);
-}
-
 static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __inc_wb_stat(wb, item);
-       local_irq_restore(flags);
-}
-
-static inline void __dec_wb_stat(struct bdi_writeback *wb,
-                                enum wb_stat_item item)
-{
-       __add_wb_stat(wb, item, -1);
+       __add_wb_stat(wb, item, 1);
 }
 
 static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __dec_wb_stat(wb, item);
-       local_irq_restore(flags);
+       __add_wb_stat(wb, item, -1);
 }
 
 static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,6 +72,13 @@ void percpu_counter_set(struct percpu_co
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
+/**
+ * This function is both preempt and irq safe. The former is due to explicit
+ * preemption disable. The latter is guaranteed by the fact that the slow path
+ * is explicitly protected by an irq-safe spinlock whereas the fast path uses
+ * this_cpu_add which is irq-safe by definition. Hence there is no need to muck
+ * with irq state before calling this one.
+ */
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
        s64 count;
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -601,7 +601,7 @@ static inline void __wb_writeout_inc(str
 {
        struct wb_domain *cgdom;
 
-       __inc_wb_stat(wb, WB_WRITTEN);
+       inc_wb_stat(wb, WB_WRITTEN);
        wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
                               wb->bdi->max_prop_frac);
 
@@ -2437,8 +2437,8 @@ void account_page_dirtied(struct page *p
                __inc_node_page_state(page, NR_FILE_DIRTY);
                __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
                __inc_node_page_state(page, NR_DIRTIED);
-               __inc_wb_stat(wb, WB_RECLAIMABLE);
-               __inc_wb_stat(wb, WB_DIRTIED);
+               inc_wb_stat(wb, WB_RECLAIMABLE);
+               inc_wb_stat(wb, WB_DIRTIED);
                task_io_account_write(PAGE_SIZE);
                current->nr_dirtied++;
                this_cpu_inc(bdp_ratelimits);
@@ -2745,7 +2745,7 @@ int test_clear_page_writeback(struct pag
                        if (bdi_cap_account_writeback(bdi)) {
                                struct bdi_writeback *wb = inode_to_wb(inode);
 
-                               __dec_wb_stat(wb, WB_WRITEBACK);
+                               dec_wb_stat(wb, WB_WRITEBACK);
                                __wb_writeout_inc(wb);
                        }
                }
@@ -2791,7 +2791,7 @@ int __test_set_page_writeback(struct pag
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi))
-                               __inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
+                               inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
 
                        /*
                         * We can come through here when swapping anonymous

