This patch modifies get_dirty_limits() to calculate the limit of dirty
pages from vm.dirty_limit_ratio, and changes the function's interface so
that this limit is returned to callers through a new output parameter.
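
As the hunks below show, the prototype becomes:

    static void
    get_dirty_limits(long *pbackground, long *pdirty, long *plimit,
                     struct address_space *mapping)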

If mapped memory grows large and limit_ratio (derived from
vm.dirty_limit_ratio) is clamped down as a result, dirty_ratio is
decreased by the same amount so that writeback is kept independent
among disks.
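
For illustration, here is a minimal standalone sketch of the ratio
arithmetic performed in get_dirty_limits(); the input values are
hypothetical and the program itself is not part of the patch:

  /*
   * Standalone sketch of the ratio arithmetic in get_dirty_limits().
   * The values below are hypothetical; the kernel reads them from the
   * vm.dirty_limit_ratio and vm.dirty_ratio sysctls and computes the
   * unmapped ratio from the current page counters.
   */
  #include <stdio.h>

  int main(void)
  {
          int dirty_limit_ratio = 40;     /* hypothetical vm.dirty_limit_ratio */
          int vm_dirty_ratio = 20;        /* hypothetical vm.dirty_ratio */
          int unmapped_ratio = 60;        /* 60% of dirtyable memory is unmapped */
          int limit_ratio, dirty_ratio;

          /* the limit may not exceed half of the unmapped memory */
          limit_ratio = dirty_limit_ratio;
          if (limit_ratio > unmapped_ratio / 2)
                  limit_ratio = unmapped_ratio / 2;

          /* dirty_ratio is capped by dirty_limit_ratio ... */
          dirty_ratio = vm_dirty_ratio;
          if (dirty_ratio > dirty_limit_ratio)
                  dirty_ratio = dirty_limit_ratio;
          /* ... and then shrinks by as much as limit_ratio was clamped */
          dirty_ratio -= dirty_limit_ratio - limit_ratio;

          /* 5% floor, per the comment above get_dirty_limits() */
          if (limit_ratio < 5)
                  limit_ratio = 5;
          if (dirty_ratio < 5)
                  dirty_ratio = 5;

          /* prints "limit_ratio=30 dirty_ratio=10" for the values above */
          printf("limit_ratio=%d dirty_ratio=%d\n", limit_ratio, dirty_ratio);
          return 0;
  }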

Signed-off-by: Tomoki Sekiyama <[EMAIL PROTECTED]>
Signed-off-by: Yuji Kakutani <[EMAIL PROTECTED]>
---
 mm/page-writeback.c |   37 +++++++++++++++++++++++++++++--------
 1 file changed, 29 insertions(+), 8 deletions(-)

Index: linux-2.6.20-mm2-bdp/mm/page-writeback.c
===================================================================
--- linux-2.6.20-mm2-bdp.orig/mm/page-writeback.c
+++ linux-2.6.20-mm2-bdp/mm/page-writeback.c
@@ -117,6 +117,9 @@ static void background_writeout(unsigned
  * performing lots of scanning.
  *
  * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ * `dirty_limit_ratio' is ignored if it is larger than that.
+ * In this case, `dirty_ratio' is decreased to keep writeback independent
+ * among disks.
  *
  * We don't permit the clamping level to fall below 5% - that is getting rather
  * excessive.
@@ -163,14 +166,16 @@ static unsigned long determine_dirtyable
 }

 static void
-get_dirty_limits(long *pbackground, long *pdirty,
+get_dirty_limits(long *pbackground, long *pdirty, long *plimit,
                                        struct address_space *mapping)
 {
        int background_ratio;           /* Percentages */
        int dirty_ratio;
+       int limit_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
+       long limit;
        unsigned long available_memory = determine_dirtyable_memory();
        struct task_struct *tsk;

@@ -178,10 +183,17 @@ get_dirty_limits(long *pbackground, long
                                global_page_state(NR_ANON_PAGES)) * 100) /
                                        available_memory;

+       limit_ratio = dirty_limit_ratio;
+       if (limit_ratio > unmapped_ratio / 2)
+               limit_ratio = unmapped_ratio / 2;
+
        dirty_ratio = vm_dirty_ratio;
-       if (dirty_ratio > unmapped_ratio / 2)
-               dirty_ratio = unmapped_ratio / 2;
+       if (dirty_ratio > dirty_limit_ratio)
+               dirty_ratio = dirty_limit_ratio;
+       dirty_ratio -= dirty_limit_ratio - limit_ratio;

+       if (limit_ratio < 5)
+               limit_ratio = 5;
        if (dirty_ratio < 5)
                dirty_ratio = 5;

@@ -191,13 +203,16 @@ get_dirty_limits(long *pbackground, long

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
+       limit = (limit_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
+               limit += limit / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
+       *plimit = limit;
 }

 /*
@@ -212,6 +227,7 @@ static void balance_dirty_pages(struct a
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
+       long dirty_limit;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

@@ -226,7 +242,8 @@ static void balance_dirty_pages(struct a
                        .range_cyclic   = 1,
                };

-               get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+               get_dirty_limits(&background_thresh, &dirty_thresh,
+                                                       &dirty_limit, mapping);
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
@@ -244,8 +261,8 @@ static void balance_dirty_pages(struct a
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
-                       get_dirty_limits(&background_thresh,
-                                               &dirty_thresh, mapping);
+                       get_dirty_limits(&background_thresh, &dirty_thresh,
+                                                       &dirty_limit, mapping);
                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                        if (nr_reclaimable +
@@ -335,9 +352,11 @@ void throttle_vm_writeout(void)
 {
        long background_thresh;
        long dirty_thresh;
+       long dirty_limit;

         for ( ; ; ) {
-               get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+               get_dirty_limits(&background_thresh, &dirty_thresh,
+                                                       &dirty_limit, NULL);

                 /*
                  * Boost the allowable dirty threshold a bit for page
@@ -372,8 +391,10 @@ static void background_writeout(unsigned
        for ( ; ; ) {
                long background_thresh;
                long dirty_thresh;
+               long dirty_limit;

-               get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+               get_dirty_limits(&background_thresh, &dirty_thresh,
+                                                       &dirty_limit, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
