Against: 2.6.24-rc3-mm2, plus:
+ lts' convert anon_vma list lock to reader/writer lock patch
+ Nick Piggin's move and rework isolate_lru_page() patch

Free swap cache entries when swapping in pages if vm_swap_full()
(i.e. more than half of swap space is in use).  A new pagevec helper,
pagevec_swap_free(), batches the frees outside the zone lru_lock to
reduce pressure on that lock.
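
For reference, vm_swap_full() just tests whether more than half of the
total swap space is occupied.  A sketch of the 2.6.24-era definition in
include/linux/swap.h, shown for illustration only and not part of this
patch:

	/*
	 * nr_swap_pages counts free swap pages, so this is true once
	 * more than half of the configured swap space is in use.
	 */
	#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)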

Signed-off-by: Rik van Riel <[EMAIL PROTECTED]>
Signed-off-by: Lee Schermerhorn <[EMAIL PROTECTED]>

Index: linux-2.6.24-rc3-mm2/mm/vmscan.c
===================================================================
--- linux-2.6.24-rc3-mm2.orig/mm/vmscan.c
+++ linux-2.6.24-rc3-mm2/mm/vmscan.c
@@ -632,6 +632,9 @@ free_it:
                continue;
 
 activate_locked:
+               /* Not a candidate for swapping, so reclaim swap space. */
+               if (PageSwapCache(page) && vm_swap_full())
+                       remove_exclusive_swap_page(page);
                SetPageActive(page);
                pgactivate++;
 keep_locked:
@@ -1213,6 +1216,8 @@ static void shrink_active_list(unsigned 
                        __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
                        pgmoved = 0;
                        spin_unlock_irq(&zone->lru_lock);
+                       if (vm_swap_full())
+                               pagevec_swap_free(&pvec);
                        __pagevec_release(&pvec);
                        spin_lock_irq(&zone->lru_lock);
                }
@@ -1222,6 +1227,8 @@ static void shrink_active_list(unsigned 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        __count_vm_events(PGDEACTIVATE, pgdeactivate);
        spin_unlock_irq(&zone->lru_lock);
+       if (vm_swap_full())
+               pagevec_swap_free(&pvec);
 
        pagevec_release(&pvec);
 }
Index: linux-2.6.24-rc3-mm2/mm/swap.c
===================================================================
--- linux-2.6.24-rc3-mm2.orig/mm/swap.c
+++ linux-2.6.24-rc3-mm2/mm/swap.c
@@ -465,6 +465,24 @@ void pagevec_strip(struct pagevec *pvec)
        }
 }
 
+/*
+ * Try to free swap space from the pages in a pagevec
+ */
+void pagevec_swap_free(struct pagevec *pvec)
+{
+       int i;
+
+       for (i = 0; i < pagevec_count(pvec); i++) {
+               struct page *page = pvec->pages[i];
+
+               if (PageSwapCache(page) && !TestSetPageLocked(page)) {
+                       if (PageSwapCache(page))
+                               remove_exclusive_swap_page(page);
+                       unlock_page(page);
+               }
+       }
+}
+
 /**
  * pagevec_lookup - gang pagecache lookup
  * @pvec:      Where the resulting pages are placed
Index: linux-2.6.24-rc3-mm2/include/linux/pagevec.h
===================================================================
--- linux-2.6.24-rc3-mm2.orig/include/linux/pagevec.h
+++ linux-2.6.24-rc3-mm2/include/linux/pagevec.h
@@ -26,6 +26,7 @@ void __pagevec_free(struct pagevec *pvec
 void __pagevec_lru_add(struct pagevec *pvec);
 void __pagevec_lru_add_active(struct pagevec *pvec);
 void pagevec_strip(struct pagevec *pvec);
+void pagevec_swap_free(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,

-- 
All Rights Reversed
