lumpy: clean up a misplaced comment and simplify some code

Move the comment for isolate_lru_pages() back to its function
and add a comment for the new function.  Add some running
commentary on the area scan.  Clean up the switch indentation to
match the majority style in mm/*.  Finally, clarify the boundary
pfn calculations.
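
For example, with order 2 and, say, a tag page at pfn 0x3ea6 (values
purely illustrative), the new calculation reads:

	page_pfn = __page_to_pfn(page);			/* 0x3ea6 */
	pfn      = page_pfn & ~((1 << order) - 1);	/* 0x3ea4, start of the aligned block */
	end_pfn  = pfn + (1 << order);			/* 0x3ea8, one past the end */

so the area scan covers pfns 0x3ea4 through 0x3ea7, the whole
order-aligned block containing the tag page.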

Signed-off-by: Andy Whitcroft <[EMAIL PROTECTED]>
Acked-by: Peter Zijlstra <[EMAIL PROTECTED]>
---
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0f2d961..0effa3e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -606,21 +606,14 @@ keep:
 }
 
 /*
- * zone->lru_lock is heavily contended.  Some of the functions that
- * shrink the lists perform better by taking out a batch of pages
- * and working on them outside the LRU lock.
+ * Attempt to remove the specified page from its LRU.  Only take this
+ * page if it is of the appropriate PageActive status.  Pages which
+ * are being freed elsewhere are also ignored.
  *
- * For pagecache intensive workloads, this function is the hottest
- * spot in the kernel (apart from copy_*_user functions).
- *
- * Appropriate locks must be held before calling this function.
+ * @page:      page to consider
+ * @active:    active/inactive flag only take pages of this type
  *
- * @nr_to_scan:        The number of pages to look through on the list.
- * @src:       The LRU list to pull pages off.
- * @dst:       The temp list to put pages on to.
- * @scanned:   The number of pages that were scanned.
- *
- * returns how many pages were moved onto *@dst.
+ * returns 0 on success, -ve errno on failure.
  */
 int __isolate_lru_page(struct page *page, int active)
 {
@@ -642,6 +635,23 @@ int __isolate_lru_page(struct page *page, int active)
        return ret;
 }
 
+/*
+ * zone->lru_lock is heavily contended.  Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
+ *
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
+ *
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan:        The number of pages to look through on the list.
+ * @src:       The LRU list to pull pages off.
+ * @dst:       The temp list to put pages on to.
+ * @scanned:   The number of pages that were scanned.
+ *
+ * returns how many pages were moved onto *@dst.
+ */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct list_head *src, struct list_head *dst,
                unsigned long *scanned, int order)
@@ -659,26 +669,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                active = PageActive(page);
                switch (__isolate_lru_page(page, active)) {
-                       case 0:
-                               list_move(&page->lru, dst);
-                               nr_taken++;
-                               break;
+               case 0:
+                       list_move(&page->lru, dst);
+                       nr_taken++;
+                       break;
 
-                       case -EBUSY:
-                               /* else it is being freed elsewhere */
-                               list_move(&page->lru, src);
-                               continue;
+               case -EBUSY:
+                       /* else it is being freed elsewhere */
+                       list_move(&page->lru, src);
+                       continue;
 
-                       default:
-                               BUG();
+               default:
+                       BUG();
                }
 
                if (!order)
                        continue;
 
-               page_pfn = pfn = __page_to_pfn(page);
-               end_pfn = pfn &= ~((1 << order) - 1);
-               end_pfn += 1 << order;
+               /*
+                * Attempt to take all pages in the order aligned region
+                * surrounding the tag page.  Only take those pages of
+                * the same active state as that tag page.
+                */
+               page_pfn = __page_to_pfn(page);
+               pfn = page_pfn & ~((1 << order) - 1);
+               end_pfn = pfn + (1 << order);
                for (; pfn < end_pfn; pfn++) {
                        if (unlikely(pfn == page_pfn))
                                continue;
@@ -688,17 +703,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        scan++;
                        tmp = __pfn_to_page(pfn);
                        switch (__isolate_lru_page(tmp, active)) {
-                               case 0:
-                                       list_move(&tmp->lru, dst);
-                                       nr_taken++;
-                                       continue;
-
-                               case -EBUSY:
-                                       /* else it is being freed elsewhere */
-                                       list_move(&tmp->lru, src);
-                               default:
-                                       break;
+                       case 0:
+                               list_move(&tmp->lru, dst);
+                               nr_taken++;
+                               continue;
 
+                       case -EBUSY:
+                               /* else it is being freed elsewhere */
+                               list_move(&tmp->lru, src);
+                       default:
+                               break;
                        }
                        break;
                }
-