[patch 7/10] mm: cleanup pagecache insertion operations

2007-01-12 Thread Nick Piggin
Quite a bit of code is used in maintaining these "cached pages" that are
probably pretty unlikely to get used. It would require a narrow race where
the page is inserted concurrently while this process is allocating a page
in order to create the spare page. Then a multi-page write into an uncached
part of the file, to make use of it.

Next, the buffered write path (and others) uses its own LRU pagevec when it
should be just using the per-CPU LRU pagevec (which will cut down on both data
and code size cacheline footprint). Also, these private LRU pagevecs are
emptied after just a very short time, in contrast with the per-CPU pagevecs
that are persistent. Net result: 7.3 times fewer lru_lock acquisitions required
to add the pages to pagecache for a bulk write (in 4K chunks).

Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>

Index: linux-2.6/mm/filemap.c
===
--- linux-2.6.orig/mm/filemap.c
+++ linux-2.6/mm/filemap.c
@@ -686,26 +686,22 @@ EXPORT_SYMBOL(find_lock_page);
 struct page *find_or_create_page(struct address_space *mapping,
unsigned long index, gfp_t gfp_mask)
 {
-   struct page *page, *cached_page = NULL;
+   struct page *page;
int err;
 repeat:
page = find_lock_page(mapping, index);
if (!page) {
-   if (!cached_page) {
-   cached_page = alloc_page(gfp_mask);
-   if (!cached_page)
-   return NULL;
-   }
-   err = add_to_page_cache_lru(cached_page, mapping,
-   index, gfp_mask);
-   if (!err) {
-   page = cached_page;
-   cached_page = NULL;
-   } else if (err == -EEXIST)
-   goto repeat;
+   page = alloc_page(gfp_mask);
+   if (!page)
+   return NULL;
+   err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
+   if (unlikely(err)) {
+   page_cache_release(page);
+   page = NULL;
+   if (err == -EEXIST)
+   goto repeat;
+   }
}
-   if (cached_page)
-   page_cache_release(cached_page);
return page;
 }
 EXPORT_SYMBOL(find_or_create_page);
@@ -891,11 +887,9 @@ void do_generic_mapping_read(struct addr
unsigned long next_index;
unsigned long prev_index;
loff_t isize;
-   struct page *cached_page;
int error;
struct file_ra_state ra = *_ra;
 
-   cached_page = NULL;
index = *ppos >> PAGE_CACHE_SHIFT;
next_index = index;
prev_index = ra.prev_page;
@@ -1059,23 +1053,20 @@ no_cached_page:
 * Ok, it wasn't cached, so we need to create a new
 * page..
 */
-   if (!cached_page) {
-   cached_page = page_cache_alloc_cold(mapping);
-   if (!cached_page) {
-   desc->error = -ENOMEM;
-   goto out;
-   }
+   page = page_cache_alloc_cold(mapping);
+   if (!page) {
+   desc->error = -ENOMEM;
+   goto out;
}
-   error = add_to_page_cache_lru(cached_page, mapping,
+   error = add_to_page_cache_lru(page, mapping,
index, GFP_KERNEL);
if (error) {
+   page_cache_release(page);
if (error == -EEXIST)
goto find_page;
desc->error = error;
goto out;
}
-   page = cached_page;
-   cached_page = NULL;
goto readpage;
}
 
@@ -1083,8 +1074,6 @@ out:
*_ra = ra;
 
*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
-   if (cached_page)
-   page_cache_release(cached_page);
if (filp)
file_accessed(filp);
 }
@@ -1542,35 +1531,28 @@ static inline struct page *__read_cache_
int (*filler)(void *,struct page*),
void *data)
 {
-   struct page *page, *cached_page = NULL;
+   struct page *page;
int err;
 repeat:
page = find_get_page(mapping, index);
if (!page) {
-   if (!cached_page) {
-   cached_page = page_cache_alloc_cold(mapping);
-   if (!cached_page)
-   return ERR_PTR(-ENOMEM);
-   }
-   err = add_to_page_cache_lru(cached_page, mapping,
-   index, GFP_KERNEL);
-   if (err == -EEXIST)
-   goto repeat;

[patch 7/10] mm: cleanup pagecache insertion operations

2007-01-12 Thread Nick Piggin
Quite a bit of code is used in maintaining these "cached pages" that are
probably pretty unlikely to get used. It would require a narrow race where
the page is inserted concurrently while this process is allocating a page
in order to create the spare page. Then a multi-page write into an uncached
part of the file, to make use of it.

Next, the buffered write path (and others) uses its own LRU pagevec when it
should be just using the per-CPU LRU pagevec (which will cut down on both data
and code size cacheline footprint). Also, these private LRU pagevecs are
emptied after just a very short time, in contrast with the per-CPU pagevecs
that are persistent. Net result: 7.3 times fewer lru_lock acquisitions required
to add the pages to pagecache for a bulk write (in 4K chunks).

Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>

Index: linux-2.6/mm/filemap.c
===
--- linux-2.6.orig/mm/filemap.c
+++ linux-2.6/mm/filemap.c
@@ -686,26 +686,22 @@ EXPORT_SYMBOL(find_lock_page);
 struct page *find_or_create_page(struct address_space *mapping,
unsigned long index, gfp_t gfp_mask)
 {
-   struct page *page, *cached_page = NULL;
+   struct page *page;
int err;
 repeat:
page = find_lock_page(mapping, index);
if (!page) {
-   if (!cached_page) {
-   cached_page = alloc_page(gfp_mask);
-   if (!cached_page)
-   return NULL;
-   }
-   err = add_to_page_cache_lru(cached_page, mapping,
-   index, gfp_mask);
-   if (!err) {
-   page = cached_page;
-   cached_page = NULL;
-   } else if (err == -EEXIST)
-   goto repeat;
+   page = alloc_page(gfp_mask);
+   if (!page)
+   return NULL;
+   err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
+   if (unlikely(err)) {
+   page_cache_release(page);
+   page = NULL;
+   if (err == -EEXIST)
+   goto repeat;
+   }
}
-   if (cached_page)
-   page_cache_release(cached_page);
return page;
 }
 EXPORT_SYMBOL(find_or_create_page);
@@ -891,11 +887,9 @@ void do_generic_mapping_read(struct addr
unsigned long next_index;
unsigned long prev_index;
loff_t isize;
-   struct page *cached_page;
int error;
struct file_ra_state ra = *_ra;
 
-   cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;
next_index = index;
prev_index = ra.prev_page;
@@ -1059,23 +1053,20 @@ no_cached_page:
 * Ok, it wasn't cached, so we need to create a new
 * page..
 */
-   if (!cached_page) {
-   cached_page = page_cache_alloc_cold(mapping);
-   if (!cached_page) {
-   desc->error = -ENOMEM;
-   goto out;
-   }
+   page = page_cache_alloc_cold(mapping);
+   if (!page) {
+   desc->error = -ENOMEM;
+   goto out;
}
-   error = add_to_page_cache_lru(cached_page, mapping,
+   error = add_to_page_cache_lru(page, mapping,
index, GFP_KERNEL);
if (error) {
+   page_cache_release(page);
if (error == -EEXIST)
goto find_page;
	desc->error = error;
goto out;
}
-   page = cached_page;
-   cached_page = NULL;
goto readpage;
}
 
@@ -1083,8 +1074,6 @@ out:
*_ra = ra;
 
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
-   if (cached_page)
-   page_cache_release(cached_page);
if (filp)
file_accessed(filp);
 }
@@ -1542,35 +1531,28 @@ static inline struct page *__read_cache_
int (*filler)(void *,struct page*),
void *data)
 {
-   struct page *page, *cached_page = NULL;
+   struct page *page;
int err;
 repeat:
page = find_get_page(mapping, index);
if (!page) {
-   if (!cached_page) {
-   cached_page = page_cache_alloc_cold(mapping);
-   if (!cached_page)
-   return ERR_PTR(-ENOMEM);
-   }
-   err = add_to_page_cache_lru(cached_page, mapping,
-   index, GFP_KERNEL);
-   if (err == -EEXIST)
-   goto repeat;
-