Use page_cache_xxx in mm/truncate.c

Replace the open-coded PAGE_CACHE_SIZE / PAGE_CACHE_SHIFT arithmetic in
mm/truncate.c with the page_cache_size(), page_cache_offset(),
page_cache_index(), page_cache_next() and page_cache_pos() helpers.  The
helpers take the address_space, so the page cache entry size is no longer
hard-coded to a single page; truncate_partial_page() grows a mapping
argument for the same reason.

Signed-off-by: Christoph Lameter <[EMAIL PROTECTED]>
---
 mm/truncate.c |   35 ++++++++++++++++++-----------------
 1 files changed, 18 insertions(+), 17 deletions(-)
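
[Note: for readers without the rest of the series applied, here is a minimal
sketch of what the page_cache_xxx helpers used below are assumed to look
like.  The helper names and the fact that they take the address_space come
from this patch itself; the bodies are only a hypothetical baseline that
reduces to the existing PAGE_CACHE_* constants.  The real definitions come
from the page_cache_xxx helper patch earlier in the series and may derive
the size per mapping.]

static inline unsigned int page_cache_size(struct address_space *mapping)
{
	/* bytes per page cache entry; PAGE_CACHE_SIZE in the base case */
	return PAGE_CACHE_SIZE;
}

static inline unsigned int page_cache_offset(struct address_space *mapping,
					     loff_t pos)
{
	/* byte offset of pos within its page cache entry */
	return pos & (PAGE_CACHE_SIZE - 1);
}

static inline pgoff_t page_cache_index(struct address_space *mapping,
				       loff_t pos)
{
	/* page cache index containing pos (rounds down) */
	return pos >> PAGE_CACHE_SHIFT;
}

static inline pgoff_t page_cache_next(struct address_space *mapping,
				      loff_t pos)
{
	/* index of the first entry starting at or after pos (rounds up) */
	return page_cache_index(mapping, pos + page_cache_size(mapping) - 1);
}

static inline loff_t page_cache_pos(struct address_space *mapping,
				    pgoff_t index, unsigned long offset)
{
	/* byte position of offset within the entry at index */
	return ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
}
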

diff --git a/mm/truncate.c b/mm/truncate.c
index bf8068d..8c3d32e 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -45,9 +45,10 @@ void do_invalidatepage(struct page *page, unsigned long offset)
                (*invalidatepage)(page, offset);
 }
 
-static inline void truncate_partial_page(struct page *page, unsigned partial)
+static inline void truncate_partial_page(struct address_space *mapping,
+                       struct page *page, unsigned partial)
 {
-       zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+       zero_user_segment(page, partial, page_cache_size(mapping));
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
 }
@@ -95,7 +96,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
        if (page->mapping != mapping)
                return;
 
-       cancel_dirty_page(page, PAGE_CACHE_SIZE);
+       cancel_dirty_page(page, page_cache_size(mapping));
 
        if (PagePrivate(page))
                do_invalidatepage(page, 0);
@@ -157,9 +158,9 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
 {
-       const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+       const pgoff_t start = page_cache_next(mapping, lstart);
        pgoff_t end;
-       const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
+       const unsigned partial = page_cache_offset(mapping, lstart);
        struct pagevec pvec;
        pgoff_t next;
        int i;
@@ -167,8 +168,9 @@ void truncate_inode_pages_range(struct address_space *mapping,
        if (mapping->nrpages == 0)
                return;
 
-       BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
-       end = (lend >> PAGE_CACHE_SHIFT);
+       BUG_ON(page_cache_offset(mapping, lend) !=
+                               page_cache_size(mapping) - 1);
+       end = page_cache_index(mapping, lend);
 
        pagevec_init(&pvec, 0);
        next = start;
@@ -194,8 +196,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        }
                        if (page_mapped(page)) {
                                unmap_mapping_range(mapping,
-                                 (loff_t)page_index<<PAGE_CACHE_SHIFT,
-                                 PAGE_CACHE_SIZE, 0);
+                                 page_cache_pos(mapping, page_index, 0),
+                                 page_cache_size(mapping), 0);
                        }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
@@ -208,7 +210,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        wait_on_page_writeback(page);
-                       truncate_partial_page(page, partial);
+                       truncate_partial_page(mapping, page, partial);
                        unlock_page(page);
                        page_cache_release(page);
                }
@@ -236,8 +238,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        wait_on_page_writeback(page);
                        if (page_mapped(page)) {
                                unmap_mapping_range(mapping,
-                                 (loff_t)page->index<<PAGE_CACHE_SHIFT,
-                                 PAGE_CACHE_SIZE, 0);
+                                 page_cache_pos(mapping, page->index, 0),
+                                 page_cache_size(mapping), 0);
                        }
                        if (page->index > next)
                                next = page->index;
@@ -421,9 +423,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
-                                          (loff_t)page_index<<PAGE_CACHE_SHIFT,
-                                          (loff_t)(end - page_index + 1)
-                                                       << PAGE_CACHE_SHIFT,
+                                          page_cache_pos(mapping, page_index, 0),
+                                          page_cache_pos(mapping, end - page_index + 1, 0),
                                            0);
                                        did_range_unmap = 1;
                                } else {
@@ -431,8 +432,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
-                                         (loff_t)page_index<<PAGE_CACHE_SHIFT,
-                                         PAGE_CACHE_SIZE, 0);
+                                         page_cache_pos(mapping, page_index, 0),
+                                         page_cache_size(mapping), 0);
                                }
                        }
                        BUG_ON(page_mapped(page));
-- 
1.5.2.4
