Pass the head page to zero_user_segment(), not the tail page, and adjust
the byte offsets appropriately.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/shmem.c    | 7 +++++++
 mm/truncate.c | 7 +++++++
 2 files changed, 14 insertions(+)

diff --git a/mm/shmem.c b/mm/shmem.c
index 271548ca20f3..77982149b437 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -958,11 +958,18 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ);
                if (page) {
+                       struct page *head = thp_head(page);
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
                        }
+                       if (head != page) {
+                               unsigned int diff = start - 1 - head->index;
+                               partial_start += diff << PAGE_SHIFT;
+                               top += diff << PAGE_SHIFT;
+                               page = head;
+                       }
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
diff --git a/mm/truncate.c b/mm/truncate.c
index dd9ebc1da356..152974888124 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -374,12 +374,19 @@ void truncate_inode_pages_range(struct address_space *mapping,
        if (partial_start) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
+                       struct page *head = thp_head(page);
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                /* Truncation within a single page */
                                top = partial_end;
                                partial_end = 0;
                        }
+                       if (head != page) {
+                               unsigned int diff = start - 1 - head->index;
+                               partial_start += diff << PAGE_SHIFT;
+                               top += diff << PAGE_SHIFT;
+                               page = head;
+                       }
                        wait_on_page_writeback(page);
                        zero_user_segment(page, partial_start, top);
                        cleancache_invalidate_page(mapping, page);
-- 
2.28.0

Reply via email to