From: Ira Weiny <ira.we...@intel.com>

Replace the kmap()/mem*()/kunmap() pattern with the new mem*_page()
helpers, which perform the temporary page mapping and unmapping
internally.

Signed-off-by: Ira Weiny <ira.we...@intel.com>
---
 fs/hfsplus/bnode.c | 53 +++++++++++++---------------------------------
 1 file changed, 15 insertions(+), 38 deletions(-)

diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 177fae4e6581..c4347b1cb36f 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -29,14 +29,12 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
        off &= ~PAGE_MASK;
 
        l = min_t(int, len, PAGE_SIZE - off);
-       memcpy(buf, kmap(*pagep) + off, l);
-       kunmap(*pagep);
+       memcpy_from_page(buf, *pagep, off, l);
 
        while ((len -= l) != 0) {
                buf += l;
                l = min_t(int, len, PAGE_SIZE);
-               memcpy(buf, kmap(*++pagep), l);
-               kunmap(*pagep);
+               memcpy_from_page(buf, *++pagep, 0, l);
        }
 }
 
@@ -82,16 +80,14 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
        off &= ~PAGE_MASK;
 
        l = min_t(int, len, PAGE_SIZE - off);
-       memcpy(kmap(*pagep) + off, buf, l);
+       memcpy_to_page(*pagep, off, buf, l);
        set_page_dirty(*pagep);
-       kunmap(*pagep);
 
        while ((len -= l) != 0) {
                buf += l;
                l = min_t(int, len, PAGE_SIZE);
-               memcpy(kmap(*++pagep), buf, l);
+               memcpy_to_page(*++pagep, 0, buf, l);
                set_page_dirty(*pagep);
-               kunmap(*pagep);
        }
 }
 
@@ -112,15 +108,13 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
        off &= ~PAGE_MASK;
 
        l = min_t(int, len, PAGE_SIZE - off);
-       memset(kmap(*pagep) + off, 0, l);
+       memzero_page(*pagep, off, l);
        set_page_dirty(*pagep);
-       kunmap(*pagep);
 
        while ((len -= l) != 0) {
                l = min_t(int, len, PAGE_SIZE);
-               memset(kmap(*++pagep), 0, l);
+               memzero_page(*++pagep, 0, l);
                set_page_dirty(*pagep);
-               kunmap(*pagep);
        }
 }
 
@@ -142,17 +136,13 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
 
        if (src == dst) {
                l = min_t(int, len, PAGE_SIZE - src);
-               memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
-               kunmap(*src_page);
+               memcpy_page(*dst_page, src, *src_page, src, l);
                set_page_dirty(*dst_page);
-               kunmap(*dst_page);
 
                while ((len -= l) != 0) {
                        l = min_t(int, len, PAGE_SIZE);
-                       memcpy(kmap(*++dst_page), kmap(*++src_page), l);
-                       kunmap(*src_page);
+                       memcpy_page(*++dst_page, 0, *++src_page, 0, l);
                        set_page_dirty(*dst_page);
-                       kunmap(*dst_page);
                }
        } else {
                void *src_ptr, *dst_ptr;
@@ -202,21 +192,16 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
                if (src == dst) {
                        while (src < len) {
-                               memmove(kmap(*dst_page), kmap(*src_page), src);
-                               kunmap(*src_page);
+                               memmove_page(*dst_page, 0, *src_page, 0, src);
                                set_page_dirty(*dst_page);
-                               kunmap(*dst_page);
                                len -= src;
                                src = PAGE_SIZE;
                                src_page--;
                                dst_page--;
                        }
                        src -= len;
-                       memmove(kmap(*dst_page) + src,
-                               kmap(*src_page) + src, len);
-                       kunmap(*src_page);
+                       memmove_page(*dst_page, src, *src_page, src, len);
                        set_page_dirty(*dst_page);
-                       kunmap(*dst_page);
                } else {
                        void *src_ptr, *dst_ptr;
 
@@ -251,19 +236,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
 
                if (src == dst) {
                        l = min_t(int, len, PAGE_SIZE - src);
-                       memmove(kmap(*dst_page) + src,
-                               kmap(*src_page) + src, l);
-                       kunmap(*src_page);
+                       memmove_page(*dst_page, src, *src_page, src, l);
                        set_page_dirty(*dst_page);
-                       kunmap(*dst_page);
 
                        while ((len -= l) != 0) {
                                l = min_t(int, len, PAGE_SIZE);
-                               memmove(kmap(*++dst_page),
-                                       kmap(*++src_page), l);
-                               kunmap(*src_page);
+                               memmove_page(*++dst_page, 0, *++src_page, 0, l);
                                set_page_dirty(*dst_page);
-                               kunmap(*dst_page);
                        }
                } else {
                        void *src_ptr, *dst_ptr;
@@ -593,14 +572,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
        }
 
        pagep = node->page;
-       memset(kmap(*pagep) + node->page_offset, 0,
-              min_t(int, PAGE_SIZE, tree->node_size));
+       memzero_page(*pagep, node->page_offset,
+                    min_t(int, PAGE_SIZE, tree->node_size));
        set_page_dirty(*pagep);
-       kunmap(*pagep);
        for (i = 1; i < tree->pages_per_bnode; i++) {
-               memset(kmap(*++pagep), 0, PAGE_SIZE);
+               memzero_page(*++pagep, 0, PAGE_SIZE);
                set_page_dirty(*pagep);
-               kunmap(*pagep);
        }
        clear_bit(HFS_BNODE_NEW, &node->flags);
        wake_up(&node->lock_wq);
-- 
2.28.0.rc0.12.gb6a658bd00c9

Reply via email to