From: "Kirill A. Shutemov" <kirill.shute...@linux.intel.com>

Signed-off-by: Kirill A. Shutemov <kirill.shute...@linux.intel.com>
---
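Reviewer note (not part of the patch): a minimal standalone sketch of how
the aligned address kept in 'haddr' relates to the original fault address,
assuming 2 MiB huge pages; the addresses below are made up for illustration.

#include <stdio.h>

int main(void)
{
	/* Made-up fault address somewhere inside a huge page. */
	unsigned long address = 0x7f12345678UL;

	/* huge_page_mask(h) for a 2 MiB huge page: clear the low 21 bits. */
	unsigned long mask = ~((1UL << 21) - 1);

	/* What hugetlb_fault() computes as 'haddr'. */
	unsigned long haddr = address & mask;

	printf("fault address: %#lx\n", address);	/* 0x7f12345678 */
	printf("haddr:         %#lx\n", haddr);		/* 0x7f12200000 */
	return 0;
}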
 mm/hugetlb.c |   38 +++++++++++++++++++-------------------
 1 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bc72712..3c86d3d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2672,7 +2672,8 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 }
 
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                       unsigned long address, pte_t *ptep, unsigned int flags)
+                       unsigned long haddr, unsigned long fault_address,
+                       pte_t *ptep, unsigned int flags)
 {
        struct hstate *h = hstate_vma(vma);
        int ret = VM_FAULT_SIGBUS;
@@ -2696,7 +2697,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        mapping = vma->vm_file->f_mapping;
-       idx = vma_hugecache_offset(h, vma, address);
+       idx = vma_hugecache_offset(h, vma, haddr);
 
        /*
         * Use page lock to guard against racing truncation
@@ -2708,7 +2709,7 @@ retry:
                size = i_size_read(mapping->host) >> huge_page_shift(h);
                if (idx >= size)
                        goto out;
-               page = alloc_huge_page(vma, address, 0);
+               page = alloc_huge_page(vma, haddr, 0);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        if (ret == -ENOMEM)
@@ -2717,7 +2718,7 @@ retry:
                                ret = VM_FAULT_SIGBUS;
                        goto out;
                }
-               clear_huge_page(page, address, pages_per_huge_page(h));
+               clear_huge_page(page, haddr, pages_per_huge_page(h));
                __SetPageUptodate(page);
 
                if (vma->vm_flags & VM_MAYSHARE) {
@@ -2763,7 +2764,7 @@ retry:
         * the spinlock.
         */
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
-               if (vma_needs_reservation(h, vma, address) < 0) {
+               if (vma_needs_reservation(h, vma, haddr) < 0) {
                        ret = VM_FAULT_OOM;
                        goto backout_unlocked;
                }
@@ -2778,16 +2779,16 @@ retry:
                goto backout;
 
        if (anon_rmap)
-               hugepage_add_new_anon_rmap(page, vma, address);
+               hugepage_add_new_anon_rmap(page, vma, haddr);
        else
                page_dup_rmap(page);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
-       set_huge_pte_at(mm, address, ptep, new_pte);
+       set_huge_pte_at(mm, haddr, ptep, new_pte);
 
        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
-               ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
+               ret = hugetlb_cow(mm, vma, haddr, ptep, new_pte, page);
        }
 
        spin_unlock(&mm->page_table_lock);
@@ -2813,21 +2814,20 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *pagecache_page = NULL;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
+       unsigned long haddr = address & huge_page_mask(h);
 
-       address &= huge_page_mask(h);
-
-       ptep = huge_pte_offset(mm, address);
+       ptep = huge_pte_offset(mm, haddr);
        if (ptep) {
                entry = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_migration(entry))) {
-                       migration_entry_wait(mm, (pmd_t *)ptep, address);
+                       migration_entry_wait(mm, (pmd_t *)ptep, haddr);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
        }
 
-       ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+       ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
        if (!ptep)
                return VM_FAULT_OOM;
 
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = huge_ptep_get(ptep);
        if (huge_pte_none(entry)) {
-               ret = hugetlb_no_page(mm, vma, address, ptep, flags);
+               ret = hugetlb_no_page(mm, vma, haddr, address, ptep, flags);
                goto out_mutex;
        }
 
@@ -2854,14 +2854,14 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * consumed.
         */
        if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
-               if (vma_needs_reservation(h, vma, address) < 0) {
+               if (vma_needs_reservation(h, vma, haddr) < 0) {
                        ret = VM_FAULT_OOM;
                        goto out_mutex;
                }
 
                if (!(vma->vm_flags & VM_MAYSHARE))
                        pagecache_page = hugetlbfs_pagecache_page(h,
-                                                               vma, address);
+                                                               vma, haddr);
        }
 
        /*
@@ -2884,16 +2884,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry)) {
-                       ret = hugetlb_cow(mm, vma, address, ptep, entry,
+                       ret = hugetlb_cow(mm, vma, haddr, ptep, entry,
                                                        pagecache_page);
                        goto out_page_table_lock;
                }
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+       if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
                                                flags & FAULT_FLAG_WRITE))
-               update_mmu_cache(vma, address, ptep);
+               update_mmu_cache(vma, haddr, ptep);
 
 out_page_table_lock:
        spin_unlock(&mm->page_table_lock);
-- 
1.7.7.6
