Now that the infrastructure is in place, remove this awkward mutex
which serializes all faulting tasks.

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
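
For context, the mutex existed so that two CPUs racing to instantiate the
same page in the page cache would not trigger spurious allocation failures.
The fault path already rechecks the pte under the page table lock, so the
loser of such a race simply backs out. A simplified sketch of that recheck
pattern (condensed from the hugetlb_no_page() flow, not the literal kernel
code) looks like:

	static int no_page_sketch(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
	{
		struct page *page = alloc_huge_page(vma, address, 0);

		if (IS_ERR(page))
			return VM_FAULT_OOM;

		spin_lock(&mm->page_table_lock);
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			/* Another task instantiated the page first: back out. */
			spin_unlock(&mm->page_table_lock);
			put_page(page);
			return 0;
		}
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, page, 0));
		spin_unlock(&mm->page_table_lock);
		return 0;
	}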

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 843c554..6edf423 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2595,9 +2595,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 
 /*
  * Hugetlb_cow() should be called with page lock of the original hugepage held.
- * Called with hugetlb_instantiation_mutex held and pte_page locked so we
- * cannot race with other handlers or page migration.
- * Keep the pte_same checks anyway to make transition from the mutex easier.
+ * Called with pte_page locked so we cannot race with page migration.
  */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte,
@@ -2941,7 +2939,6 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        int ret;
        struct page *page = NULL;
        struct page *pagecache_page = NULL;
-       static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
 
        address &= huge_page_mask(h);
@@ -2961,17 +2958,9 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!ptep)
                return VM_FAULT_OOM;
 
-       /*
-        * Serialize hugepage allocation and instantiation, so that we don't
-        * get spurious allocation failures if two CPUs race to instantiate
-        * the same page in the page cache.
-        */
-       mutex_lock(&hugetlb_instantiation_mutex);
        entry = huge_ptep_get(ptep);
-       if (huge_pte_none(entry)) {
-               ret = hugetlb_no_page(mm, vma, address, ptep, flags);
-               goto out_mutex;
-       }
+       if (huge_pte_none(entry))
+               return hugetlb_no_page(mm, vma, address, ptep, flags);
 
        ret = 0;
 
@@ -2984,10 +2973,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * consumed.
         */
        if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
-               if (vma_needs_reservation(h, vma, address) < 0) {
-                       ret = VM_FAULT_OOM;
-                       goto out_mutex;
-               }
+               if (vma_needs_reservation(h, vma, address) < 0)
+                       return VM_FAULT_OOM;
 
                if (!(vma->vm_flags & VM_MAYSHARE))
                        pagecache_page = hugetlbfs_pagecache_page(h,
@@ -3037,9 +3024,6 @@ out_ptl:
                unlock_page(page);
        put_page(page);
 
-out_mutex:
-       mutex_unlock(&hugetlb_instantiation_mutex);
-
        return ret;
 }
 
-- 
1.7.9.5
