Author: kib
Date: Fri Aug  8 17:12:03 2014
New Revision: 269728
URL: http://svnweb.freebsd.org/changeset/base/269728

Log:
  Change the pmap_enter(9) interface to take a flags parameter and a
  superpage mapping size (currently unused).  The flags include the
  fault access bits, the wired flag as PMAP_ENTER_WIRED, and a new
  flag PMAP_ENTER_NOSLEEP to indicate that the pmap should not sleep.
  
  For powerpc aim, both 32 and 64 bit, fix the implementation to
  ensure that the requested mapping is created when PMAP_ENTER_NOSLEEP
  is not specified; in particular, wait for the memory required to
  proceed to become available.
  
  In collaboration with:        alc
  Tested by:    nwhitehorn (ppc aim32 and booke)
  Sponsored by: The FreeBSD Foundation and EMC / Isilon Storage Division
  MFC after:    2 weeks
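
  As an illustrative sketch (not part of the commit), a hypothetical
  caller of the new interface could look like the following.  The name
  example_enter and the choice of protection bits are assumptions for
  illustration, object locking and page busying are elided, and the
  trailing 0 is the currently unused superpage size index:

	static int
	example_enter(pmap_t pmap, vm_offset_t va, vm_page_t m)
	{
		int rv;

		/* Try to create a wired mapping without sleeping. */
		rv = pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_WRITE | PMAP_ENTER_WIRED | PMAP_ENTER_NOSLEEP, 0);
		if (rv == KERN_RESOURCE_SHORTAGE) {
			/*
			 * The pmap could not allocate memory, e.g. for a
			 * page table page.  Per the log message above, a
			 * retry without PMAP_ENTER_NOSLEEP waits for memory
			 * inside pmap_enter() and creates the mapping.
			 */
			rv = pmap_enter(pmap, va, m,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_WRITE | PMAP_ENTER_WIRED, 0);
		}
		return (rv);
	}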

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap-v6.c
  head/sys/arm/arm/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/powerpc/powerpc/pmap_dispatch.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/vm/pmap.h
  head/sys/vm/vm_fault.c
  head/sys/vm/vm_kern.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/amd64/amd64/pmap.c Fri Aug  8 17:12:03 2014        (r269728)
@@ -4116,9 +4116,9 @@ setpte:
  *     or lose information.  That is, this routine must actually
  *     insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
        struct rwlock *lock;
        pd_entry_t *pde;
@@ -4127,6 +4127,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        pv_entry_t pv;
        vm_paddr_t opa, pa;
        vm_page_t mpte, om;
+       boolean_t nosleep;
 
        PG_A = pmap_accessed_bit(pmap);
        PG_G = pmap_global_bit(pmap);
@@ -4143,10 +4144,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
            va >= kmi.clean_eva,
            ("pmap_enter: managed mapping within the clean submap"));
        if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-               VM_OBJECT_ASSERT_WLOCKED(m->object);
+               VM_OBJECT_ASSERT_LOCKED(m->object);
        pa = VM_PAGE_TO_PHYS(m);
        newpte = (pt_entry_t)(pa | PG_A | PG_V);
-       if ((access & VM_PROT_WRITE) != 0)
+       if ((flags & VM_PROT_WRITE) != 0)
                newpte |= PG_M;
        if ((prot & VM_PROT_WRITE) != 0)
                newpte |= PG_RW;
@@ -4154,7 +4155,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
            ("pmap_enter: access includes VM_PROT_WRITE but prot doesn't"));
        if ((prot & VM_PROT_EXECUTE) == 0)
                newpte |= pg_nx;
-       if (wired)
+       if ((flags & PMAP_ENTER_WIRED) != 0)
                newpte |= PG_W;
        if (va < VM_MAXUSER_ADDRESS)
                newpte |= PG_U;
@@ -4196,7 +4197,15 @@ retry:
                 * Here if the pte page isn't mapped, or if it has been
                 * deallocated.
                 */
-               mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
+               nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+               mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
+                   nosleep ? NULL : &lock);
+               if (mpte == NULL && nosleep) {
+                       KASSERT(lock == NULL, ("lock leaked for nosleep"));
+                       PMAP_UNLOCK(pmap);
+                       rw_runlock(&pvh_global_lock);
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
                goto retry;
        } else
                panic("pmap_enter: invalid page directory va=%#lx", va);
@@ -4328,6 +4337,7 @@ unchanged:
                rw_wunlock(lock);
        rw_runlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
+       return (KERN_SUCCESS);
 }
 
 /*

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c  Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/arm/arm/pmap-v6.c  Fri Aug  8 17:12:03 2014        (r269728)
@@ -231,8 +231,8 @@ static boolean_t    pmap_pv_insert_section(
 static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
 static int             pmap_pvh_wired_mappings(struct md_page *, int);
 
-static void            pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
-    vm_page_t, vm_prot_t, boolean_t, int);
+static int             pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int);
 static vm_paddr_t      pmap_extract_locked(pmap_t pmap, vm_offset_t va);
 static void            pmap_alloc_l1(pmap_t);
 static void            pmap_free_l1(pmap_t);
@@ -2934,35 +2934,38 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
  *     insert this page into the given map NOW.
  */
 
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
        struct l2_bucket *l2b;
+       int rv;
 
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
-       pmap_enter_locked(pmap, va, access, m, prot, wired, M_WAITOK);
-       /*
-        * If both the l2b_occupancy and the reservation are fully
-        * populated, then attempt promotion.
-        */
-       l2b = pmap_get_l2_bucket(pmap, va);
-       if ((l2b != NULL) && (l2b->l2b_occupancy == L2_PTE_NUM_TOTAL) &&
-           sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
-           vm_reserv_level_iffullpop(m) == 0)
-               pmap_promote_section(pmap, va);
-
+       rv = pmap_enter_locked(pmap, va, m, prot, flags);
+       if (rv == KERN_SUCCESS) {
+               /*
+                * If both the l2b_occupancy and the reservation are fully
+                * populated, then attempt promotion.
+                */
+               l2b = pmap_get_l2_bucket(pmap, va);
+               if (l2b != NULL && l2b->l2b_occupancy == L2_PTE_NUM_TOTAL &&
+                   sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+                   vm_reserv_level_iffullpop(m) == 0)
+                       pmap_promote_section(pmap, va);
+       }
        PMAP_UNLOCK(pmap);
        rw_wunlock(&pvh_global_lock);
+       return (rv);
 }
 
 /*
  *     The pvh global and pmap locks must be held.
  */
-static void
-pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired, int flags)
+static int
+pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags)
 {
        struct l2_bucket *l2b = NULL;
        struct vm_page *om;
@@ -2980,9 +2983,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset
                pa = systempage.pv_pa;
                m = NULL;
        } else {
-               KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-                   vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
-                   ("pmap_enter_locked: page %p is not busy", m));
+               if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+                       VM_OBJECT_ASSERT_LOCKED(m->object);
                pa = VM_PAGE_TO_PHYS(m);
        }
 
@@ -3003,12 +3005,12 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 
        if (prot & VM_PROT_WRITE)
                nflags |= PVF_WRITE;
-       if (wired)
+       if ((flags & PMAP_ENTER_WIRED) != 0)
                nflags |= PVF_WIRED;
 
        PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
-           "prot = %x, wired = %x\n", (uint32_t) pmap, va, (uint32_t) m,
-           prot, wired));
+           "prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
+           prot, flags));
 
        if (pmap == pmap_kernel()) {
                l2b = pmap_get_l2_bucket(pmap, va);
@@ -3018,7 +3020,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 do_l2b_alloc:
                l2b = pmap_alloc_l2_bucket(pmap, va);
                if (l2b == NULL) {
-                       if (flags & M_WAITOK) {
+                       if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
                                PMAP_UNLOCK(pmap);
                                rw_wunlock(&pvh_global_lock);
                                VM_WAIT;
@@ -3026,7 +3028,7 @@ do_l2b_alloc:
                                PMAP_LOCK(pmap);
                                goto do_l2b_alloc;
                        }
-                       return;
+                       return (KERN_RESOURCE_SHORTAGE);
                }
        }
 
@@ -3185,6 +3187,7 @@ validate:
 
        if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
                cpu_icache_sync_range(va, PAGE_SIZE);
+       return (KERN_SUCCESS);
 }
 
 /*
@@ -3206,13 +3209,12 @@ pmap_enter_object(pmap_t pmap, vm_offset
        vm_offset_t va;
        vm_page_t m;
        vm_pindex_t diff, psize;
-       vm_prot_t access;
 
        VM_OBJECT_ASSERT_LOCKED(m_start->object);
 
        psize = atop(end - start);
        m = m_start;
-       access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+       prot &= VM_PROT_READ | VM_PROT_EXECUTE;
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
@@ -3222,8 +3224,8 @@ pmap_enter_object(pmap_t pmap, vm_offset
                    pmap_enter_section(pmap, va, m, prot))
                        m = &m[L1_S_SIZE / PAGE_SIZE - 1];
                else
-                       pmap_enter_locked(pmap, va, access, m, prot,
-                           FALSE, M_NOWAIT);
+                       pmap_enter_locked(pmap, va, m, prot,
+                           PMAP_ENTER_NOSLEEP);
                m = TAILQ_NEXT(m, listq);
        }
        PMAP_UNLOCK(pmap);
@@ -3242,12 +3244,11 @@ pmap_enter_object(pmap_t pmap, vm_offset
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
-       vm_prot_t access;
 
-       access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
+       prot &= VM_PROT_READ | VM_PROT_EXECUTE;
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
-       pmap_enter_locked(pmap, va, access, m, prot, FALSE, M_NOWAIT);
+       pmap_enter_locked(pmap, va, m, prot, PMAP_ENTER_NOSLEEP);
        PMAP_UNLOCK(pmap);
        rw_wunlock(&pvh_global_lock);
 }
@@ -3499,8 +3500,8 @@ pmap_pinit(pmap_t pmap)
        pmap->pm_stats.resident_count = 1;
        if (vector_page < KERNBASE) {
                pmap_enter(pmap, vector_page,
-                   VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
-                   VM_PROT_READ, 1);
+                   PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
+                   PMAP_ENTER_WIRED, 0);
        }
        return (1);
 }

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c     Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/arm/arm/pmap.c     Fri Aug  8 17:12:03 2014        (r269728)
@@ -199,8 +199,8 @@ extern int last_fault_code;
 static void pmap_free_pv_entry (pv_entry_t);
 static pv_entry_t pmap_get_pv_entry(void);
 
-static void            pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-    vm_prot_t, boolean_t, int);
+static int             pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int);
 static vm_paddr_t      pmap_extract_locked(pmap_t pmap, vm_offset_t va);
 static void            pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
 static void            pmap_alloc_l1(pmap_t);
@@ -3208,24 +3208,26 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
  *     insert this page into the given map NOW.
  */
 
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
+       int rv;
 
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
-       pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
+       rv = pmap_enter_locked(pmap, va, m, prot, flags);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
+       return (rv);
 }
 
 /*
  *     The pvh global and pmap locks must be held.
  */
-static void
+static int
 pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired, int flags)
+    u_int flags)
 {
        struct l2_bucket *l2b = NULL;
        struct vm_page *opg;
@@ -3241,9 +3243,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset
                pa = systempage.pv_pa;
                m = NULL;
        } else {
-               KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
-                   vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
-                   ("pmap_enter_locked: page %p is not busy", m));
+               if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+                       VM_OBJECT_ASSERT_LOCKED(m->object);
                pa = VM_PAGE_TO_PHYS(m);
        }
        nflags = 0;
@@ -3251,10 +3252,10 @@ pmap_enter_locked(pmap_t pmap, vm_offset
                nflags |= PVF_WRITE;
        if (prot & VM_PROT_EXECUTE)
                nflags |= PVF_EXEC;
-       if (wired)
+       if ((flags & PMAP_ENTER_WIRED) != 0)
                nflags |= PVF_WIRED;
        PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
-           "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
+           "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags));
 
        if (pmap == pmap_kernel()) {
                l2b = pmap_get_l2_bucket(pmap, va);
@@ -3264,7 +3265,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset
 do_l2b_alloc:
                l2b = pmap_alloc_l2_bucket(pmap, va);
                if (l2b == NULL) {
-                       if (flags & M_WAITOK) {
+                       if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
                                PMAP_UNLOCK(pmap);
                                rw_wunlock(&pvh_global_lock);
                                VM_WAIT;
@@ -3272,7 +3273,7 @@ do_l2b_alloc:
                                PMAP_LOCK(pmap);
                                goto do_l2b_alloc;
                        }
-                       return;
+                       return (KERN_RESOURCE_SHORTAGE);
                }
        }
 
@@ -3486,6 +3487,7 @@ do_l2b_alloc:
                if (m)
                        pmap_fix_cache(m, pmap, va);
        }
+       return (KERN_SUCCESS);
 }
 
 /*
@@ -3515,7 +3517,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
-                   (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
+                   (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP);
                m = TAILQ_NEXT(m, listq);
        }
        rw_wunlock(&pvh_global_lock);
@@ -3538,7 +3540,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-           FALSE, M_NOWAIT);
+           PMAP_ENTER_NOSLEEP);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
 }
@@ -3769,9 +3771,8 @@ pmap_pinit(pmap_t pmap)
        bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
        pmap->pm_stats.resident_count = 1;
        if (vector_page < KERNBASE) {
-               pmap_enter(pmap, vector_page,
-                   VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
-                   VM_PROT_READ, 1);
+               pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
+                   VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0);
        }
        return (1);
 }

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/i386/i386/pmap.c   Fri Aug  8 17:12:03 2014        (r269728)
@@ -3458,9 +3458,9 @@ setpte:
  *     or lose information.  That is, this routine must actually
  *     insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind)
 {
        pd_entry_t *pde;
        pt_entry_t *pte;
@@ -3468,17 +3468,19 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        pv_entry_t pv;
        vm_paddr_t opa, pa;
        vm_page_t mpte, om;
-       boolean_t invlva;
+       boolean_t invlva, nosleep, wired;
 
        va = trunc_page(va);
+       mpte = NULL;
+       wired = (flags & PMAP_ENTER_WIRED) != 0;
+       nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+
        KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
        KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
            va));
        if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-               VM_OBJECT_ASSERT_WLOCKED(m->object);
-
-       mpte = NULL;
+               VM_OBJECT_ASSERT_LOCKED(m->object);
 
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
@@ -3489,7 +3491,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
         * resident, we are creating it here.
         */
        if (va < VM_MAXUSER_ADDRESS) {
-               mpte = pmap_allocpte(pmap, va, M_WAITOK);
+               mpte = pmap_allocpte(pmap, va, nosleep ? M_NOWAIT : M_WAITOK);
+               if (mpte == NULL) {
+                       KASSERT(nosleep,
+                           ("pmap_allocpte failed with sleep allowed"));
+                       sched_unpin();
+                       rw_wunlock(&pvh_global_lock);
+                       PMAP_UNLOCK(pmap);
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
        }
 
        pde = pmap_pde(pmap, va);
@@ -3607,7 +3617,7 @@ validate:
         */
        if ((origpte & ~(PG_M|PG_A)) != newpte) {
                newpte |= PG_A;
-               if ((access & VM_PROT_WRITE) != 0)
+               if ((flags & VM_PROT_WRITE) != 0)
                        newpte |= PG_M;
                if (origpte & PG_V) {
                        invlva = FALSE;
@@ -3652,6 +3662,7 @@ validate:
        sched_unpin();
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
+       return (KERN_SUCCESS);
 }
 
 /*

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c    Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/i386/xen/pmap.c    Fri Aug  8 17:12:03 2014        (r269728)
@@ -298,9 +298,9 @@ static void pmap_remove_entry(struct pma
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
     vm_page_t m);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
@@ -1546,21 +1546,17 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
 {
        vm_paddr_t ptema;
        vm_page_t m;
 
-       KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-           (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-           ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
        /*
         * Allocate a page table page.
         */
        if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
            VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
-               if (flags & M_WAITOK) {
+               if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
                        PMAP_UNLOCK(pmap);
                        rw_wunlock(&pvh_global_lock);
                        VM_WAIT;
@@ -1595,16 +1591,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepin
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
        u_int ptepindex;
        pd_entry_t ptema;
        vm_page_t m;
 
-       KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-           (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-           ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
        /*
         * Calculate pagetable page index
         */
@@ -1644,7 +1636,7 @@ retry:
                CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
                    pmap, va, flags);
                m = _pmap_allocpte(pmap, ptepindex, flags);
-               if (m == NULL && (flags & M_WAITOK))
+               if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
                        goto retry;
 
                KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
@@ -2643,9 +2635,9 @@ retry:
  *     or lose information.  That is, this routine must actually
  *     insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
        pd_entry_t *pde;
        pt_entry_t *pte;
@@ -2653,19 +2645,21 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        pv_entry_t pv;
        vm_paddr_t opa, pa;
        vm_page_t mpte, om;
-       boolean_t invlva;
+       boolean_t invlva, wired;
 
-       CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
-           pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
+       CTR5(KTR_PMAP,
+           "pmap_enter: pmap=%08p va=0x%08x ma=0x%08x prot=0x%x flags=0x%x",
+           pmap, va, VM_PAGE_TO_MACH(m), prot, flags);
        va = trunc_page(va);
        KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
        KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
            va));
        if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-               VM_OBJECT_ASSERT_WLOCKED(m->object);
+               VM_OBJECT_ASSERT_LOCKED(m->object);
 
        mpte = NULL;
+       wired = (flags & PMAP_ENTER_WIRED) != 0;
 
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
@@ -2676,7 +2670,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
         * resident, we are creating it here.
         */
        if (va < VM_MAXUSER_ADDRESS) {
-               mpte = pmap_allocpte(pmap, va, M_WAITOK);
+               mpte = pmap_allocpte(pmap, va, flags);
+               if (mpte == NULL) {
+                       KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+                           ("pmap_allocpte failed with sleep allowed"));
+                       sched_unpin();
+                       rw_wunlock(&pvh_global_lock);
+                       PMAP_UNLOCK(pmap);
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
        }
 
        pde = pmap_pde(pmap, va);
@@ -2842,6 +2844,7 @@ validate:
        sched_unpin();
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
+       return (KERN_SUCCESS);
 }
 
 /*
@@ -2996,7 +2999,7 @@ pmap_enter_quick_locked(multicall_entry_
                                mpte->wire_count++;
                        } else {
                                mpte = _pmap_allocpte(pmap, ptepindex,
-                                   M_NOWAIT);
+                                   PMAP_ENTER_NOSLEEP);
                                if (mpte == NULL)
                                        return (mpte);
                        }
@@ -3305,7 +3308,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
                         */
                        if ((ptetemp & PG_MANAGED) != 0) {
                                dstmpte = pmap_allocpte(dst_pmap, addr,
-                                   M_NOWAIT);
+                                   PMAP_ENTER_NOSLEEP);
                                if (dstmpte == NULL)
                                        goto out;
                                dst_pte = pmap_pte_quick(dst_pmap, addr);

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c   Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/mips/mips/pmap.c   Fri Aug  8 17:12:03 2014        (r269728)
@@ -177,8 +177,8 @@ static void pmap_invalidate_all(pmap_t p
 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
-static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags);
 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
 static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);
 
@@ -1094,20 +1094,16 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
 {
        vm_offset_t pageva;
        vm_page_t m;
 
-       KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-           (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-           ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
        /*
         * Find or fabricate a new pagetable page
         */
        if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
-               if (flags & M_WAITOK) {
+               if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
                        PMAP_UNLOCK(pmap);
                        rw_wunlock(&pvh_global_lock);
                        pmap_grow_direct_page_cache();
@@ -1164,16 +1160,12 @@ _pmap_allocpte(pmap_t pmap, unsigned pte
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
        unsigned ptepindex;
        pd_entry_t *pde;
        vm_page_t m;
 
-       KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-           (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-           ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
        /*
         * Calculate pagetable page index
         */
@@ -1197,7 +1189,7 @@ retry:
                 * deallocated.
                 */
                m = _pmap_allocpte(pmap, ptepindex, flags);
-               if (m == NULL && (flags & M_WAITOK))
+               if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
                        goto retry;
        }
        return (m);
@@ -1994,9 +1986,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
  *     or lose information.  That is, this routine must actually
  *     insert this page into the given map NOW.
  */
-void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
-    vm_prot_t prot, boolean_t wired)
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    u_int flags, int8_t psind __unused)
 {
        vm_paddr_t pa, opa;
        pt_entry_t *pte;
@@ -2009,11 +2001,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
            va >= kmi.clean_eva,
            ("pmap_enter: managed mapping within the clean submap"));
-       KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m),
-           ("pmap_enter: page %p is not busy", m));
+       if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+               VM_OBJECT_ASSERT_LOCKED(m->object);
        pa = VM_PAGE_TO_PHYS(m);
-       newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot);
-       if (wired)
+       newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
+       if ((flags & PMAP_ENTER_WIRED) != 0)
                newpte |= PTE_W;
        if (is_kernel_pmap(pmap))
                newpte |= PTE_G;
@@ -2032,7 +2024,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
         * creating it here.
         */
        if (va < VM_MAXUSER_ADDRESS) {
-               mpte = pmap_allocpte(pmap, va, M_WAITOK);
+               mpte = pmap_allocpte(pmap, va, flags);
+               if (mpte == NULL) {
+                       KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
+                           ("pmap_allocpte failed with sleep allowed"));
+                       rw_wunlock(&pvh_global_lock);
+                       PMAP_UNLOCK(pmap);
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
        }
        pte = pmap_pte(pmap, va);
 
@@ -2057,9 +2056,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                 * are valid mappings in them. Hence, if a user page is
                 * wired, the PT page will be also.
                 */
-               if (wired && !pte_test(&origpte, PTE_W))
+               if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
                        pmap->pm_stats.wired_count++;
-               else if (!wired && pte_test(&origpte, PTE_W))
+               else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
+                   PTE_W))
                        pmap->pm_stats.wired_count--;
 
                KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
@@ -2123,7 +2123,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        /*
         * Increment counters
         */
-       if (wired)
+       if (pte_test(&newpte, PTE_W))
                pmap->pm_stats.wired_count++;
 
 validate:
@@ -2170,6 +2170,7 @@ validate:
        }
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
+       return (KERN_SUCCESS);
 }
 
 /*
@@ -2235,7 +2236,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
                                mpte->wire_count++;
                        } else {
                                mpte = _pmap_allocpte(pmap, ptepindex,
-                                   M_NOWAIT);
+                                   PMAP_ENTER_NOSLEEP);
                                if (mpte == NULL)
                                        return (mpte);
                        }

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/powerpc/aim/mmu_oea.c      Fri Aug  8 17:12:03 2014        (r269728)
@@ -258,8 +258,8 @@ static struct       pte *moea_pvo_to_pte(const
 /*
  * Utility routines.
  */
-static void            moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
-                           vm_prot_t, boolean_t);
+static int             moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+                           vm_prot_t, u_int, int8_t);
 static void            moea_syncicache(vm_offset_t, vm_size_t);
 static boolean_t       moea_query_bit(vm_page_t, int);
 static u_int           moea_clear_bit(vm_page_t, int);
@@ -273,7 +273,8 @@ void moea_clear_modify(mmu_t, vm_page_t)
 void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+    int8_t);
 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -1100,16 +1101,25 @@ moea_zero_page_idle(mmu_t mmu, vm_page_t
  * target pmap with the protection requested.  If specified the page
  * will be wired down.
  */
-void
+int
 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-          boolean_t wired)
+    u_int flags, int8_t psind)
 {
+       int error;
 
-       rw_wlock(&pvh_global_lock);
-       PMAP_LOCK(pmap);
-       moea_enter_locked(pmap, va, m, prot, wired);
-       rw_wunlock(&pvh_global_lock);
-       PMAP_UNLOCK(pmap);
+       for (;;) {
+               rw_wlock(&pvh_global_lock);
+               PMAP_LOCK(pmap);
+               error = moea_enter_locked(pmap, va, m, prot, flags, psind);
+               rw_wunlock(&pvh_global_lock);
+               PMAP_UNLOCK(pmap);
+               if (error != ENOMEM)
+                       return (KERN_SUCCESS);
+               if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+                       return (KERN_RESOURCE_SHORTAGE);
+               VM_OBJECT_ASSERT_UNLOCKED(m->object);
+               VM_WAIT;
+       }
 }
 
 /*
@@ -1119,9 +1129,9 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_of
  *
  * The global pvh and pmap must be locked.
  */
-static void
+static int
 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    boolean_t wired)
+    u_int flags, int8_t psind __unused)
 {
        struct          pvo_head *pvo_head;
        uma_zone_t      zone;
@@ -1154,7 +1164,7 @@ moea_enter_locked(pmap_t pmap, vm_offset
        } else
                pte_lo |= PTE_BR;
 
-       if (wired)
+       if ((flags & PMAP_ENTER_WIRED) != 0)
                pvo_flags |= PVO_WIRED;
 
        error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
@@ -1169,6 +1179,8 @@ moea_enter_locked(pmap_t pmap, vm_offset
        if (pmap != kernel_pmap && error == ENOENT &&
            (pte_lo & (PTE_I | PTE_G)) == 0)
                moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
+
+       return (error);
 }
 
 /*
@@ -1198,7 +1210,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, 
        PMAP_LOCK(pm);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea_enter_locked(pm, start + ptoa(diff), m, prot &
-                   (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+                   (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
                m = TAILQ_NEXT(m, listq);
        }
        rw_wunlock(&pvh_global_lock);
@@ -1213,7 +1225,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, v
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-           FALSE);
+           0, 0);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
 }

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/powerpc/aim/mmu_oea64.c    Fri Aug  8 17:12:03 2014        (r269728)
@@ -267,7 +267,7 @@ int         moea64_large_page_shift = 0;
  * PVO calls.
  */
 static int     moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
-                   vm_offset_t, vm_offset_t, uint64_t, int);
+                   vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
 static void    moea64_pvo_remove(mmu_t, struct pvo_entry *);
 static struct  pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
 
@@ -287,7 +287,8 @@ void moea64_clear_modify(mmu_t, vm_page_
 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
+    u_int flags, int8_t psind);
 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
     vm_prot_t);
 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -627,7 +628,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_o
 
                        moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
                                    NULL, pa, pa, pte_lo,
-                                   PVO_WIRED | PVO_LARGE);
+                                   PVO_WIRED | PVO_LARGE, 0);
                  }
                }
                PMAP_UNLOCK(kernel_pmap);
@@ -1228,9 +1229,9 @@ moea64_zero_page_idle(mmu_t mmu, vm_page
  * will be wired down.
  */
 
-void
+int
 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 
-    vm_prot_t prot, boolean_t wired)
+    vm_prot_t prot, u_int flags, int8_t psind)
 {
        struct          pvo_head *pvo_head;
        uma_zone_t      zone;
@@ -1264,15 +1265,23 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
        if ((prot & VM_PROT_EXECUTE) == 0)
                pte_lo |= LPTE_NOEXEC;
 
-       if (wired)
+       if ((flags & PMAP_ENTER_WIRED) != 0)
                pvo_flags |= PVO_WIRED;
 
-       LOCK_TABLE_WR();
-       PMAP_LOCK(pmap);
-       error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
-           VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
-       PMAP_UNLOCK(pmap);
-       UNLOCK_TABLE_WR();
+       for (;;) {
+               LOCK_TABLE_WR();
+               PMAP_LOCK(pmap);
+               error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
+                   VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
+               PMAP_UNLOCK(pmap);
+               UNLOCK_TABLE_WR();
+               if (error != ENOMEM)
+                       break;
+               if ((flags & PMAP_ENTER_NOSLEEP) != 0)
+                       return (KERN_RESOURCE_SHORTAGE);
+               VM_OBJECT_ASSERT_UNLOCKED(m->object);
+               VM_WAIT;
+       }
 
        /*
         * Flush the page from the instruction cache if this page is
@@ -1283,6 +1292,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
                vm_page_aflag_set(m, PGA_EXECUTABLE);
                moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
        }
+       return (KERN_SUCCESS);
 }
 
 static void
@@ -1347,7 +1357,7 @@ moea64_enter_object(mmu_t mmu, pmap_t pm
        m = m_start;
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
-                   (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+                   (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
                m = TAILQ_NEXT(m, listq);
        }
 }
@@ -1357,8 +1367,8 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm,
     vm_prot_t prot)
 {
 
-       moea64_enter(mmu, pm, va, m,
-           prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
+       moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+           PMAP_ENTER_NOSLEEP, 0);
 }
 
 vm_paddr_t
@@ -1446,7 +1456,8 @@ moea64_uma_page_alloc(uma_zone_t zone, i
                PMAP_LOCK(kernel_pmap);
 
        moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
-           NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);
+           NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
+           0);
 
        if (needed_lock)
                PMAP_UNLOCK(kernel_pmap);
@@ -1668,7 +1679,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_
        LOCK_TABLE_WR();
        PMAP_LOCK(kernel_pmap);
        error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
-           NULL, va, pa, pte_lo, PVO_WIRED);
+           NULL, va, pa, pte_lo, PVO_WIRED, 0);
        PMAP_UNLOCK(kernel_pmap);
        UNLOCK_TABLE_WR();
 
@@ -2166,7 +2177,7 @@ moea64_bootstrap_alloc(vm_size_t size, u
 static int
 moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
     struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
-    uint64_t pte_lo, int flags)
+    uint64_t pte_lo, int flags, int8_t psind __unused)
 {
        struct   pvo_entry *pvo;
        uintptr_t pt;

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Fri Aug  8 16:32:06 2014        (r269727)
+++ head/sys/powerpc/booke/pmap.c       Fri Aug  8 17:12:03 2014        (r269728)
@@ -146,8 +146,8 @@ static struct mtx copy_page_mutex;
 /* PMAP */
 /**************************************************************************/
 
-static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
-    vm_prot_t, boolean_t);
+static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, u_int flags, int8_t psind);
 
 unsigned int kptbl_min;                /* Index of the first kernel ptbl. */
 unsigned int kernel_ptbls;     /* Number of KVA ptbls. */
@@ -228,14 +228,14 @@ static struct ptbl_buf *ptbl_buf_alloc(v
 static void ptbl_buf_free(struct ptbl_buf *);
 static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
 
-static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
+static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
 static void ptbl_free(mmu_t, pmap_t, unsigned int);
 static void ptbl_hold(mmu_t, pmap_t, unsigned int);
 static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
 
 static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
-static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
+static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***