Author: alc
Date: Wed Aug  1 16:04:13 2012
New Revision: 238970
URL: http://svn.freebsd.org/changeset/base/238970

Log:
  Revise pmap_enter()'s handling of mapping updates that change the
  PTE's PG_M and PG_RW bits but not the physical page frame.  First,
  only perform vm_page_dirty() on a managed vm_page when the PG_M bit is
  being cleared.  If the updated PTE continues to have PG_M set, then
  there is no requirement to perform vm_page_dirty().  Second, flush the
  mapping from the TLB when PG_M alone is cleared, not just when PG_M
  and PG_RW are cleared.  Otherwise, a stale TLB entry may prevent PG_M
  from being set again on the next store to the virtual page.  However,
  since the vm_page's dirty field already shows the physical page as
  being dirty, no actual harm comes from the PG_M bit not being set.
  Nonetheless, it is potentially confusing to someone expecting to see
  the PTE change after a store to the virtual page.
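
To make the new decision logic easier to follow, here is a minimal
stand-alone C sketch of the unchanged-frame case handled by the diff
below.  The bit masks match the amd64 PTE layout, but pte_decision() is a
hypothetical helper written only for illustration; it omits the
PG_MANAGED check and the changed-frame path that the real pmap_enter()
also covers.

/*
 * Stand-alone illustration (not the committed kernel code; see the diff
 * below) of the decision r238970 makes when the physical page frame is
 * unchanged: whether to record dirtiness and whether to invalidate the
 * TLB entry for the virtual page.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PG_RW   0x002ULL                /* writable */
#define PG_A    0x020ULL                /* accessed */
#define PG_M    0x040ULL                /* modified (dirty) */
#define PG_NX   (1ULL << 63)            /* no-execute */

struct pte_decision {
        bool dirty_page;                /* would call vm_page_dirty() */
        bool invalidate_tlb;            /* would call pmap_invalidate_page() */
};

static struct pte_decision
pte_decision(uint64_t origpte, uint64_t newpte)
{
        struct pte_decision d = { false, false };

        if ((newpte & PG_M) == 0 &&
            (origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
                /* PG_M is being cleared: record the dirtiness now. */
                d.dirty_page = true;
                /*
                 * Even if PG_RW remains set, a stale TLB entry could
                 * keep PG_M from being set on the next store, so
                 * invalidate if the TLB may hold this mapping.
                 */
                d.invalidate_tlb = (origpte & PG_A) != 0;
        } else if ((origpte & PG_NX) == 0 && (newpte & PG_NX) != 0) {
                /* Execute permission is being revoked. */
                d.invalidate_tlb = (origpte & PG_A) != 0;
        }
        /* Any other same-frame change needs no TLB invalidation. */
        return (d);
}

int
main(void)
{
        /* A dirty, accessed, writable mapping downgraded to read-only. */
        struct pte_decision d = pte_decision(PG_M | PG_A | PG_RW, 0);

        printf("dirty=%d invalidate=%d\n", d.dirty_page, d.invalidate_tlb);
        return (0);
}

Compiled and run, this prints dirty=1 invalidate=1 for that case, which
matches the first and second points of the log above.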

Modified:
  head/sys/amd64/amd64/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Wed Aug  1 12:24:13 2012        (r238969)
+++ head/sys/amd64/amd64/pmap.c Wed Aug  1 16:04:13 2012        (r238970)
@@ -3439,7 +3439,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        pv_entry_t pv;
        vm_paddr_t opa, pa;
        vm_page_t mpte, om;
-       boolean_t invlva;
 
        va = trunc_page(va);
        KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
@@ -3537,7 +3536,6 @@ retry:
                                newpte |= PG_MANAGED;
                                if ((newpte & PG_RW) != 0)
                                        vm_page_aflag_set(m, PGA_WRITEABLE);
-                               om = m;
                        }
                        if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
                                goto unchanged;
@@ -3576,30 +3574,40 @@ retry:
         */
        if ((origpte & PG_V) != 0) {
 validate:
-               invlva = FALSE;
                origpte = pte_load_store(pte, newpte);
                opa = origpte & PG_FRAME;
-               if ((origpte & PG_A) != 0 && (opa != pa ||
-                   ((origpte & PG_NX) == 0 && (newpte & PG_NX) != 0)))
-                       invlva = TRUE;
-               if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+               if (opa != pa) {
+                       if ((origpte & PG_MANAGED) != 0) {
+                               if ((origpte & (PG_M | PG_RW)) == (PG_M |
+                                   PG_RW))
+                                       vm_page_dirty(om);
+                               if ((origpte & PG_A) != 0)
+                                       vm_page_aflag_set(om, PGA_REFERENCED);
+                               CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+                               pmap_pvh_free(&om->md, pmap, va);
+                               if ((om->aflags & PGA_WRITEABLE) != 0 &&
+                                   TAILQ_EMPTY(&om->md.pv_list) &&
+                                   ((om->flags & PG_FICTITIOUS) != 0 ||
+                                   TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
+                                       vm_page_aflag_clear(om, PGA_WRITEABLE);
+                       }
+               } else if ((newpte & PG_M) == 0 && (origpte & (PG_M |
+                   PG_RW)) == (PG_M | PG_RW)) {
                        if ((origpte & PG_MANAGED) != 0)
-                               vm_page_dirty(om);
-                       if ((newpte & PG_RW) == 0)
-                               invlva = TRUE;
-               }
-               if (opa != pa && (origpte & PG_MANAGED) != 0) {
-                       if ((origpte & PG_A) != 0)
-                               vm_page_aflag_set(om, PGA_REFERENCED);
-                       CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
-                       pmap_pvh_free(&om->md, pmap, va);
-                       if ((om->aflags & PGA_WRITEABLE) != 0 &&
-                           TAILQ_EMPTY(&om->md.pv_list) &&
-                           ((om->flags & PG_FICTITIOUS) != 0 ||
-                           TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
-                               vm_page_aflag_clear(om, PGA_WRITEABLE);
+                               vm_page_dirty(m);
+
+                       /*
+                        * Although the PTE may still have PG_RW set, TLB
+                        * invalidation may nonetheless be required because
+                        * the PTE no longer has PG_M set.
+                        */
+               } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
+                       /*
+                        * This PTE change does not require TLB invalidation.
+                        */
+                       goto unchanged;
                }
-               if (invlva)
+               if ((origpte & PG_A) != 0)
                        pmap_invalidate_page(pmap, va);
        } else
                pte_store(pte, newpte);
