Author: alc
Date: Wed Jun 27 03:45:25 2012
New Revision: 237623
URL: http://svn.freebsd.org/changeset/base/237623

Log:
  Add new pmap layer locks to the predefined lock order.  Change the names
  of a few existing VM locks to follow a consistent naming scheme.

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/kern/subr_witness.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/vm/vm_map.c
  head/sys/vm/vm_page.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Wed Jun 27 03:24:27 2012        (r237622)
+++ head/sys/amd64/amd64/pmap.c Wed Jun 27 03:45:25 2012        (r237623)
@@ -642,7 +642,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
        /*
         * Initialize the global pv list lock.
         */
-       rw_init(&pvh_global_lock, "pvh global");
+       rw_init(&pvh_global_lock, "pmap pv global");
 
        /*
         * Reserve some special page table entries/VA space for temporary
@@ -810,13 +810,13 @@ pmap_init(void)
        /*
         * Initialize the pv chunk list mutex.
         */
-       mtx_init(&pv_chunks_mutex, "pv chunk list", NULL, MTX_DEF);
+       mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
 
        /*
         * Initialize the pool of pv list locks.
         */
        for (i = 0; i < NPV_LIST_LOCKS; i++)
-               rw_init(&pv_list_locks[i], "pv list");
+               rw_init(&pv_list_locks[i], "pmap pv list");
 
        /*
         * Calculate the size of the pv head table for superpages.

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Wed Jun 27 03:24:27 2012        (r237622)
+++ head/sys/i386/i386/pmap.c   Wed Jun 27 03:45:25 2012        (r237623)
@@ -409,7 +409,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
        /*
         * Initialize the global pv list lock.
         */
-       rw_init(&pvh_global_lock, "pvh global");
+       rw_init(&pvh_global_lock, "pmap pv global");
 
        LIST_INIT(&allpmaps);
 

Modified: head/sys/kern/subr_witness.c
==============================================================================
--- head/sys/kern/subr_witness.c        Wed Jun 27 03:24:27 2012        (r237622)
+++ head/sys/kern/subr_witness.c        Wed Jun 27 03:45:25 2012        (r237623)
@@ -593,19 +593,22 @@ static struct witness_order_list_entry o
        /*
         * CDEV
         */
-       { "system map", &lock_class_mtx_sleep },
-       { "vm page queue mutex", &lock_class_mtx_sleep },
+       { "vm map (system)", &lock_class_mtx_sleep },
+       { "vm page queue", &lock_class_mtx_sleep },
        { "vnode interlock", &lock_class_mtx_sleep },
        { "cdev", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * VM
-        * 
         */
+       { "vm map (user)", &lock_class_sx },
        { "vm object", &lock_class_mtx_sleep },
-       { "page lock", &lock_class_mtx_sleep },
-       { "vm page queue mutex", &lock_class_mtx_sleep },
+       { "vm page", &lock_class_mtx_sleep },
+       { "vm page queue", &lock_class_mtx_sleep },
+       { "pmap pv global", &lock_class_rw },
        { "pmap", &lock_class_mtx_sleep },
+       { "pmap pv list", &lock_class_rw },
+       { "vm page free queue", &lock_class_mtx_sleep },
        { NULL, NULL },
        /*
         * kqueue/VFS interaction

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c     Wed Jun 27 03:24:27 2012        (r237622)
+++ head/sys/sparc64/sparc64/pmap.c     Wed Jun 27 03:45:25 2012        (r237623)
@@ -673,9 +673,10 @@ pmap_bootstrap(u_int cpu_impl)
        CPU_FILL(&pm->pm_active);
 
        /*
-        * Initialize the global tte list lock.
+        * Initialize the global tte list lock, which is more commonly
+        * known as the pmap pv global lock.
         */
-       rw_init(&tte_list_global_lock, "tte list global");
+       rw_init(&tte_list_global_lock, "pmap pv global");
 
        /*
         * Flush all non-locked TLB entries possibly left over by the

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c        Wed Jun 27 03:24:27 2012        (r237622)
+++ head/sys/vm/vm_map.c        Wed Jun 27 03:45:25 2012        (r237623)
@@ -241,8 +241,8 @@ vm_map_zinit(void *mem, int size, int fl
        map = (vm_map_t)mem;
        map->nentries = 0;
        map->size = 0;
-       mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
-       sx_init(&map->lock, "user map");
+       mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
+       sx_init(&map->lock, "vm map (user)");
        return (0);
 }
 

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Wed Jun 27 03:24:27 2012        (r237622)
+++ head/sys/vm/vm_page.c       Wed Jun 27 03:45:25 2012        (r237623)
@@ -292,16 +292,13 @@ vm_page_startup(vm_offset_t vaddr)
        end = phys_avail[biggestone+1];
 
        /*
-        * Initialize the locks.
+        * Initialize the page and queue locks.
         */
-       mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
+       mtx_init(&vm_page_queue_mtx, "vm page queue", NULL, MTX_DEF |
            MTX_RECURSE);
-       mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
-           MTX_DEF);
-
-       /* Setup page locks. */
+       mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
        for (i = 0; i < PA_LOCK_COUNT; i++)
-               mtx_init(&pa_lock[i].data, "page lock", NULL, MTX_DEF);
+               mtx_init(&pa_lock[i].data, "vm page", NULL, MTX_DEF);
 
        /*
         * Initialize the queue headers for the hold queue, the active queue,
_______________________________________________
svn-src-all@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to