On 12/23/2011 16:25, Sean Bruno wrote:
On Wed, 2011-12-21 at 12:47 -0800, Alan Cox wrote:
Can you please try the attached patch? I'm trying to reduce the number
of differences between the native and Xen pmap implementations.
Alan
Without really looking at the output, I note that this didn't apply
cleanly against -head ... can you regenerate it?
My bad. I gave you the wrong patch. Try this instead.
Alan
Index: i386/i386/pmap.c
===================================================================
--- i386/i386/pmap.c	(revision 228570)
+++ i386/i386/pmap.c	(working copy)
@@ -330,7 +330,7 @@ static void pmap_update_pde_invalidate(vm_offset_t
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
-static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
@@ -340,6 +340,8 @@ static void *pmap_pdpt_allocf(uma_zone_t zone, int
#endif
static void pmap_set_pg(void);
+static __inline void pagezero(void *page);
+
CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
@@ -1216,7 +1218,7 @@ pmap_is_current(pmap_t pmap)
{
return (pmap == kernel_pmap ||
- (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
+ (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
(pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
}
@@ -1759,7 +1761,6 @@ pmap_pinit(pmap_t pmap)
if (pmap->pm_pdir == NULL) {
pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
NBPTD);
-
if (pmap->pm_pdir == NULL) {
PMAP_LOCK_DESTROY(pmap);
return (0);
@@ -1792,10 +1793,9 @@ pmap_pinit(pmap_t pmap)
pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
- for (i = 0; i < NPGPTD; i++) {
+ for (i = 0; i < NPGPTD; i++)
if ((ptdpg[i]->flags & PG_ZERO) == 0)
- bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
- }
+ pagezero(pmap->pm_pdir + (i * NPDEPG));
mtx_lock_spin(&allpmaps_lock);
LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
@@ -1824,7 +1824,7 @@ pmap_pinit(pmap_t pmap)
* mapped correctly.
*/
static vm_page_t
-_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
{
vm_paddr_t ptepa;
vm_page_t m;
@@ -1872,7 +1872,7 @@ static vm_page_t
static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
{
- unsigned ptepindex;
+ u_int ptepindex;
pd_entry_t ptepa;
vm_page_t m;
@@ -2020,7 +2020,7 @@ pmap_lazyfix(pmap_t pmap)
cr3 = vtophys(pmap->pm_pdir);
if (cr3 == rcr3()) {
load_cr3(PCPU_GET(curpcb)->pcb_cr3);
- CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active);
+ CPU_CLR(PCPU_GET(cpuid), &pmap->pm_active);
}
}
#endif /* SMP */
@@ -2849,7 +2849,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offse
}
for (; sva < eva; sva = pdnxt) {
- unsigned pdirindex;
+ u_int pdirindex;
/*
* Calculate index for next page table.
@@ -3070,7 +3070,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offs
PMAP_LOCK(pmap);
for (; sva < eva; sva = pdnxt) {
pt_entry_t obits, pbits;
- unsigned pdirindex;
+ u_int pdirindex;
pdnxt = (sva + NBPDR) & ~PDRMASK;
if (pdnxt < sva)
@@ -3596,7 +3596,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start,
m = TAILQ_NEXT(m, listq);
}
vm_page_unlock_queues();
- PMAP_UNLOCK(pmap);
+ PMAP_UNLOCK(pmap);
}
/*
@@ -3638,7 +3638,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t v
* resident, we are creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
- unsigned ptepindex;
+ u_int ptepindex;
pd_entry_t ptepa;
/*
@@ -3904,7 +3904,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
pt_entry_t *src_pte, *dst_pte;
vm_page_t dstmpte, srcmpte;
pd_entry_t srcptepaddr;
- unsigned ptepindex;
+ u_int ptepindex;
KASSERT(addr < UPT_MIN_ADDRESS,
("pmap_copy: invalid to pmap_copy page tables"));
@@ -5244,7 +5244,7 @@ pmap_pid_dump(int pid)
#if defined(DEBUG)
static void pads(pmap_t pm);
-void pmap_pvdump(vm_offset_t pa);
+void pmap_pvdump(vm_paddr_t pa);
/* print address space of pmap*/
s