It seems per-page reference counting has been enabled since forever. I think there's no reason to ever turn it off (doing so would track referenced pages with less accuracy, causing leaks).
So remove those #ifdefs. ok? Index: uvm/uvm_amap.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_amap.c,v retrieving revision 1.63 diff -u -p -r1.63 uvm_amap.c --- uvm/uvm_amap.c 27 Mar 2016 09:51:37 -0000 1.63 +++ uvm/uvm_amap.c 27 Mar 2016 12:09:16 -0000 @@ -81,11 +81,9 @@ amap_list_remove(struct vm_amap *amap) LIST_REMOVE(amap, am_list); } -#ifdef UVM_AMAP_PPREF /* - * what is ppref? ppref is an _optional_ amap feature which is used - * to keep track of reference counts on a per-page basis. it is enabled - * when UVM_AMAP_PPREF is defined. + * what is ppref? ppref is an amap feature which is used + * to keep track of reference counts on a per-page basis. * * when enabled, an array of ints is allocated for the pprefs. this * array is allocated only when a partial reference is added to the @@ -147,7 +145,6 @@ pp_setreflen(int *ppref, int offset, int ppref[offset+1] = len; } } -#endif /* * amap_init: called at boot time to init global amap data structures @@ -196,9 +193,7 @@ amap_alloc1(int slots, int padslots, int amap->am_ref = 1; amap->am_flags = 0; -#ifdef UVM_AMAP_PPREF amap->am_ppref = NULL; -#endif amap->am_maxslot = totalslots; amap->am_nslot = slots; amap->am_nused = 0; @@ -270,10 +265,8 @@ amap_free(struct vm_amap *amap) pool_put(&uvm_amap_slot_pools[amap->am_maxslot - 1], amap->am_slots); -#ifdef UVM_AMAP_PPREF if (amap->am_ppref && amap->am_ppref != PPREF_NONE) free(amap->am_ppref, M_UVMAMAP, 0); -#endif pool_put(&uvm_amap_pool, amap); } @@ -422,12 +415,10 @@ amap_copy(struct vm_map *map, struct vm_ srcamap->am_ref--; if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0) srcamap->am_flags &= ~AMAP_SHARED; /* clear shared flag */ -#ifdef UVM_AMAP_PPREF if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) { amap_pp_adjref(srcamap, entry->aref.ar_pageoff, (entry->end - entry->start) >> PAGE_SHIFT, -1); } -#endif /* install new amap. 
*/ entry->aref.ar_pageoff = 0; @@ -551,19 +542,15 @@ amap_splitref(struct vm_aref *origref, s if (origref->ar_amap->am_nslot - origref->ar_pageoff - leftslots <= 0) panic("amap_splitref: map size check failed"); -#ifdef UVM_AMAP_PPREF /* establish ppref before we add a duplicate reference to the amap */ if (origref->ar_amap->am_ppref == NULL) amap_pp_establish(origref->ar_amap); -#endif splitref->ar_amap = origref->ar_amap; splitref->ar_amap->am_ref++; /* not a share reference */ splitref->ar_pageoff = origref->ar_pageoff + leftslots; } -#ifdef UVM_AMAP_PPREF - /* * amap_pp_establish: add a ppref array to an amap, if possible */ @@ -719,8 +706,6 @@ amap_wiperange(struct vm_amap *amap, int } } -#endif - /* * amap_swap_off: pagein anonymous pages in amaps and drop swap slots. * @@ -911,7 +896,6 @@ amap_ref(struct vm_amap *amap, vaddr_t o amap->am_ref++; if (flags & AMAP_SHARED) amap->am_flags |= AMAP_SHARED; -#ifdef UVM_AMAP_PPREF if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 && len != amap->am_nslot) amap_pp_establish(amap); @@ -921,7 +905,6 @@ amap_ref(struct vm_amap *amap, vaddr_t o else amap_pp_adjref(amap, offset, len, 1); } -#endif } /* @@ -945,7 +928,6 @@ amap_unref(struct vm_amap *amap, vaddr_t /* otherwise just drop the reference count(s) */ if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0) amap->am_flags &= ~AMAP_SHARED; /* clear shared flag */ -#ifdef UVM_AMAP_PPREF if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot) amap_pp_establish(amap); if (amap->am_ppref && amap->am_ppref != PPREF_NONE) { @@ -954,5 +936,4 @@ amap_unref(struct vm_amap *amap, vaddr_t else amap_pp_adjref(amap, offset, len, -1); } -#endif } Index: uvm/uvm_amap.h =================================================================== RCS file: /cvs/src/sys/uvm/uvm_amap.h,v retrieving revision 1.22 diff -u -p -r1.22 uvm_amap.h --- uvm/uvm_amap.h 27 Mar 2016 09:51:37 -0000 1.22 +++ uvm/uvm_amap.h 27 Mar 2016 12:09:16 -0000 @@ -114,13 +114,10 @@ boolean_t 
amap_swap_off(int, int); /* * we currently provide an array-based amap implementation. in this - * implementation we provide the option of tracking split references - * so that we don't lose track of references during partial unmaps - * ... this is enabled with the "UVM_AMAP_PPREF" define. + * implementation we track split references so that we don't lose + * track of references during partial unmaps */ -#define UVM_AMAP_PPREF /* track partial references */ - /* * here is the definition of the vm_amap structure for this implementation. */ @@ -134,9 +131,7 @@ struct vm_amap { int *am_slots; /* contig array of active slots */ int *am_bckptr; /* back pointer array to am_slots */ struct vm_anon **am_anon; /* array of anonymous pages */ -#ifdef UVM_AMAP_PPREF int *am_ppref; /* per page reference count (if !NULL) */ -#endif LIST_ENTRY(vm_amap) am_list; }; @@ -227,13 +222,6 @@ struct vm_amap { #define amap_flags(AMAP) ((AMAP)->am_flags) #define amap_refs(AMAP) ((AMAP)->am_ref) -/* - * if we enable PPREF, then we have a couple of extra functions that - * we need to prototype here... - */ - -#ifdef UVM_AMAP_PPREF - #define PPREF_NONE ((int *) -1) /* not using ppref */ /* adjust references */ @@ -242,7 +230,6 @@ void amap_pp_adjref(struct vm_amap *, i void amap_pp_establish(struct vm_amap *); /* wipe part of an amap */ void amap_wiperange(struct vm_amap *, int, int); -#endif /* UVM_AMAP_PPREF */ #endif /* _KERNEL */