Stefan Kempf wrote:
> amap_extend is called when merging two adjacent areas of virtual address
> space. However, merging is done only for kernel
> virtual address space. It's not done for user space:
> 
> uvm/uvm_map.c:1359
>       /*
>        * Try to merge entry.
>        *
>        * Userland allocations are kept separated most of the time.
>        * Forego the effort of merging what most of the time can't be merged
>        * and only try the merge if it concerns a kernel entry.
>        */
>       if ((flags & UVM_FLAG_NOMERGE) == 0 &&
>           (map->flags & VM_MAP_ISVMSPACE) == 0)
>               uvm_mapent_tryjoin(map, entry, &dead);
> 
> 
> As far as I can tell, kernel vm_map_entries do not use amaps. So
> amap_extend should never be called. Can we remove it?

Any objections to committing this diff?

Merging vm map entries for userspace was turned off a long time
ago in r1.104 of uvm/uvm_map.c.

The kernel doesn't use amaps either. Removing amap_extend would make the
amap shrink diffs simpler.
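
For illustration, below is a small standalone C model of the gating described
above. It is not kernel code: VM_MAP_ISVMSPACE and UVM_FLAG_NOMERGE carry
placeholder values here, and struct map/struct entry are simplified stand-ins
for vm_map and vm_map_entry. It only sketches why amap_extend() becomes
unreachable: userland maps never enter the merge path, and a kernel entry that
somehow carried an amap is now refused by uvm_mapent_merge() instead of having
its amap extended.

#include <stdio.h>
#include <stdbool.h>

/*
 * Standalone model of the merge gating -- not kernel code.  The flag
 * values are placeholders and the structs are simplified stand-ins.
 */
#define VM_MAP_ISVMSPACE	0x01	/* placeholder: set on userland vmspaces */
#define UVM_FLAG_NOMERGE	0x02	/* placeholder: caller forbids merging */

struct entry { void *ar_amap; };	/* stand-in for entry->aref.ar_amap */
struct map   { int flags; };		/* stand-in for vm_map */

/*
 * Mirrors the gate at uvm/uvm_map.c:1359 plus the new check in
 * uvm_mapent_merge(): only kernel entries are merge candidates, and a
 * candidate that carries an amap is refused rather than extended.
 */
static bool
would_merge(struct map *map, struct entry *e1, int flags)
{
	if ((flags & UVM_FLAG_NOMERGE) != 0 ||
	    (map->flags & VM_MAP_ISVMSPACE) != 0)
		return false;	/* userland (or NOMERGE): never merged */
	if (e1->ar_amap != NULL)
		return false;	/* amap present: refuse, don't amap_extend */
	return true;		/* kernel entry without an amap: merge */
}

int
main(void)
{
	struct map user = { VM_MAP_ISVMSPACE }, kern = { 0 };
	struct entry plain = { NULL }, amapped = { (void *)&plain };

	printf("userland entry:          %d\n", would_merge(&user, &plain, 0));
	printf("kernel entry, no amap:   %d\n", would_merge(&kern, &plain, 0));
	printf("kernel entry, with amap: %d\n", would_merge(&kern, &amapped, 0));
	return 0;
}

None of the three cases leaves a path that needs to grow an existing amap,
which is why both amap_extend() and its prototype in uvm_amap.h can go.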

> Index: uvm/uvm_amap.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_amap.c,v
> retrieving revision 1.62
> diff -u -p -r1.62 uvm_amap.c
> --- uvm/uvm_amap.c    16 Mar 2016 16:53:43 -0000      1.62
> +++ uvm/uvm_amap.c    23 Mar 2016 17:03:53 -0000
> @@ -279,174 +279,6 @@ amap_free(struct vm_amap *amap)
>  }
>  
>  /*
> - * amap_extend: extend the size of an amap (if needed)
> - *
> - * => called from uvm_map when we want to extend an amap to cover
> - *    a new mapping (rather than allocate a new one)
> - * => to safely extend an amap it should have a reference count of
> - *    one (thus it can't be shared)
> - * => XXXCDC: support padding at this level?
> - */
> -int
> -amap_extend(struct vm_map_entry *entry, vsize_t addsize)
> -{
> -     struct vm_amap *amap = entry->aref.ar_amap;
> -     int slotoff = entry->aref.ar_pageoff;
> -     int slotmapped, slotadd, slotneed, slotalloc;
> -#ifdef UVM_AMAP_PPREF
> -     int *newppref, *oldppref;
> -#endif
> -     u_int *newsl, *newbck, *oldsl, *oldbck;
> -     struct vm_anon **newover, **oldover;
> -     int slotadded;
> -
> -     /*
> -      * first, determine how many slots we need in the amap.  don't
> -      * forget that ar_pageoff could be non-zero: this means that
> -      * there are some unused slots before us in the amap.
> -      */
> -     AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
> -     AMAP_B2SLOT(slotadd, addsize);                  /* slots to add */
> -     slotneed = slotoff + slotmapped + slotadd;
> -
> -     /*
> -      * case 1: we already have enough slots in the map and thus
> -      * only need to bump the reference counts on the slots we are
> -      * adding.
> -      */
> -     if (amap->am_nslot >= slotneed) {
> -#ifdef UVM_AMAP_PPREF
> -             if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
> -                     amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
> -             }
> -#endif
> -             return (0);
> -     }
> -
> -     /*
> -      * case 2: we pre-allocated slots for use and we just need to
> -      * bump nslot up to take account for these slots.
> -      */
> -     if (amap->am_maxslot >= slotneed) {
> -#ifdef UVM_AMAP_PPREF
> -             if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
> -                     if ((slotoff + slotmapped) < amap->am_nslot)
> -                             amap_pp_adjref(amap, slotoff + slotmapped, 
> -                                 (amap->am_nslot - (slotoff + slotmapped)),
> -                                 1);
> -                     pp_setreflen(amap->am_ppref, amap->am_nslot, 1, 
> -                        slotneed - amap->am_nslot);
> -             }
> -#endif
> -             amap->am_nslot = slotneed;
> -             /*
> -              * no need to zero am_anon since that was done at
> -              * alloc time and we never shrink an allocation.
> -              */
> -             return (0);
> -     }
> -
> -     /*
> -      * case 3: we need to malloc a new amap and copy all the amap
> -      * data over from old amap to the new one.
> -      *
> -      * XXXCDC: could we take advantage of a kernel realloc()?  
> -      */
> -     if (slotneed >= UVM_AMAP_LARGE)
> -             return E2BIG;
> -
> -     if (slotneed > UVM_AMAP_CHUNK)
> -             slotalloc = malloc_roundup(slotneed * MALLOC_SLOT_UNIT) /
> -                 MALLOC_SLOT_UNIT;
> -     else
> -             slotalloc = slotneed;
> -
> -#ifdef UVM_AMAP_PPREF
> -     newppref = NULL;
> -     if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
> -             newppref = mallocarray(slotalloc, sizeof(int), M_UVMAMAP,
> -                 M_WAITOK | M_CANFAIL);
> -             if (newppref == NULL) {
> -                     /* give up if malloc fails */
> -                     free(amap->am_ppref, M_UVMAMAP, 0);
> -                     amap->am_ppref = PPREF_NONE;
> -             }
> -     }
> -#endif
> -     if (slotneed > UVM_AMAP_CHUNK)
> -             newsl = malloc(slotalloc * MALLOC_SLOT_UNIT, M_UVMAMAP,
> -                 M_WAITOK | M_CANFAIL);
> -     else
> -             newsl = pool_get(&uvm_amap_slot_pools[slotalloc - 1],
> -                 PR_WAITOK | PR_LIMITFAIL);
> -     if (newsl == NULL) {
> -#ifdef UVM_AMAP_PPREF
> -             if (newppref != NULL) {
> -                     free(newppref, M_UVMAMAP, 0);
> -             }
> -#endif
> -             return (ENOMEM);
> -     }
> -     newbck = (int *)(((char *)newsl) + slotalloc * sizeof(int));
> -     newover = (struct vm_anon **)(((char *)newbck) + slotalloc *
> -         sizeof(int));
> -     KASSERT(amap->am_maxslot < slotneed);
> -
> -     /* now copy everything over to new malloc'd areas... */
> -     slotadded = slotalloc - amap->am_nslot;
> -
> -     /* do am_slots */
> -     oldsl = amap->am_slots;
> -     memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
> -     amap->am_slots = newsl;
> -
> -     /* do am_anon */
> -     oldover = amap->am_anon;
> -     memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
> -     memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) *
> -         slotadded);
> -     amap->am_anon = newover;
> -
> -     /* do am_bckptr */
> -     oldbck = amap->am_bckptr;
> -     memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
> -     memset(newbck + amap->am_nslot, 0, sizeof(int) * slotadded); /* XXX: needed? */
> -     amap->am_bckptr = newbck;
> -
> -#ifdef UVM_AMAP_PPREF
> -     /* do ppref */
> -     oldppref = amap->am_ppref;
> -     if (newppref) {
> -             memcpy(newppref, oldppref, sizeof(int) * amap->am_nslot);
> -             memset(newppref + amap->am_nslot, 0, sizeof(int) * slotadded);
> -             amap->am_ppref = newppref;
> -             if ((slotoff + slotmapped) < amap->am_nslot)
> -                     amap_pp_adjref(amap, slotoff + slotmapped, 
> -                         (amap->am_nslot - (slotoff + slotmapped)), 1);
> -             pp_setreflen(newppref, amap->am_nslot, 1,
> -                 slotneed - amap->am_nslot);
> -     }
> -#endif
> -
> -     /* free */
> -     if (amap->am_maxslot > UVM_AMAP_CHUNK)
> -             free(oldsl, M_UVMAMAP, 0);
> -     else
> -             pool_put(&uvm_amap_slot_pools[amap->am_maxslot - 1],
> -                 oldsl);
> -
> -     /* and update master values */
> -     amap->am_nslot = slotneed;
> -     amap->am_maxslot = slotalloc;
> -
> -#ifdef UVM_AMAP_PPREF
> -     if (oldppref && oldppref != PPREF_NONE)
> -             free(oldppref, M_UVMAMAP, 0);
> -#endif
> -     return (0);
> -}
> -
> -/*
>   * amap_wipeout: wipeout all anon's in an amap; then free the amap!
>   *
>   * => called from amap_unref when the final reference to an amap is
> Index: uvm/uvm_amap.h
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_amap.h,v
> retrieving revision 1.21
> diff -u -p -r1.21 uvm_amap.h
> --- uvm/uvm_amap.h    6 Mar 2016 14:47:07 -0000       1.21
> +++ uvm/uvm_amap.h    23 Mar 2016 17:03:53 -0000
> @@ -72,8 +72,6 @@ void                amap_copy(vm_map_t, vm_map_entry_t
>                   vaddr_t);
>                                       /* resolve all COW faults now */
>  void         amap_cow_now(vm_map_t, vm_map_entry_t);
> -                                     /* make amap larger */
> -int          amap_extend(vm_map_entry_t, vsize_t);
>                                       /* get amap's flags */
>  int          amap_flags(struct vm_amap *);
>                                       /* free amap */
> Index: uvm/uvm_map.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> retrieving revision 1.209
> diff -u -p -r1.209 uvm_map.c
> --- uvm/uvm_map.c     15 Mar 2016 20:50:23 -0000      1.209
> +++ uvm/uvm_map.c     23 Mar 2016 17:03:54 -0000
> @@ -1448,14 +1448,15 @@ uvm_mapent_merge(struct vm_map *map, str
>       struct uvm_addr_state *free;
>  
>       /*
> -      * Amap of e1 must be extended to include e2.
> +      * Merging is not supported when e1 contains an amap.
> +      * This should never happen anyway, because only
> +      * kernel entries are merged, and these do not
> +      * contain amaps.
>        * e2 contains no real information in its amap,
>        * so it can be erased immediately.
>        */
> -     if (e1->aref.ar_amap) {
> -             if (amap_extend(e1, e2->end - e2->start))
> -                     return NULL;
> -     }
> +     if (e1->aref.ar_amap)
> +             return NULL;
>  
>       /*
>        * Don't drop obj reference:
