Re: svn commit: r338018 - head/sys/vm

2018-08-20 Thread Alan Cox
On 08/20/2018 13:36, O. Hartmann wrote:
> Am Sat, 18 Aug 2018 18:33:50 + (UTC)
> Alan Cox  schrieb:
>
> > Author: alc
> > Date: Sat Aug 18 18:33:50 2018
> > New Revision: 338018
> > URL: https://svnweb.freebsd.org/changeset/base/338018
>
> > Log:
> >   Eliminate the arena parameter to kmem_malloc_domain().  It is
> redundant.
> >   The domain and flags parameters suffice.  In fact, the related
> functions
> >   kmem_alloc_{attr,contig}_domain() don't have an arena parameter.
>
> >   Reviewed by:  kib, markj
> >   Differential Revision:  https://reviews.freebsd.org/D16713
>
> > Modified:
> >   head/sys/vm/uma_core.c
> >   head/sys/vm/vm_extern.h
> >   head/sys/vm/vm_kern.c
>
> > Modified: head/sys/vm/uma_core.c
> >
> ==
> > --- head/sys/vm/uma_core.cSat Aug 18 16:03:15 2018(r338017)
> > +++ head/sys/vm/uma_core.cSat Aug 18 18:33:50 2018(r338018)
> > @@ -1169,7 +1169,7 @@ page_alloc(uma_zone_t zone, vm_size_t bytes,
> int domai
> >  void *p;/* Returned page */
>
> >  *pflag = UMA_SLAB_KERNEL;
> > -p = (void *) kmem_malloc_domain(kernel_arena, domain, bytes, wait);
> > +p = (void *) kmem_malloc_domain(domain, bytes, wait);
>
> >  return (p);
> >  }
> > @@ -3680,32 +3680,22 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
> >  void *
> >  uma_large_malloc_domain(vm_size_t size, int domain, int wait)
> >  {
> > -struct vmem *arena;
> >  vm_offset_t addr;
> >  uma_slab_t slab;
>
> > -#if VM_NRESERVLEVEL > 0
> > -if (__predict_true((wait & M_EXEC) == 0))
> > -arena = kernel_arena;
> > -else
> > -arena = kernel_rwx_arena;
> > -#else
> > -arena = kernel_arena;
> > -#endif
> > -
> >  slab = zone_alloc_item(slabzone, NULL, domain, wait);
> >  if (slab == NULL)
> >  return (NULL);
> >  if (domain == UMA_ANYDOMAIN)
> > -addr = kmem_malloc(arena, size, wait);
> > +addr = kmem_malloc(NULL, size, wait);
> >  else
> > -addr = kmem_malloc_domain(arena, domain, size, wait);
> > +addr = kmem_malloc_domain(domain, size, wait);
> >  if (addr != 0) {
> >  vsetslab(addr, slab);
> >  slab->us_data = (void *)addr;
> >  slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
> >  #if VM_NRESERVLEVEL > 0
> > -if (__predict_false(arena == kernel_rwx_arena))
> > +if (__predict_false((wait & M_EXEC) != 0))
> >  slab->us_flags |= UMA_SLAB_KRWX;
> >  #endif
> >  slab->us_size = size;
>
> > Modified: head/sys/vm/vm_extern.h
> >
> ==
> > --- head/sys/vm/vm_extern.hSat Aug 18 16:03:15 2018(r338017)
> > +++ head/sys/vm/vm_extern.hSat Aug 18 18:33:50 2018(r338018)
> > @@ -65,8 +65,7 @@ vm_offset_t kmem_alloc_contig_domain(int domain, vm_si
> >  vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t
> boundary,
> >  vm_memattr_t memattr);
> >  vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
> > -vm_offset_t kmem_malloc_domain(struct vmem *, int domain, vm_size_t
> size,
> > -int flags);
> > +vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
> >  void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
>
> >  /* This provides memory for previously allocated address space. */
>
> > Modified: head/sys/vm/vm_kern.c
> >
> ==
> > --- head/sys/vm/vm_kern.cSat Aug 18 16:03:15 2018(r338017)
> > +++ head/sys/vm/vm_kern.cSat Aug 18 18:33:50 2018(r338018)
> > @@ -372,23 +372,18 @@ kmem_suballoc(vm_map_t parent, vm_offset_t
> *min, vm_of
> >   *Allocate wired-down pages in the kernel's address space.
> >   */
> >  vm_offset_t
> > -kmem_malloc_domain(struct vmem *vmem, int domain, vm_size_t size,
> int flags)
> > +kmem_malloc_domain(int domain, vm_size_t size, int flags)
> >  {
> >  vmem_t *arena;
> >  vm_offset_t addr;
> >  int rv;
>
> >  #if VM_NRESERVLEVEL > 0
> > -KASSERT(vmem == kernel_arena || vmem == kernel_rwx_arena,
> > -("kmem_malloc_domain: Only kernel_arena or kernel_rwx_arena "
> > -"are supported."));
> > -if (__predict_true(vmem == kernel_arena))
> > +if (__predict_true((flags & M_EXEC) == 0))
> >  arena = vm_dom[domain].vmd_kernel_arena;
> >  else
> >  arena = vm_dom[domain].vmd_kernel_rwx_arena;
> >  #else
> > -KASSERT(vmem == kernel_arena,
> > -("kmem_malloc_domain: Only kernel_arena is supported."));
> >  arena = vm_dom[domain].vmd_kernel_arena;
> >  #endif
> >  size = round_page(size);
> > @@ -404,7 +399,7 @@ kmem_malloc_domain(struct vmem *vmem, int
> domain, vm_s
> >  }
>
> >  vm_offset_t
> > -kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
> > +kmem_malloc(struct vmem *vmem __unused, vm_size_t size, int flags)
> >  {
> >  struct vm_domainset_iter di;

Re: svn commit: r338018 - head/sys/vm

2018-08-20 Thread O. Hartmann
-BEGIN PGP SIGNED MESSAGE-
Hash: SHA512

Am Sat, 18 Aug 2018 18:33:50 + (UTC)
Alan Cox  schrieb:

> Author: alc
> Date: Sat Aug 18 18:33:50 2018
> New Revision: 338018
> URL: https://svnweb.freebsd.org/changeset/base/338018
> 
> Log:
>   Eliminate the arena parameter to kmem_malloc_domain().  It is redundant.
>   The domain and flags parameters suffice.  In fact, the related functions
>   kmem_alloc_{attr,contig}_domain() don't have an arena parameter.
>   
>   Reviewed by:  kib, markj
>   Differential Revision:  https://reviews.freebsd.org/D16713
> 
> Modified:
>   head/sys/vm/uma_core.c
>   head/sys/vm/vm_extern.h
>   head/sys/vm/vm_kern.c
> 
> Modified: head/sys/vm/uma_core.c
> ==
> --- head/sys/vm/uma_core.cSat Aug 18 16:03:15 2018(r338017)
> +++ head/sys/vm/uma_core.cSat Aug 18 18:33:50 2018(r338018)
> @@ -1169,7 +1169,7 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, int domai
>   void *p;/* Returned page */
>  
>   *pflag = UMA_SLAB_KERNEL;
> - p = (void *) kmem_malloc_domain(kernel_arena, domain, bytes, wait);
> + p = (void *) kmem_malloc_domain(domain, bytes, wait);
>  
>   return (p);
>  }
> @@ -3680,32 +3680,22 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
>  void *
>  uma_large_malloc_domain(vm_size_t size, int domain, int wait)
>  {
> - struct vmem *arena;
>   vm_offset_t addr;
>   uma_slab_t slab;
>  
> -#if VM_NRESERVLEVEL > 0
> - if (__predict_true((wait & M_EXEC) == 0))
> - arena = kernel_arena;
> - else
> - arena = kernel_rwx_arena;
> -#else
> - arena = kernel_arena;
> -#endif
> -
>   slab = zone_alloc_item(slabzone, NULL, domain, wait);
>   if (slab == NULL)
>   return (NULL);
>   if (domain == UMA_ANYDOMAIN)
> - addr = kmem_malloc(arena, size, wait);
> + addr = kmem_malloc(NULL, size, wait);
>   else
> - addr = kmem_malloc_domain(arena, domain, size, wait);
> + addr = kmem_malloc_domain(domain, size, wait);
>   if (addr != 0) {
>   vsetslab(addr, slab);
>   slab->us_data = (void *)addr;
>   slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
>  #if VM_NRESERVLEVEL > 0
> - if (__predict_false(arena == kernel_rwx_arena))
> + if (__predict_false((wait & M_EXEC) != 0))
>   slab->us_flags |= UMA_SLAB_KRWX;
>  #endif
>   slab->us_size = size;
> 
> Modified: head/sys/vm/vm_extern.h
> ==
> --- head/sys/vm/vm_extern.h   Sat Aug 18 16:03:15 2018(r338017)
> +++ head/sys/vm/vm_extern.h   Sat Aug 18 18:33:50 2018(r338018)
> @@ -65,8 +65,7 @@ vm_offset_t kmem_alloc_contig_domain(int domain, vm_si
>  vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
>  vm_memattr_t memattr);
>  vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
> -vm_offset_t kmem_malloc_domain(struct vmem *, int domain, vm_size_t size,
> -int flags);
> +vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
>  void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
>  
>  /* This provides memory for previously allocated address space. */
> 
> Modified: head/sys/vm/vm_kern.c
> ==
> --- head/sys/vm/vm_kern.c Sat Aug 18 16:03:15 2018(r338017)
> +++ head/sys/vm/vm_kern.c Sat Aug 18 18:33:50 2018(r338018)
> @@ -372,23 +372,18 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_of
>   *   Allocate wired-down pages in the kernel's address space.
>   */
>  vm_offset_t
> -kmem_malloc_domain(struct vmem *vmem, int domain, vm_size_t size, int flags)
> +kmem_malloc_domain(int domain, vm_size_t size, int flags)
>  {
>   vmem_t *arena;
>   vm_offset_t addr;
>   int rv;
>  
>  #if VM_NRESERVLEVEL > 0
> - KASSERT(vmem == kernel_arena || vmem == kernel_rwx_arena,
> - ("kmem_malloc_domain: Only kernel_arena or kernel_rwx_arena "
> - "are supported."));
> - if (__predict_true(vmem == kernel_arena))
> + if (__predict_true((flags & M_EXEC) == 0))
>   arena = vm_dom[domain].vmd_kernel_arena;
>   else
>   arena = vm_dom[domain].vmd_kernel_rwx_arena;
>  #else
> - KASSERT(vmem == kernel_arena,
> - ("kmem_malloc_domain: Only kernel_arena is supported."));
>   arena = vm_dom[domain].vmd_kernel_arena;
>  #endif
>   size = round_page(size);
> @@ -404,7 +399,7 @@ kmem_malloc_domain(struct vmem *vmem, int domain, vm_s
>  }
>  
>  vm_offset_t
> -kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
> +kmem_malloc(struct vmem *vmem __unused, vm_size_t size, int flags)
>  {
>   struct vm_domainset_iter di;
>   vm_offset_t addr;
> @@ -412,7 +407,7 @@ 

svn commit: r338018 - head/sys/vm

2018-08-18 Thread Alan Cox
Author: alc
Date: Sat Aug 18 18:33:50 2018
New Revision: 338018
URL: https://svnweb.freebsd.org/changeset/base/338018

Log:
  Eliminate the arena parameter to kmem_malloc_domain().  It is redundant.
  The domain and flags parameters suffice.  In fact, the related functions
  kmem_alloc_{attr,contig}_domain() don't have an arena parameter.
  
  Reviewed by:  kib, markj
  Differential Revision:  https://reviews.freebsd.org/D16713

Modified:
  head/sys/vm/uma_core.c
  head/sys/vm/vm_extern.h
  head/sys/vm/vm_kern.c

Modified: head/sys/vm/uma_core.c
==
--- head/sys/vm/uma_core.c  Sat Aug 18 16:03:15 2018(r338017)
+++ head/sys/vm/uma_core.c  Sat Aug 18 18:33:50 2018(r338018)
@@ -1169,7 +1169,7 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, int domai
void *p;/* Returned page */
 
*pflag = UMA_SLAB_KERNEL;
-   p = (void *) kmem_malloc_domain(kernel_arena, domain, bytes, wait);
+   p = (void *) kmem_malloc_domain(domain, bytes, wait);
 
return (p);
 }
@@ -3680,32 +3680,22 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
 void *
 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
 {
-   struct vmem *arena;
vm_offset_t addr;
uma_slab_t slab;
 
-#if VM_NRESERVLEVEL > 0
-   if (__predict_true((wait & M_EXEC) == 0))
-   arena = kernel_arena;
-   else
-   arena = kernel_rwx_arena;
-#else
-   arena = kernel_arena;
-#endif
-
slab = zone_alloc_item(slabzone, NULL, domain, wait);
if (slab == NULL)
return (NULL);
if (domain == UMA_ANYDOMAIN)
-   addr = kmem_malloc(arena, size, wait);
+   addr = kmem_malloc(NULL, size, wait);
else
-   addr = kmem_malloc_domain(arena, domain, size, wait);
+   addr = kmem_malloc_domain(domain, size, wait);
if (addr != 0) {
vsetslab(addr, slab);
slab->us_data = (void *)addr;
slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
 #if VM_NRESERVLEVEL > 0
-   if (__predict_false(arena == kernel_rwx_arena))
+   if (__predict_false((wait & M_EXEC) != 0))
slab->us_flags |= UMA_SLAB_KRWX;
 #endif
slab->us_size = size;

Modified: head/sys/vm/vm_extern.h
==
--- head/sys/vm/vm_extern.h Sat Aug 18 16:03:15 2018(r338017)
+++ head/sys/vm/vm_extern.h Sat Aug 18 18:33:50 2018(r338018)
@@ -65,8 +65,7 @@ vm_offset_t kmem_alloc_contig_domain(int domain, vm_si
 vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
 vm_memattr_t memattr);
 vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
-vm_offset_t kmem_malloc_domain(struct vmem *, int domain, vm_size_t size,
-int flags);
+vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
 void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
 
 /* This provides memory for previously allocated address space. */

Modified: head/sys/vm/vm_kern.c
==
--- head/sys/vm/vm_kern.c   Sat Aug 18 16:03:15 2018(r338017)
+++ head/sys/vm/vm_kern.c   Sat Aug 18 18:33:50 2018(r338018)
@@ -372,23 +372,18 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_of
  * Allocate wired-down pages in the kernel's address space.
  */
 vm_offset_t
-kmem_malloc_domain(struct vmem *vmem, int domain, vm_size_t size, int flags)
+kmem_malloc_domain(int domain, vm_size_t size, int flags)
 {
vmem_t *arena;
vm_offset_t addr;
int rv;
 
 #if VM_NRESERVLEVEL > 0
-   KASSERT(vmem == kernel_arena || vmem == kernel_rwx_arena,
-   ("kmem_malloc_domain: Only kernel_arena or kernel_rwx_arena "
-   "are supported."));
-   if (__predict_true(vmem == kernel_arena))
+   if (__predict_true((flags & M_EXEC) == 0))
arena = vm_dom[domain].vmd_kernel_arena;
else
arena = vm_dom[domain].vmd_kernel_rwx_arena;
 #else
-   KASSERT(vmem == kernel_arena,
-   ("kmem_malloc_domain: Only kernel_arena is supported."));
arena = vm_dom[domain].vmd_kernel_arena;
 #endif
size = round_page(size);
@@ -404,7 +399,7 @@ kmem_malloc_domain(struct vmem *vmem, int domain, vm_s
 }
 
 vm_offset_t
-kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
+kmem_malloc(struct vmem *vmem __unused, vm_size_t size, int flags)
 {
struct vm_domainset_iter di;
vm_offset_t addr;
@@ -412,7 +407,7 @@ kmem_malloc(struct vmem *vmem, vm_size_t size, int fla
 
vm_domainset_iter_malloc_init(, kernel_object, , );
do {
-   addr = kmem_malloc_domain(vmem, domain, size, flags);
+   addr = kmem_malloc_domain(domain,