Author: markj
Date: Mon Jun 15 03:01:28 2020
New Revision: 362187
URL: https://svnweb.freebsd.org/changeset/base/362187

Log:
  MFC r361945, r362036:
  Stop computing a "sharedram" value when emulating Linux sysinfo(2).

Modified:
  stable/12/sys/compat/linux/linux_misc.c
  stable/12/sys/vm/vm_map.c
  stable/12/sys/vm/vm_map.h
  stable/12/sys/vm/vm_mmap.c
  stable/12/sys/vm/vm_unix.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/compat/linux/linux_misc.c
==============================================================================
--- stable/12/sys/compat/linux/linux_misc.c	Sun Jun 14 22:39:34 2020	(r362186)
+++ stable/12/sys/compat/linux/linux_misc.c	Mon Jun 15 03:01:28 2020	(r362187)
@@ -78,7 +78,6 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_extern.h>
-#include <vm/vm_object.h>
 #include <vm/swap_pager.h>
 
 #ifdef COMPAT_LINUX32
@@ -150,7 +149,6 @@ int
 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
 {
 	struct l_sysinfo sysinfo;
-	vm_object_t object;
 	int i, j;
 	struct timespec ts;
 
@@ -168,14 +166,13 @@ linux_sysinfo(struct thread *td, struct linux_sysinfo_
 	sysinfo.totalram = physmem * PAGE_SIZE;
 	sysinfo.freeram = sysinfo.totalram - vm_wire_count() * PAGE_SIZE;
 
+	/*
+	 * sharedram counts pages allocated to named, swap-backed objects such
+	 * as shared memory segments and tmpfs files. There is no cheap way to
+	 * compute this, so just leave the field unpopulated. Linux itself only
+	 * started setting this field in the 3.x timeframe.
+	 */
 	sysinfo.sharedram = 0;
-	mtx_lock(&vm_object_list_mtx);
-	TAILQ_FOREACH(object, &vm_object_list, object_list)
-		if (object->shadow_count > 1)
-			sysinfo.sharedram += object->resident_page_count;
-	mtx_unlock(&vm_object_list_mtx);
-
-	sysinfo.sharedram *= PAGE_SIZE;
 	sysinfo.bufferram = 0;
 
 	swap_pager_status(&i, &j);

Modified: stable/12/sys/vm/vm_map.c
==============================================================================
--- stable/12/sys/vm/vm_map.c	Sun Jun 14 22:39:34 2020	(r362186)
+++ stable/12/sys/vm/vm_map.c	Mon Jun 15 03:01:28 2020	(r362187)
@@ -3097,7 +3097,7 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 	u_long npages;
 	u_int last_timestamp;
 	int rv;
-	boolean_t need_wakeup, result, user_wire;
+	boolean_t need_wakeup, result, user_wire, user_wire_limit;
 	vm_prot_t prot;
 
 	VM_MAP_ASSERT_LOCKED(map);
@@ -3108,6 +3108,7 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 	if (flags & VM_MAP_WIRE_WRITE)
 		prot |= VM_PROT_WRITE;
 	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
+	user_wire_limit = (flags & VM_MAP_WIRE_USER_LIMIT) ? TRUE : FALSE;
 	VM_MAP_RANGE_CHECK(map, start, end);
 	if (!vm_map_lookup_entry(map, start, &first_entry)) {
 		if (flags & VM_MAP_WIRE_HOLESOK)
@@ -3188,7 +3189,8 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 			entry->wired_count++;
 
 			npages = atop(entry->end - entry->start);
-			if (user_wire && !vm_map_wire_user_count_add(npages)) {
+			if (user_wire_limit &&
+			    !vm_map_wire_user_count_add(npages)) {
 				vm_map_wire_entry_failure(map, entry,
 				    entry->start);
 				end = entry->end;
@@ -3250,7 +3252,7 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 		last_timestamp = map->timestamp;
 		if (rv != KERN_SUCCESS) {
 			vm_map_wire_entry_failure(map, entry, faddr);
-			if (user_wire)
+			if (user_wire_limit)
 				vm_map_wire_user_count_sub(npages);
 			end = entry->end;
 			goto done;
@@ -3319,7 +3321,7 @@ done:
 		 */
 		if (entry->wired_count == 1) {
 			vm_map_entry_unwire(map, entry);
-			if (user_wire)
+			if (user_wire_limit)
 				vm_map_wire_user_count_sub(
 				    atop(entry->end - entry->start));
 		} else
@@ -4455,7 +4457,8 @@ retry:
 	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
 		rv = vm_map_wire_locked(map, grow_start,
 		    grow_start + grow_amount,
-		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+		    VM_MAP_WIRE_USER | VM_MAP_WIRE_USER_LIMIT |
+		    VM_MAP_WIRE_NOHOLES);
 	}
 	vm_map_lock_downgrade(map);

Modified: stable/12/sys/vm/vm_map.h
==============================================================================
--- stable/12/sys/vm/vm_map.h	Sun Jun 14 22:39:34 2020	(r362186)
+++ stable/12/sys/vm/vm_map.h	Mon Jun 15 03:01:28 2020	(r362187)
@@ -397,6 +397,8 @@ long vmspace_resident_count(struct vmspace *vmspace);
 #define	VM_MAP_WIRE_WRITE	4	/* Validate writable. */
 
+#define	VM_MAP_WIRE_USER_LIMIT	8	/* Enforce the user wiring limit */
+
 #ifdef _KERNEL
 boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t,
     vm_prot_t);
 vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);

Modified: stable/12/sys/vm/vm_mmap.c
==============================================================================
--- stable/12/sys/vm/vm_mmap.c	Sun Jun 14 22:39:34 2020	(r362186)
+++ stable/12/sys/vm/vm_mmap.c	Mon Jun 15 03:01:28 2020	(r362187)
@@ -1046,7 +1046,7 @@ kern_mlock(struct proc *proc, struct ucred *cred, uint
 	}
 #endif
 	error = vm_map_wire(map, start, end,
-	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+	    VM_MAP_WIRE_USER | VM_MAP_WIRE_USER_LIMIT | VM_MAP_WIRE_NOHOLES);
 #ifdef RACCT
 	if (racct_enable && error != KERN_SUCCESS) {
 		PROC_LOCK(proc);
@@ -1111,7 +1111,8 @@ sys_mlockall(struct thread *td, struct mlockall_args *
 	 * calling vm_fault_wire() for each page in the region.
 	 */
 	error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
-	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
+	    VM_MAP_WIRE_USER | VM_MAP_WIRE_USER_LIMIT |
+	    VM_MAP_WIRE_HOLESOK);
 	if (error == KERN_SUCCESS)
 		error = 0;
 	else if (error == KERN_RESOURCE_SHORTAGE)
@@ -1589,6 +1590,7 @@ vm_mmap_object(vm_map_t map, vm_offset_t *addr, vm_siz
 		if ((map->flags & MAP_WIREFUTURE) != 0)
 			(void)vm_map_wire_locked(map, *addr, *addr + size,
 			    VM_MAP_WIRE_USER |
+			    VM_MAP_WIRE_USER_LIMIT |
 			    ((flags & MAP_STACK) ? VM_MAP_WIRE_HOLESOK :
 			    VM_MAP_WIRE_NOHOLES));
 		vm_map_unlock(map);

Modified: stable/12/sys/vm/vm_unix.c
==============================================================================
--- stable/12/sys/vm/vm_unix.c	Sun Jun 14 22:39:34 2020	(r362186)
+++ stable/12/sys/vm/vm_unix.c	Mon Jun 15 03:01:28 2020	(r362187)
@@ -185,7 +185,8 @@ kern_break(struct thread *td, uintptr_t *addr)
 		    0);
 		if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
 			rv = vm_map_wire_locked(map, old, new,
-			    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+			    VM_MAP_WIRE_USER | VM_MAP_WIRE_USER_LIMIT |
+			    VM_MAP_WIRE_NOHOLES);
 			if (rv != KERN_SUCCESS)
 				vm_map_delete(map, old, new);
 		}
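
For context, a minimal Linux-side sketch (not part of this change) of how the
field in question is consumed. It assumes only the standard glibc sysinfo(2)
wrapper and <sys/sysinfo.h>; when such a binary runs under the Linuxulator
after this revision, sharedram simply reads back as 0, much as it did on
pre-3.x Linux kernels.

/*
 * Illustrative sketch only; not taken from the commit. Prints the memory
 * fields that linux_sysinfo() fills in for emulated Linux binaries.
 */
#include <stdio.h>
#include <sys/sysinfo.h>

int
main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) != 0) {
		perror("sysinfo");
		return (1);
	}
	/* The *ram fields are expressed in units of mem_unit bytes. */
	printf("totalram:  %llu bytes\n",
	    (unsigned long long)si.totalram * si.mem_unit);
	printf("freeram:   %llu bytes\n",
	    (unsigned long long)si.freeram * si.mem_unit);
	printf("sharedram: %llu bytes\n",	/* reported as 0 under emulation */
	    (unsigned long long)si.sharedram * si.mem_unit);
	return (0);
}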