Author: markj
Date: Sat Sep  3 20:38:13 2016
New Revision: 305362
URL: https://svnweb.freebsd.org/changeset/base/305362

Log:
  Remove support for idle page zeroing.
  
  Idle page zeroing has been disabled by default on all architectures since
  r170816 and has some bugs that make it seemingly unusable. Specifically,
  the idle-priority pagezero thread exacerbates contention for the free page
  lock, and yields the CPU without releasing it in non-preemptive kernels. The
  pagezero thread also does not behave correctly when superpage reservations
  are enabled: its target is a function of v_free_count, which includes
  reserved-but-free pages, but it is only able to zero pages belonging to the
  physical memory allocator.
  
  Reviewed by:  alc, imp, kib
  Differential Revision:        https://reviews.freebsd.org/D7714

Deleted:
  head/sys/vm/vm_zeroidle.c
Modified:
  head/share/man/man9/Makefile
  head/share/man/man9/pmap.9
  head/share/man/man9/pmap_zero_page.9
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap-v4.c
  head/sys/arm/arm/pmap-v6.c
  head/sys/arm64/arm64/pmap.c
  head/sys/conf/files
  head/sys/i386/i386/pmap.c
  head/sys/i386/include/pmap.h
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/powerpc/mmu_if.m
  head/sys/powerpc/powerpc/pmap_dispatch.c
  head/sys/riscv/riscv/pmap.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/vm/pmap.h
  head/sys/vm/vm_meter.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_phys.c
  head/sys/vm/vm_phys.h

Modified: head/share/man/man9/Makefile
==============================================================================
--- head/share/man/man9/Makefile        Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/share/man/man9/Makefile        Sat Sep  3 20:38:13 2016        
(r305362)
@@ -1391,8 +1391,7 @@ MLINKS+=pmap_quick_enter_page.9 pmap_qui
 MLINKS+=pmap_remove.9 pmap_remove_all.9 \
        pmap_remove.9 pmap_remove_pages.9
 MLINKS+=pmap_resident_count.9 pmap_wired_count.9
-MLINKS+=pmap_zero_page.9 pmap_zero_area.9 \
-       pmap_zero_page.9 pmap_zero_idle.9
+MLINKS+=pmap_zero_page.9 pmap_zero_area.9
 MLINKS+=printf.9 log.9 \
        printf.9 tprintf.9 \
        printf.9 uprintf.9

Modified: head/share/man/man9/pmap.9
==============================================================================
--- head/share/man/man9/pmap.9  Sat Sep  3 19:09:01 2016        (r305361)
+++ head/share/man/man9/pmap.9  Sat Sep  3 20:38:13 2016        (r305362)
@@ -25,7 +25,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd August 3, 2014
+.Dd August 30, 2016
 .Dt PMAP 9
 .Os
 .Sh NAME
@@ -121,7 +121,6 @@ operation.
 .Xr pmap_unwire 9 ,
 .Xr pmap_wired_count 9 ,
 .Xr pmap_zero_area 9 ,
-.Xr pmap_zero_idle 9 ,
 .Xr pmap_zero_page 9 ,
 .Xr vm_map 9
 .Sh AUTHORS

Modified: head/share/man/man9/pmap_zero_page.9
==============================================================================
--- head/share/man/man9/pmap_zero_page.9        Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/share/man/man9/pmap_zero_page.9        Sat Sep  3 20:38:13 2016        
(r305362)
@@ -25,13 +25,12 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd July 21, 2003
+.Dd August 30, 2016
 .Dt PMAP_ZERO 9
 .Os
 .Sh NAME
 .Nm pmap_zero_page ,
 .Nm pmap_zero_area ,
-.Nm pmap_zero_idle
 .Nd zero-fill a page using machine-dependent optimizations
 .Sh SYNOPSIS
 .In sys/param.h
@@ -41,8 +40,6 @@
 .Fn pmap_zero_page "vm_page_t m"
 .Ft void
 .Fn pmap_zero_page_area "vm_page_t m" "int off" "int size"
-.Ft void
-.Fn pmap_zero_page_idle "vm_page_t m"
 .Sh DESCRIPTION
 The
 .Fn pmap_zero_page
@@ -53,14 +50,6 @@ function is used to zero-fill an area of
 The range specified must not cross a page boundary; it must be contained
 entirely within a single page.
 .Pp
-The
-.Fn pmap_zero_page_idle
-interface is used by the
-.Nm vm_pagezero
-process.
-The system-wide
-.Va Giant
-lock should not be required to be held in order to call this interface.
 .Sh IMPLEMENTATION NOTES
 This function is required to be implemented for each architecture supported by
 .Fx .

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/amd64/amd64/pmap.c Sat Sep  3 20:38:13 2016        (r305362)
@@ -5179,19 +5179,6 @@ pmap_zero_page_area(vm_page_t m, int off
 }
 
 /*
- * Zero the specified hardware page in a way that minimizes cache thrashing.
- * This is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-       vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-       sse2_pagezero((void *)va);
-}
-
-/*
  * Copy 1 specified hardware page to another.
  */
 void

Modified: head/sys/arm/arm/pmap-v4.c
==============================================================================
--- head/sys/arm/arm/pmap-v4.c  Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/arm/arm/pmap-v4.c  Sat Sep  3 20:38:13 2016        (r305362)
@@ -4079,19 +4079,6 @@ pmap_zero_page_area(vm_page_t m, int off
 }
 
 
-/*
- *     pmap_zero_page_idle zeros the specified hardware page by mapping
- *     the page into KVM and using bzero to clear its contents.  This
- *     is intended to be called from the vm_pagezero process only and
- *     outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-       pmap_zero_page(m);
-}
-
 #if 0
 /*
  * pmap_clean_page()

Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c  Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/arm/arm/pmap-v6.c  Sat Sep  3 20:38:13 2016        (r305362)
@@ -306,8 +306,6 @@ struct sysmaps {
        caddr_t CADDR3;
 };
 static struct sysmaps sysmaps_pcpu[MAXCPU];
-static pt2_entry_t *CMAP3;
-static caddr_t CADDR3;
 caddr_t _tmppt = 0;
 
 struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */
@@ -1176,7 +1174,6 @@ pmap_bootstrap(vm_offset_t firstaddr)
        /*
         * Local CMAP1/CMAP2 are used for zeroing and copying pages.
         * Local CMAP3 is used for data cache cleaning.
-        * Global CMAP3 is used for the idle process page zeroing.
         */
        for (i = 0; i < MAXCPU; i++) {
                sysmaps = &sysmaps_pcpu[i];
@@ -1185,7 +1182,6 @@ pmap_bootstrap(vm_offset_t firstaddr)
                SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1);
                SYSMAP(caddr_t, sysmaps->CMAP3, sysmaps->CADDR3, 1);
        }
-       SYSMAP(caddr_t, CMAP3, CADDR3, 1);
 
        /*
         * Crashdump maps.
@@ -5805,27 +5801,6 @@ pmap_zero_page_area(vm_page_t m, int off
 }
 
 /*
- *     pmap_zero_page_idle zeros the specified hardware page by mapping
- *     the page into KVM and using bzero to clear its contents.  This
- *     is intended to be called from the vm_pagezero process only and
- *     outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-       if (pte2_load(CMAP3) != 0)
-               panic("%s: CMAP3 busy", __func__);
-       sched_pin();
-       pte2_store(CMAP3, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW,
-           vm_page_pte2_attr(m)));
-       pagezero(CADDR3);
-       pte2_clear(CMAP3);
-       tlb_flush((vm_offset_t)CADDR3);
-       sched_unpin();
-}
-
-/*
  *     pmap_copy_page copies the specified (machine independent)
  *     page by mapping the page into virtual memory and using
  *     bcopy to copy the page, one machine dependent page at a

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/arm64/arm64/pmap.c Sat Sep  3 20:38:13 2016        (r305362)
@@ -3264,20 +3264,6 @@ pmap_zero_page_area(vm_page_t m, int off
 }
 
 /*
- *     pmap_zero_page_idle zeros the specified hardware page by mapping
- *     the page into KVM and using bzero to clear its contents.  This
- *     is intended to be called from the vm_pagezero process only and
- *     outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-       vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-       pagezero((void *)va);
-}
-
-/*
  *     pmap_copy_page copies the specified (machine independent)
  *     page by mapping the page into virtual memory and using
  *     bcopy to copy the page, one machine dependent page at a

Modified: head/sys/conf/files
==============================================================================
--- head/sys/conf/files Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/conf/files Sat Sep  3 20:38:13 2016        (r305362)
@@ -4369,7 +4369,6 @@ vm/vm_radix.c                     standard
 vm/vm_reserv.c                 standard
 vm/vm_domain.c                 standard
 vm/vm_unix.c                   standard
-vm/vm_zeroidle.c               standard
 vm/vnode_pager.c               standard
 xen/features.c                 optional xenhvm
 xen/xenbus/xenbus_if.m         optional xenhvm

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/i386/i386/pmap.c   Sat Sep  3 20:38:13 2016        (r305362)
@@ -444,7 +444,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
 
        /*
         * CMAP1/CMAP2 are used for zeroing and copying pages.
-        * CMAP3 is used for the idle process page zeroing.
+        * CMAP3 is used for the boot-time memory test.
         */
        for (i = 0; i < MAXCPU; i++) {
                sysmaps = &sysmaps_pcpu[i];
@@ -452,7 +452,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
                SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
                SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
        }
-       SYSMAP(caddr_t, CMAP3, CADDR3, 1)
+       SYSMAP(caddr_t, CMAP3, CADDR3, 1);
 
        /*
         * Crashdump maps.
@@ -4242,26 +4242,6 @@ pmap_zero_page_area(vm_page_t m, int off
 }
 
 /*
- * Zero the specified hardware page in a way that minimizes cache thrashing.
- * This is intended to be called from the vm_pagezero process only and
- * outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-       if (*CMAP3)
-               panic("pmap_zero_page_idle: CMAP3 busy");
-       sched_pin();
-       *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-           pmap_cache_bits(m->md.pat_mode, 0);
-       invlcaddr(CADDR3);
-       pagezero(CADDR3);
-       *CMAP3 = 0;
-       sched_unpin();
-}
-
-/*
  * Copy 1 specified hardware page to another.
  */
 void

Modified: head/sys/i386/include/pmap.h
==============================================================================
--- head/sys/i386/include/pmap.h        Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/sys/i386/include/pmap.h        Sat Sep  3 20:38:13 2016        
(r305362)
@@ -353,7 +353,7 @@ struct pv_chunk {
 
 #ifdef _KERNEL
 
-extern caddr_t CADDR3;
+extern caddr_t CADDR3;
 extern pt_entry_t *CMAP3;
 extern vm_paddr_t phys_avail[];
 extern vm_paddr_t dump_avail[];

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c   Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/mips/mips/pmap.c   Sat Sep  3 20:38:13 2016        (r305362)
@@ -2558,24 +2558,6 @@ pmap_zero_page_area(vm_page_t m, int off
        }
 }
 
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-       vm_offset_t va;
-       vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
-
-       if (MIPS_DIRECT_MAPPABLE(phys)) {
-               va = MIPS_PHYS_TO_DIRECT(phys);
-               bzero((caddr_t)va, PAGE_SIZE);
-               mips_dcache_wbinv_range(va, PAGE_SIZE);
-       } else {
-               va = pmap_lmem_map1(phys);
-               bzero((caddr_t)va, PAGE_SIZE);
-               mips_dcache_wbinv_range(va, PAGE_SIZE);
-               pmap_lmem_unmap();
-       }
-}
-
 /*
  *     pmap_copy_page copies the specified (machine independent)
  *     page by mapping the page into virtual memory and using

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/sys/powerpc/aim/mmu_oea.c      Sat Sep  3 20:38:13 2016        
(r305362)
@@ -300,7 +300,6 @@ void moea_remove_write(mmu_t, vm_page_t)
 void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea_zero_page(mmu_t, vm_page_t);
 void moea_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea_zero_page_idle(mmu_t, vm_page_t);
 void moea_activate(mmu_t, struct thread *);
 void moea_deactivate(mmu_t, struct thread *);
 void moea_cpu_bootstrap(mmu_t, int);
@@ -349,7 +348,6 @@ static mmu_method_t moea_methods[] = {
        MMUMETHOD(mmu_unwire,           moea_unwire),
        MMUMETHOD(mmu_zero_page,        moea_zero_page),
        MMUMETHOD(mmu_zero_page_area,   moea_zero_page_area),
-       MMUMETHOD(mmu_zero_page_idle,   moea_zero_page_idle),
        MMUMETHOD(mmu_activate,         moea_activate),
        MMUMETHOD(mmu_deactivate,       moea_deactivate),
        MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),
@@ -1081,13 +1079,6 @@ moea_zero_page_area(mmu_t mmu, vm_page_t
        bzero(va, size);
 }
 
-void
-moea_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-
-       moea_zero_page(mmu, m);
-}
-
 vm_offset_t
 moea_quick_enter_page(mmu_t mmu, vm_page_t m)
 {

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/sys/powerpc/aim/mmu_oea64.c    Sat Sep  3 20:38:13 2016        
(r305362)
@@ -265,7 +265,6 @@ void moea64_remove_write(mmu_t, vm_page_
 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
 void moea64_zero_page(mmu_t, vm_page_t);
 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea64_zero_page_idle(mmu_t, vm_page_t);
 void moea64_activate(mmu_t, struct thread *);
 void moea64_deactivate(mmu_t, struct thread *);
 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
@@ -314,7 +313,6 @@ static mmu_method_t moea64_methods[] = {
        MMUMETHOD(mmu_unwire,           moea64_unwire),
        MMUMETHOD(mmu_zero_page,        moea64_zero_page),
        MMUMETHOD(mmu_zero_page_area,   moea64_zero_page_area),
-       MMUMETHOD(mmu_zero_page_idle,   moea64_zero_page_idle),
        MMUMETHOD(mmu_activate,         moea64_activate),
        MMUMETHOD(mmu_deactivate,       moea64_deactivate),
        MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
@@ -1230,13 +1228,6 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)
                mtx_unlock(&moea64_scratchpage_mtx);
 }
 
-void
-moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-
-       moea64_zero_page(mmu, m);
-}
-
 vm_offset_t
 moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
 {

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/sys/powerpc/booke/pmap.c       Sat Sep  3 20:38:13 2016        
(r305362)
@@ -130,12 +130,6 @@ static struct mtx zero_page_mutex;
 
 static struct mtx tlbivax_mutex;
 
-/*
- * Reserved KVA space for mmu_booke_zero_page_idle. This is used
- * by idle thred only, no lock required.
- */
-static vm_offset_t zero_page_idle_va;
-
 /* Reserved KVA space and mutex for mmu_booke_copy_page. */
 static vm_offset_t copy_page_src_va;
 static vm_offset_t copy_page_dst_va;
@@ -312,7 +306,6 @@ static void         mmu_booke_remove_write(mmu_
 static void            mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, 
vm_offset_t);
 static void            mmu_booke_zero_page(mmu_t, vm_page_t);
 static void            mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
-static void            mmu_booke_zero_page_idle(mmu_t, vm_page_t);
 static void            mmu_booke_activate(mmu_t, struct thread *);
 static void            mmu_booke_deactivate(mmu_t, struct thread *);
 static void            mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
@@ -371,7 +364,6 @@ static mmu_method_t mmu_booke_methods[] 
        MMUMETHOD(mmu_unwire,           mmu_booke_unwire),
        MMUMETHOD(mmu_zero_page,        mmu_booke_zero_page),
        MMUMETHOD(mmu_zero_page_area,   mmu_booke_zero_page_area),
-       MMUMETHOD(mmu_zero_page_idle,   mmu_booke_zero_page_idle),
        MMUMETHOD(mmu_activate,         mmu_booke_activate),
        MMUMETHOD(mmu_deactivate,       mmu_booke_deactivate),
        MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
@@ -1147,14 +1139,11 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
        /* Allocate KVA space for page zero/copy operations. */
        zero_page_va = virtual_avail;
        virtual_avail += PAGE_SIZE;
-       zero_page_idle_va = virtual_avail;
-       virtual_avail += PAGE_SIZE;
        copy_page_src_va = virtual_avail;
        virtual_avail += PAGE_SIZE;
        copy_page_dst_va = virtual_avail;
        virtual_avail += PAGE_SIZE;
        debugf("zero_page_va = 0x%08x\n", zero_page_va);
-       debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
        debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
        debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
 
@@ -2326,23 +2315,6 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_
        mtx_unlock(&copy_page_mutex);
 }
 
-/*
- * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
- * into virtual memory and using bzero to clear its contents. This is intended
- * to be called from the vm_pagezero process only and outside of Giant. No
- * lock is required.
- */
-static void
-mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
-{
-       vm_offset_t va;
-
-       va = zero_page_idle_va;
-       mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
-       bzero((caddr_t)va, PAGE_SIZE);
-       mmu_booke_kremove(mmu, va);
-}
-
 static vm_offset_t
 mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
 {

Modified: head/sys/powerpc/powerpc/mmu_if.m
==============================================================================
--- head/sys/powerpc/powerpc/mmu_if.m   Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/sys/powerpc/powerpc/mmu_if.m   Sat Sep  3 20:38:13 2016        
(r305362)
@@ -659,18 +659,6 @@ METHOD void zero_page_area {
 
 
 /**
- * @brief Called from the idle loop to zero pages. XXX I think locking
- * constraints might be different here compared to zero_page.
- *
- * @param _pg          physical page
- */
-METHOD void zero_page_idle {
-       mmu_t           _mmu;
-       vm_page_t       _pg;
-};
-
-
-/**
  * @brief Extract mincore(2) information from a mapping.
  *
  * @param _pmap                physical map

Modified: head/sys/powerpc/powerpc/pmap_dispatch.c
==============================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c    Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/sys/powerpc/powerpc/pmap_dispatch.c    Sat Sep  3 20:38:13 2016        
(r305362)
@@ -380,14 +380,6 @@ pmap_zero_page_area(vm_page_t m, int off
        MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
 }
 
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-
-       CTR2(KTR_PMAP, "%s(%p)", __func__, m);
-       MMU_ZERO_PAGE_IDLE(mmu_obj, m);
-}
-
 int
 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 {

Modified: head/sys/riscv/riscv/pmap.c
==============================================================================
--- head/sys/riscv/riscv/pmap.c Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/riscv/riscv/pmap.c Sat Sep  3 20:38:13 2016        (r305362)
@@ -2538,20 +2538,6 @@ pmap_zero_page_area(vm_page_t m, int off
 }
 
 /*
- *     pmap_zero_page_idle zeros the specified hardware page by mapping 
- *     the page into KVM and using bzero to clear its contents.  This
- *     is intended to be called from the vm_pagezero process only and
- *     outside of Giant.
- */
-void
-pmap_zero_page_idle(vm_page_t m)
-{
-       vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-
-       pagezero((void *)va);
-}
-
-/*
  *     pmap_copy_page copies the specified (machine independent)
  *     page by mapping the page into virtual memory and using
  *     bcopy to copy the page, one machine dependent page at a

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c     Sat Sep  3 19:09:01 2016        
(r305361)
+++ head/sys/sparc64/sparc64/pmap.c     Sat Sep  3 20:38:13 2016        
(r305362)
@@ -223,10 +223,6 @@ PMAP_STATS_VAR(pmap_nzero_page_area);
 PMAP_STATS_VAR(pmap_nzero_page_area_c);
 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
-PMAP_STATS_VAR(pmap_nzero_page_idle);
-PMAP_STATS_VAR(pmap_nzero_page_idle_c);
-PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
-PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
 PMAP_STATS_VAR(pmap_ncopy_page);
 PMAP_STATS_VAR(pmap_ncopy_page_c);
 PMAP_STATS_VAR(pmap_ncopy_page_oc);
@@ -1849,35 +1845,6 @@ pmap_zero_page_area(vm_page_t m, int off
 }
 
 void
-pmap_zero_page_idle(vm_page_t m)
-{
-       struct tte *tp;
-       vm_offset_t va;
-       vm_paddr_t pa;
-
-       KASSERT((m->flags & PG_FICTITIOUS) == 0,
-           ("pmap_zero_page_idle: fake page"));
-       PMAP_STATS_INC(pmap_nzero_page_idle);
-       pa = VM_PAGE_TO_PHYS(m);
-       if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) {
-               PMAP_STATS_INC(pmap_nzero_page_idle_c);
-               va = TLB_PHYS_TO_DIRECT(pa);
-               cpu_block_zero((void *)va, PAGE_SIZE);
-       } else if (m->md.color == -1) {
-               PMAP_STATS_INC(pmap_nzero_page_idle_nc);
-               aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
-       } else {
-               PMAP_STATS_INC(pmap_nzero_page_idle_oc);
-               va = pmap_idle_map + (m->md.color * PAGE_SIZE);
-               tp = tsb_kvtotte(va);
-               tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
-               tp->tte_vpn = TV_VPN(va, TS_8K);
-               cpu_block_zero((void *)va, PAGE_SIZE);
-               tlb_page_demap(kernel_pmap, va);
-       }
-}
-
-void
 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 {
        vm_offset_t vdst;

Modified: head/sys/vm/pmap.h
==============================================================================
--- head/sys/vm/pmap.h  Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/vm/pmap.h  Sat Sep  3 20:38:13 2016        (r305362)
@@ -153,7 +153,6 @@ boolean_t    pmap_ts_referenced(vm_page_t 
 void            pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end);
 void            pmap_zero_page(vm_page_t);
 void            pmap_zero_page_area(vm_page_t, int off, int size);
-void            pmap_zero_page_idle(vm_page_t);
 
 #define        pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
 #define        pmap_wired_count(pm)    ((pm)->pm_stats.wired_count)

Modified: head/sys/vm/vm_meter.c
==============================================================================
--- head/sys/vm/vm_meter.c      Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/vm/vm_meter.c      Sat Sep  3 20:38:13 2016        (r305362)
@@ -306,6 +306,3 @@ VM_STATS_VM(v_forkpages, "VM pages affec
 VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()");
 VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()");
 VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel");
-
-SYSCTL_INT(_vm_stats_misc, OID_AUTO, zero_page_count, CTLFLAG_RD,
-       &vm_page_zero_count, 0, "Number of zero-ed free pages");

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/vm/vm_page.c       Sat Sep  3 20:38:13 2016        (r305362)
@@ -134,7 +134,6 @@ struct mtx_padalign pa_lock[PA_LOCK_COUN
 vm_page_t vm_page_array;
 long vm_page_array_size;
 long first_page;
-int vm_page_zero_count;
 
 static int boot_pages = UMA_BOOT_PAGES;
 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
@@ -1735,8 +1734,6 @@ vm_page_alloc(vm_object_t object, vm_pin
                KASSERT(m->valid == 0,
                    ("vm_page_alloc: free page %p is valid", m));
                vm_phys_freecnt_adj(m, -1);
-               if ((m->flags & PG_ZERO) != 0)
-                       vm_page_zero_count--;
        }
        mtx_unlock(&vm_page_queue_free_mtx);
 
@@ -2042,8 +2039,6 @@ vm_page_alloc_init(vm_page_t m)
                KASSERT(m->valid == 0,
                    ("vm_page_alloc_init: free page %p is valid", m));
                vm_phys_freecnt_adj(m, -1);
-               if ((m->flags & PG_ZERO) != 0)
-                       vm_page_zero_count--;
        }
        return (drop);
 }
@@ -2597,7 +2592,6 @@ cached:
 #endif
                                vm_phys_free_pages(m, 0);
                } while ((m = SLIST_FIRST(&free)) != NULL);
-               vm_page_zero_idle_wakeup();
                vm_page_free_wakeup();
                mtx_unlock(&vm_page_queue_free_mtx);
        }
@@ -3041,10 +3035,6 @@ vm_page_free_toq(vm_page_t m)
                if (TRUE)
 #endif
                        vm_phys_free_pages(m, 0);
-               if ((m->flags & PG_ZERO) != 0)
-                       ++vm_page_zero_count;
-               else
-                       vm_page_zero_idle_wakeup();
                vm_page_free_wakeup();
                mtx_unlock(&vm_page_queue_free_mtx);
        }

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h       Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/vm/vm_page.h       Sat Sep  3 20:38:13 2016        (r305362)
@@ -504,7 +504,6 @@ void vm_page_test_dirty (vm_page_t);
 vm_page_bits_t vm_page_bits(int base, int size);
 void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
 void vm_page_free_toq(vm_page_t m);
-void vm_page_zero_idle_wakeup(void);
 
 void vm_page_dirty_KBI(vm_page_t m);
 void vm_page_lock_KBI(vm_page_t m, const char *file, int line);

Modified: head/sys/vm/vm_phys.c
==============================================================================
--- head/sys/vm/vm_phys.c       Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/vm/vm_phys.c       Sat Sep  3 20:38:13 2016        (r305362)
@@ -132,10 +132,6 @@ CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_
 CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
 #endif
 
-static int cnt_prezero;
-SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
-    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");
-
 static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
 SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
     NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");
@@ -1298,53 +1294,6 @@ vm_phys_unfree_page(vm_page_t m)
 }
 
 /*
- * Try to zero one physical page.  Used by an idle priority thread.
- */
-boolean_t
-vm_phys_zero_pages_idle(void)
-{
-       static struct vm_freelist *fl;
-       static int flind, oind, pind;
-       vm_page_t m, m_tmp;
-       int domain;
-
-       domain = vm_rr_selectdomain();
-       fl = vm_phys_free_queues[domain][0][0];
-       mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-       for (;;) {
-               TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
-                       for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
-                               if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 
0) {
-                                       vm_phys_unfree_page(m_tmp);
-                                       vm_phys_freecnt_adj(m, -1);
-                                       mtx_unlock(&vm_page_queue_free_mtx);
-                                       pmap_zero_page_idle(m_tmp);
-                                       m_tmp->flags |= PG_ZERO;
-                                       mtx_lock(&vm_page_queue_free_mtx);
-                                       vm_phys_freecnt_adj(m, 1);
-                                       vm_phys_free_pages(m_tmp, 0);
-                                       vm_page_zero_count++;
-                                       cnt_prezero++;
-                                       return (TRUE);
-                               }
-                       }
-               }
-               oind++;
-               if (oind == VM_NFREEORDER) {
-                       oind = 0;
-                       pind++;
-                       if (pind == VM_NFREEPOOL) {
-                               pind = 0;
-                               flind++;
-                               if (flind == vm_nfreelists)
-                                       flind = 0;
-                       }
-                       fl = vm_phys_free_queues[domain][flind][pind];
-               }
-       }
-}
-
-/*
  * Allocate a contiguous set of physical pages of the given size
  * "npages" from the free lists.  All of the physical pages must be at
  * or above the given physical address "low" and below the given

Modified: head/sys/vm/vm_phys.h
==============================================================================
--- head/sys/vm/vm_phys.h       Sat Sep  3 19:09:01 2016        (r305361)
+++ head/sys/vm/vm_phys.h       Sat Sep  3 20:38:13 2016        (r305362)
@@ -88,7 +88,6 @@ vm_page_t vm_phys_scan_contig(u_long npa
     u_long alignment, vm_paddr_t boundary, int options);
 void vm_phys_set_pool(int pool, vm_page_t m, int order);
 boolean_t vm_phys_unfree_page(vm_page_t m);
-boolean_t vm_phys_zero_pages_idle(void);
 int vm_phys_mem_affinity(int f, int t);
 
 /*
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to