svn commit: r353163 - in head/sys: arm64/include riscv/include

2019-10-06 Thread Alan Cox
Author: alc
Date: Mon Oct  7 04:22:03 2019
New Revision: 353163
URL: https://svnweb.freebsd.org/changeset/base/353163

Log:
  Eliminate an unused declaration.  The variable in question is only defined
  and used on sparc64.
  
  MFC after:    1 week

Modified:
  head/sys/arm64/include/vmparam.h
  head/sys/riscv/include/vmparam.h

Modified: head/sys/arm64/include/vmparam.h
==============================================================================
--- head/sys/arm64/include/vmparam.h    Mon Oct  7 03:37:28 2019    (r353162)
+++ head/sys/arm64/include/vmparam.h    Mon Oct  7 04:22:03 2019    (r353163)
@@ -234,7 +234,6 @@
 extern vm_paddr_t dmap_phys_base;
 extern vm_paddr_t dmap_phys_max;
 extern vm_offset_t dmap_max_addr;
-extern u_int tsb_kernel_ldd_phys;
 extern vm_offset_t vm_max_kernel_address;
 extern vm_offset_t init_pt_va;
 

Modified: head/sys/riscv/include/vmparam.h
==============================================================================
--- head/sys/riscv/include/vmparam.h    Mon Oct  7 03:37:28 2019    (r353162)
+++ head/sys/riscv/include/vmparam.h    Mon Oct  7 04:22:03 2019    (r353163)
@@ -228,7 +228,6 @@
 extern vm_paddr_t dmap_phys_base;
 extern vm_paddr_t dmap_phys_max;
 extern vm_offset_t dmap_max_addr;
-extern u_int tsb_kernel_ldd_phys;
 extern vm_offset_t vm_max_kernel_address;
 extern vm_offset_t init_pt_va;
 #endif


svn commit: r353162 - head/sys/arm64/arm64

2019-10-06 Thread Alan Cox
Author: alc
Date: Mon Oct  7 03:37:28 2019
New Revision: 353162
URL: https://svnweb.freebsd.org/changeset/base/353162

Log:
  Eliminate a redundant bzero().  The l0 page table page was already zeroed
  by efi_1t1_page().
  
  MFC after:    1 week

Modified:
  head/sys/arm64/arm64/efirt_machdep.c

Modified: head/sys/arm64/arm64/efirt_machdep.c
==============================================================================
--- head/sys/arm64/arm64/efirt_machdep.c    Mon Oct  7 03:28:11 2019    (r353161)
+++ head/sys/arm64/arm64/efirt_machdep.c    Mon Oct  7 03:37:28 2019    (r353162)
@@ -176,7 +176,6 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int 
efi_l0_page = efi_1t1_page();
VM_OBJECT_WUNLOCK(obj_1t1_pt);
efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
-   bzero(efi_l0, L0_ENTRIES * sizeof(*efi_l0));
 
for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
descsz)) {
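
Why the bzero() was redundant: the page-table pages handed out by
efi_1t1_page() are requested pre-zeroed from the VM system. A minimal
sketch of that allocation pattern, modeled on (but not copied from) the
real helper; the function name here is hypothetical, while the vm_page
flags are the real ones:

    /* Hypothetical helper: return a wired, zeroed page-table page. */
    static vm_page_t
    alloc_zeroed_pt_page(vm_object_t obj, vm_pindex_t idx)
    {
            vm_page_t m;

            m = vm_page_alloc(obj, idx,
                VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
            if (m == NULL)
                    return (NULL);
            /*
             * VM_ALLOC_ZERO is a hint: PG_ZERO is set only when the
             * page was already zeroed, so zero it ourselves otherwise.
             */
            if ((m->flags & PG_ZERO) == 0)
                    pmap_zero_page(m);
            return (m);
    }

Either branch hands back a zeroed page, so zeroing the l0 table a second
time in efi_create_1t1_map() was pure overhead.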


svn commit: r353161 - head/stand/powerpc/uboot

2019-10-06 Thread Kyle Evans
Author: kevans
Date: Mon Oct  7 03:28:11 2019
New Revision: 353161
URL: https://svnweb.freebsd.org/changeset/base/353161

Log:
  Revert r352557: powerpc/loader: Install ubldr without stripping
  
  This was committed due to what was later diagnosed as an msdosfs bug
  preventing in-place strip. This bug was fixed in r352564, and we agreed to
  keep the workaround in for a bit to give the driver fix a suitable amount
  of propagation time for folks building/installing powerpc/ubldr, seeing as
  how we were not in any hurry to revert.

Modified:
  head/stand/powerpc/uboot/Makefile

Modified: head/stand/powerpc/uboot/Makefile
==============================================================================
--- head/stand/powerpc/uboot/Makefile    Mon Oct  7 03:05:32 2019    (r353160)
+++ head/stand/powerpc/uboot/Makefile    Mon Oct  7 03:28:11 2019    (r353161)
@@ -13,7 +13,6 @@ LOADER_BZIP2_SUPPORT?=no
 
 BINDIR=/boot/uboot
 PROG=  ubldr
-STRIP=
 NEWVERSWHAT=   "U-Boot loader" ${MACHINE_ARCH}
 INSTALLFLAGS=  -b
 


svn commit: r353160 - head/stand/powerpc/ofw

2019-10-06 Thread Justin Hibbits
Author: jhibbits
Date: Mon Oct  7 03:05:32 2019
New Revision: 353160
URL: https://svnweb.freebsd.org/changeset/base/353160

Log:
  loader/powerpc64: Fix HV check for CAS usage
  
  Logic was backwards.  The function returns true if it *is* running as a
  hypervisor, whereas we want to only call the CAS utility if we're running as a
  guest.
  
  Reported by:  Shawn Anastasio 

Modified:
  head/stand/powerpc/ofw/cas.c

Modified: head/stand/powerpc/ofw/cas.c
==============================================================================
--- head/stand/powerpc/ofw/cas.c    Mon Oct  7 02:57:00 2019    (r353159)
+++ head/stand/powerpc/ofw/cas.c    Mon Oct  7 03:05:32 2019    (r353160)
@@ -203,7 +203,7 @@ ppc64_cas(void)
}
 
/* Skip CAS when running on PowerNV */
-   if (!ppc64_hv())
+   if (ppc64_hv())
return (0);
 
ihandle = OF_open("/");


svn commit: r353158 - head/sys/powerpc/aim

2019-10-06 Thread Justin Hibbits
Author: jhibbits
Date: Mon Oct  7 02:36:42 2019
New Revision: 353158
URL: https://svnweb.freebsd.org/changeset/base/353158

Log:
  powerpc64/pmap: Fix release order to match lock order in moea64_enter()
  
  Page PV lock is always taken first, so should be released last.  This also
  (trivially) shortens the hold time of the pmap lock.
  
  Submitted by: mjg

Modified:
  head/sys/powerpc/aim/mmu_oea64.c

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Mon Oct  7 01:03:14 2019    (r353157)
+++ head/sys/powerpc/aim/mmu_oea64.c    Mon Oct  7 02:36:42 2019    (r353158)
@@ -1453,8 +1453,8 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, v
moea64_pvo_enter(mmu, pvo, pvo_head, NULL);
}
}
-   PV_PAGE_UNLOCK(m);
PMAP_UNLOCK(pmap);
+   PV_PAGE_UNLOCK(m);
 
/* Free any dead pages */
if (error == EEXIST) {
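
The rule the change restores generalizes beyond pmap: when lock A is
always acquired before lock B, dropping B before A keeps releases in
LIFO order and minimizes the hold time of the inner lock. A userspace
analogue with POSIX mutexes (lock names are illustrative only):

    #include <pthread.h>

    static pthread_mutex_t pv_lock = PTHREAD_MUTEX_INITIALIZER;   /* outer */
    static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER; /* inner */

    static void
    enter_mapping(void)
    {
            pthread_mutex_lock(&pv_lock);     /* always taken first */
            pthread_mutex_lock(&pmap_lock);   /* always taken second */
            /* ... update the mapping ... */
            pthread_mutex_unlock(&pmap_lock); /* drop the inner lock early */
            pthread_mutex_unlock(&pv_lock);   /* outer lock released last */
    }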


svn commit: r353156 - in head/sys: netinet sys

2019-10-06 Thread Randall Stewart
Author: rrs
Date: Sun Oct  6 22:29:02 2019
New Revision: 353156
URL: https://svnweb.freebsd.org/changeset/base/353156

Log:
  Brad Davis identified a problem with the new LRO code: VLANs
  no longer worked. The problem was that the defines used the
  same space as the VLAN id. This commit does three things.
  1) Move the LRO-used fields to the PH_loc fields. This is
     safe since the entire PH_loc is used for IP reassembly,
     which the LRO code will not hit.
  2) Remove the old, unused pace fields from mbuf.h.
  3) The VLAN processing is not in the mbuf queueing code. Consequently,
     if a VLAN-tagged packet is submitted to Rack or BBR we need to bypass
     the mbuf queueing for now, until rack_bbr_common is updated to handle
     VLANs properly.
  
  Reported by:  Brad Davis

Modified:
  head/sys/netinet/tcp_lro.c
  head/sys/sys/mbuf.h

Modified: head/sys/netinet/tcp_lro.c
==============================================================================
--- head/sys/netinet/tcp_lro.c    Sun Oct  6 22:18:03 2019    (r353155)
+++ head/sys/netinet/tcp_lro.c    Sun Oct  6 22:29:02 2019    (r353156)
@@ -875,7 +875,14 @@ tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *l
 
/* Now lets lookup the inp first */
CURVNET_SET(lc->ifp->if_vnet);
-       if (tcplro_stacks_wanting_mbufq == 0)
+       /*
+        * XXXRRS Currently the common input handler for
+        * mbuf queuing cannot handle VLAN Tagged. This needs
+        * to be fixed and the or condition removed (i.e. the
+        * common code should do the right lookup for the vlan
+        * tag and anything else that the vlan_input() does).
+        */
+       if ((tcplro_stacks_wanting_mbufq == 0) ||
+           (le->m_head->m_flags & M_VLANTAG))
                goto skip_lookup;
        INP_INFO_RLOCK_ET(&V_tcbinfo, et);
        switch (le->eh_type) {

Modified: head/sys/sys/mbuf.h
==============================================================================
--- head/sys/sys/mbuf.h    Sun Oct  6 22:18:03 2019    (r353155)
+++ head/sys/sys/mbuf.h    Sun Oct  6 22:29:02 2019    (r353156)
@@ -194,18 +194,13 @@ struct pkthdr {
 };
 #define ether_vtag      PH_per.sixteen[0]
 #define PH_vt           PH_per
-#define vt_nrecs        sixteen[0]
-#define tso_segsz       PH_per.sixteen[1]
-#define lro_nsegs       tso_segsz
-#define csum_phsum      PH_per.sixteen[2]
-#define csum_data       PH_per.thirtytwo[1]
-#define lro_len         PH_per.sixteen[0] /* inbound during LRO */
-#define lro_csum        PH_per.sixteen[1] /* inbound during LRO */
-#define pace_thoff      PH_loc.sixteen[0]
-#define pace_tlen       PH_loc.sixteen[1]
-#define pace_drphdrlen  PH_loc.sixteen[2]
-#define pace_tos        PH_loc.eight[6]
-#define pace_lock       PH_loc.eight[7]
+#define vt_nrecs        sixteen[0]          /* mld and v6-ND */
+#define tso_segsz       PH_per.sixteen[1]   /* inbound after LRO */
+#define lro_nsegs       tso_segsz           /* inbound after LRO */
+#define csum_data       PH_per.thirtytwo[1] /* inbound from hardware up */
+#define lro_len         PH_loc.sixteen[0]   /* inbound during LRO (no reassembly) */
+#define lro_csum        PH_loc.sixteen[1]   /* inbound during LRO (no reassembly) */
+/* Note PH_loc is used during IP reassembly (all 8 bytes as a ptr) */
 
 /*
  * Description of external storage mapped into mbuf; valid only if M_EXT is
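
The root cause is worth seeing in isolation: two pkthdr macros aliased
the same union slot, so the LRO code overwrote the VLAN tag. A
self-contained toy model (the union and macro names mimic mbuf.h but are
not the real definitions):

    #include <stdint.h>
    #include <stdio.h>

    union ph_scratch {              /* toy stand-in for m_pkthdr.PH_per */
            uint8_t  eight[8];
            uint16_t sixteen[4];
            uint32_t thirtytwo[2];
    };

    #define ether_vtag(ph)  ((ph).sixteen[0])  /* VLAN id lives here */
    #define lro_len(ph)     ((ph).sixteen[0])  /* old define: same slot! */

    int
    main(void)
    {
            union ph_scratch per = { .sixteen = { 0 } };

            ether_vtag(per) = 100;   /* VLAN id recorded by the driver */
            lro_len(per) = 1448;     /* LRO then scribbles over it */
            printf("vlan id is now %u\n", (unsigned)ether_vtag(per));
            return (0);              /* prints 1448, not 100 */
    }

Moving lro_len and lro_csum into PH_loc, which the non-reassembly LRO
path never touches, removes the overlap.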


svn commit: r353155 - head/sys/ufs/ffs

2019-10-06 Thread Mateusz Guzik
Author: mjg
Date: Sun Oct  6 22:18:03 2019
New Revision: 353155
URL: https://svnweb.freebsd.org/changeset/base/353155

Log:
  ufs: add root vnode caching
  
  See r353150.
  
  Sponsored by:   The FreeBSD Foundation
  Differential Revision:  https://reviews.freebsd.org/D21646

Modified:
  head/sys/ufs/ffs/ffs_vfsops.c

Modified: head/sys/ufs/ffs/ffs_vfsops.c
==============================================================================
--- head/sys/ufs/ffs/ffs_vfsops.c    Sun Oct  6 22:17:29 2019    (r353154)
+++ head/sys/ufs/ffs/ffs_vfsops.c    Sun Oct  6 22:18:03 2019    (r353155)
@@ -109,7 +109,8 @@ static struct vfsops ufs_vfsops = {
        .vfs_mount =            ffs_mount,
        .vfs_cmount =           ffs_cmount,
        .vfs_quotactl =         ufs_quotactl,
-       .vfs_root =             ufs_root,
+       .vfs_root =             vfs_cache_root,
+       .vfs_cachedroot =       ufs_root,
        .vfs_statfs =           ffs_statfs,
        .vfs_sync =             ffs_sync,
        .vfs_uninit =           ffs_uninit,


svn commit: r353154 - head/sys/fs/nfsclient

2019-10-06 Thread Mateusz Guzik
Author: mjg
Date: Sun Oct  6 22:17:29 2019
New Revision: 353154
URL: https://svnweb.freebsd.org/changeset/base/353154

Log:
  nfsclient: add root vnode caching
  
  See r353150.
  
  Sponsored by:   The FreeBSD Foundation
  Differential Revision:  https://reviews.freebsd.org/D21646

Modified:
  head/sys/fs/nfsclient/nfs_clvfsops.c

Modified: head/sys/fs/nfsclient/nfs_clvfsops.c
==============================================================================
--- head/sys/fs/nfsclient/nfs_clvfsops.c    Sun Oct  6 22:17:11 2019    (r353153)
+++ head/sys/fs/nfsclient/nfs_clvfsops.c    Sun Oct  6 22:17:29 2019    (r353154)
@@ -136,7 +136,8 @@ static struct vfsops nfs_vfsops = {
        .vfs_init =             ncl_init,
        .vfs_mount =            nfs_mount,
        .vfs_cmount =           nfs_cmount,
-       .vfs_root =             nfs_root,
+       .vfs_root =             vfs_cache_root,
+       .vfs_cachedroot =       nfs_root,
        .vfs_statfs =           nfs_statfs,
        .vfs_sync =             nfs_sync,
        .vfs_uninit =           ncl_uninit,
@@ -1626,6 +1627,7 @@ mountnfs(struct nfs_args *argp, struct mount *mp, stru
 * Lose the lock but keep the ref.
 */
NFSVOPUNLOCK(*vpp, 0);
+   vfs_cache_root_set(mp, *vpp);
return (0);
}
error = EIO;


svn commit: r353153 - head/sys/fs/tmpfs

2019-10-06 Thread Mateusz Guzik
Author: mjg
Date: Sun Oct  6 22:17:11 2019
New Revision: 353153
URL: https://svnweb.freebsd.org/changeset/base/353153

Log:
  tmpfs: add root vnode caching
  
  See r353150.
  
  Sponsored by:   The FreeBSD Foundation
  Differential Revision:  https://reviews.freebsd.org/D21646

Modified:
  head/sys/fs/tmpfs/tmpfs_vfsops.c

Modified: head/sys/fs/tmpfs/tmpfs_vfsops.c
==============================================================================
--- head/sys/fs/tmpfs/tmpfs_vfsops.c    Sun Oct  6 22:16:55 2019    (r353152)
+++ head/sys/fs/tmpfs/tmpfs_vfsops.c    Sun Oct  6 22:17:11 2019    (r353153)
@@ -710,7 +710,8 @@ tmpfs_susp_clean(struct mount *mp __unused)
 struct vfsops tmpfs_vfsops = {
        .vfs_mount =            tmpfs_mount,
        .vfs_unmount =          tmpfs_unmount,
-       .vfs_root =             tmpfs_root,
+       .vfs_root =             vfs_cache_root,
+       .vfs_cachedroot =       tmpfs_root,
        .vfs_statfs =           tmpfs_statfs,
        .vfs_fhtovp =           tmpfs_fhtovp,
        .vfs_sync =             tmpfs_sync,


svn commit: r353152 - head/sys/fs/devfs

2019-10-06 Thread Mateusz Guzik
Author: mjg
Date: Sun Oct  6 22:16:55 2019
New Revision: 353152
URL: https://svnweb.freebsd.org/changeset/base/353152

Log:
  devfs: add root vnode caching
  
  See r353150.
  
  Sponsored by: The FreeBSD Foundation
  Differential Revision:    https://reviews.freebsd.org/D21646

Modified:
  head/sys/fs/devfs/devfs_vfsops.c

Modified: head/sys/fs/devfs/devfs_vfsops.c
==============================================================================
--- head/sys/fs/devfs/devfs_vfsops.c    Sun Oct  6 22:16:00 2019    (r353151)
+++ head/sys/fs/devfs/devfs_vfsops.c    Sun Oct  6 22:16:55 2019    (r353152)
@@ -156,6 +156,7 @@ devfs_mount(struct mount *mp)
}
 
VOP_UNLOCK(rvp, 0);
+   vfs_cache_root_set(mp, rvp);
 
vfs_mountedfrom(mp, "devfs");
 
@@ -237,7 +238,8 @@ devfs_statfs(struct mount *mp, struct statfs *sbp)
 
 static struct vfsops devfs_vfsops = {
        .vfs_mount =            devfs_mount,
-       .vfs_root =             devfs_root,
+       .vfs_root =             vfs_cache_root,
+       .vfs_cachedroot =       devfs_root,
        .vfs_statfs =           devfs_statfs,
        .vfs_unmount =          devfs_unmount,
 };


svn commit: r353151 - in head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs: . sys

2019-10-06 Thread Mateusz Guzik
Author: mjg
Date: Sun Oct  6 22:16:00 2019
New Revision: 353151
URL: https://svnweb.freebsd.org/changeset/base/353151

Log:
  zfs: add root vnode caching
  
  This replaces the approach added in r338927.
  
  See r353150.
  
  Sponsored by: The FreeBSD Foundation

Modified:
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h
  head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h    Sun Oct  6 22:14:32 2019    (r353150)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h    Sun Oct  6 22:16:00 2019    (r353151)
@@ -46,8 +46,6 @@ struct zfsvfs {
        zfsvfs_t        *z_parent;      /* parent fs */
        objset_t        *z_os;          /* objset reference */
        uint64_t        z_root;         /* id of root znode */
-       struct vnode    *z_rootvnode;   /* root vnode */
-       struct rmlock   z_rootvnodelock;/* protection for root vnode */
        uint64_t        z_unlinkedobj;  /* id of unlinked zapobj */
        uint64_t        z_max_blksz;    /* maximum block size for files */
        uint64_t        z_fuid_obj;     /* fuid table object number */

Modified: head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
==============================================================================
--- head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c    Sun Oct  6 22:14:32 2019    (r353150)
+++ head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c    Sun Oct  6 22:16:00 2019    (r353151)
@@ -65,7 +65,6 @@
 #include 
 #include 
 #include 
-#include <sys/rmlock.h>
 
 #include "zfs_comutil.h"
 
@@ -93,9 +92,6 @@ static int zfs_version_zpl = ZPL_VERSION;
 SYSCTL_INT(_vfs_zfs_version, OID_AUTO, zpl, CTLFLAG_RD, &zfs_version_zpl, 0,
 "ZPL_VERSION");
 
-static int zfs_root_setvnode(zfsvfs_t *zfsvfs);
-static void zfs_root_dropvnode(zfsvfs_t *zfsvfs);
-
 static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg);
 static int zfs_mount(vfs_t *vfsp);
 static int zfs_umount(vfs_t *vfsp, int fflag);
@@ -112,7 +108,8 @@ static void zfs_freevfs(vfs_t *vfsp);
 struct vfsops zfs_vfsops = {
        .vfs_mount =            zfs_mount,
        .vfs_unmount =          zfs_umount,
-       .vfs_root =             zfs_root,
+       .vfs_root =             vfs_cache_root,
+       .vfs_cachedroot =       zfs_root,
        .vfs_statfs =           zfs_statfs,
        .vfs_vget =             zfs_vget,
        .vfs_sync =             zfs_sync,
@@ -1213,8 +1210,6 @@ zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, 
        for (int i = 0; i != ZFS_OBJ_MTX_SZ; i++)
                mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
 
-       rm_init(&zfsvfs->z_rootvnodelock, "zfs root vnode lock");
-
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
*zfvp = NULL;
@@ -1321,8 +1316,6 @@ zfsvfs_free(zfsvfs_t *zfsvfs)
        rw_enter(&zfsvfs_lock, RW_READER);
        rw_exit(&zfsvfs_lock);
 
-       rm_destroy(&zfsvfs->z_rootvnodelock);
-
zfs_fuid_destroy(zfsvfs);
 
mutex_destroy(>z_znodes_lock);
@@ -1929,9 +1922,6 @@ zfs_mount(vfs_t *vfsp)
error = zfs_domount(vfsp, osname);
PICKUP_GIANT();
 
-   if (error == 0)
-   zfs_root_setvnode((zfsvfs_t *)vfsp->vfs_data);
-
 #ifdef illumos
/*
 * Add an extra VFS_HOLD on our parent vfs so that it can't
@@ -2004,65 +1994,14 @@ zfs_statfs(vfs_t *vfsp, struct statfs *statp)
 }
 
 static int
-zfs_root_setvnode(zfsvfs_t *zfsvfs)
-{
-   znode_t *rootzp;
-   int error;
-
-   ZFS_ENTER(zfsvfs);
-       error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
-   if (error != 0)
-   panic("could not zfs_zget for root vnode");
-   ZFS_EXIT(zfsvfs);
-
-       rm_wlock(&zfsvfs->z_rootvnodelock);
-       if (zfsvfs->z_rootvnode != NULL)
-               panic("zfs mount point already has a root vnode: %p\n",
-                   zfsvfs->z_rootvnode);
-       zfsvfs->z_rootvnode = ZTOV(rootzp);
-       rm_wunlock(&zfsvfs->z_rootvnodelock);
-   return (0);
-}
-
-static void
-zfs_root_putvnode(zfsvfs_t *zfsvfs)
-{
-   struct vnode *vp;
-
-       rm_wlock(&zfsvfs->z_rootvnodelock);
-       vp = zfsvfs->z_rootvnode;
-       zfsvfs->z_rootvnode = NULL;
-       rm_wunlock(&zfsvfs->z_rootvnodelock);
-   if (vp != NULL)
-   vrele(vp);
-}
-
-static int
 zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp)
 {
-   struct rm_priotracker tracker;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *rootzp;
int error;
 
-       rm_rlock(&zfsvfs->z_rootvnodelock, &tracker);
-       *vpp = zfsvfs->z_rootvnode;
-       if (*vpp != NULL && (((*vpp)->v_iflag & VI_DOOMED) == 0)) {
-               vrefact(*vpp);
-               rm_runlock(&zfsvfs->z_rootvnodelock, &tracker);
-   goto lock;
-   }
-   

svn commit: r353150 - in head/sys: kern sys

2019-10-06 Thread Mateusz Guzik
Author: mjg
Date: Sun Oct  6 22:14:32 2019
New Revision: 353150
URL: https://svnweb.freebsd.org/changeset/base/353150

Log:
  vfs: add optional root vnode caching
  
  Root vnodes are looked up all the time, e.g. when crossing a mount point.
  The currently used routines always perform a costly lookup, which can be
  trivially avoided.
  
  Reviewed by:  jeff (previous version), kib
  Sponsored by: The FreeBSD Foundation
  Differential Revision:    https://reviews.freebsd.org/D21646

Modified:
  head/sys/kern/vfs_init.c
  head/sys/kern/vfs_mount.c
  head/sys/kern/vfs_subr.c
  head/sys/sys/mount.h
  head/sys/sys/vnode.h

Modified: head/sys/kern/vfs_init.c
==============================================================================
--- head/sys/kern/vfs_init.c    Sun Oct  6 22:13:35 2019    (r353149)
+++ head/sys/kern/vfs_init.c    Sun Oct  6 22:14:32 2019    (r353150)
@@ -201,6 +201,17 @@ vfs_root_sigdefer(struct mount *mp, int flags, struct 
 }
 
 static int
+vfs_cachedroot_sigdefer(struct mount *mp, int flags, struct vnode **vpp)
+{
+   int prev_stops, rc;
+
+   prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
+   rc = (*mp->mnt_vfc->vfc_vfsops_sd->vfs_cachedroot)(mp, flags, vpp);
+   sigallowstop(prev_stops);
+   return (rc);
+}
+
+static int
 vfs_quotactl_sigdefer(struct mount *mp, int cmd, uid_t uid, void *arg)
 {
int prev_stops, rc;
@@ -343,6 +354,7 @@ static struct vfsops vfsops_sigdefer = {
        .vfs_mount =            vfs_mount_sigdefer,
        .vfs_unmount =          vfs_unmount_sigdefer,
        .vfs_root =             vfs_root_sigdefer,
+       .vfs_cachedroot =       vfs_cachedroot_sigdefer,
        .vfs_quotactl =         vfs_quotactl_sigdefer,
        .vfs_statfs =           vfs_statfs_sigdefer,
        .vfs_sync =             vfs_sync_sigdefer,

Modified: head/sys/kern/vfs_mount.c
==============================================================================
--- head/sys/kern/vfs_mount.c    Sun Oct  6 22:13:35 2019    (r353149)
+++ head/sys/kern/vfs_mount.c    Sun Oct  6 22:14:32 2019    (r353150)
@@ -134,6 +134,7 @@ mount_init(void *mem, int size, int flags)
M_WAITOK | M_ZERO);
mp->mnt_ref = 0;
mp->mnt_vfs_ops = 1;
+   mp->mnt_rootvnode = NULL;
return (0);
 }
 
@@ -582,6 +583,10 @@ vfs_mount_destroy(struct mount *mp)
panic("%s: vfs_ops should be 1 but %d found\n", __func__,
mp->mnt_vfs_ops);
 
+   if (mp->mnt_rootvnode != NULL)
+   panic("%s: mount point still has a root vnode %p\n", __func__,
+   mp->mnt_rootvnode);
+
if (mp->mnt_vnodecovered != NULL)
vrele(mp->mnt_vnodecovered);
 #ifdef MAC
@@ -1034,6 +1039,7 @@ vfs_domount_update(
)
 {
struct export_args export;
+   struct vnode *rootvp;
void *bufp;
struct mount *mp;
int error, export_error, len;
@@ -1099,7 +1105,10 @@ vfs_domount_update(
MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
if ((mp->mnt_flag & MNT_ASYNC) == 0)
mp->mnt_kern_flag &= ~MNTK_ASYNC;
+   rootvp = vfs_cache_root_clear(mp);
MNT_IUNLOCK(mp);
+   if (rootvp != NULL)
+   vrele(rootvp);
mp->mnt_optnew = *optlist;
vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);
 
@@ -1582,7 +1591,7 @@ vfs_mount_fetch_counter(struct mount *mp, enum mount_c
 int
 dounmount(struct mount *mp, int flags, struct thread *td)
 {
-   struct vnode *coveredvp;
+   struct vnode *coveredvp, *rootvp;
int error;
uint64_t async_flag;
int mnt_gen_r;
@@ -1630,12 +1639,15 @@ dounmount(struct mount *mp, int flags, struct thread *
return (EBUSY);
}
mp->mnt_kern_flag |= MNTK_UNMOUNT;
+   rootvp = vfs_cache_root_clear(mp);
if (flags & MNT_NONBUSY) {
MNT_IUNLOCK(mp);
error = vfs_check_usecounts(mp);
MNT_ILOCK(mp);
if (error != 0) {
dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT);
+   if (rootvp != NULL)
+   vrele(rootvp);
return (error);
}
}
@@ -1663,6 +1675,9 @@ dounmount(struct mount *mp, int flags, struct thread *
KASSERT(error == 0,
("%s: invalid return value for msleep in the drain path @ %s:%d",
__func__, __FILE__, __LINE__));
+
+   if (rootvp != NULL)
+   vrele(rootvp);
 
if (mp->mnt_flag & MNT_EXPUBLIC)
vfs_setpublicfs(NULL, NULL, NULL);

Modified: head/sys/kern/vfs_subr.c
==============================================================================
--- head/sys/kern/vfs_subr.c    Sun Oct  6 22:13:35 2019    (r353149)
+++ head/sys/kern/vfs_subr.c    Sun Oct  6 22:14:32 2019    (r353150)
@@ -5700,6 +5700,121 @@ vfs_unixify_accmode(accmode_t *accmode)
 }
 
 /*
+ * Clear out 
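
The fast path being added boils down to: keep a referenced root vnode in
struct mount and hand out references until that vnode is doomed, only
falling back to the filesystem's own routine (now reachable via
vfs_cachedroot) on a miss. A heavily simplified sketch that ignores the
locking, mount references, and unmount races the real vfs_cache_root()
must handle (the helper name is hypothetical; VFS_CACHEDROOT stands in
for the fs-specific callback):

    static int
    cached_root(struct mount *mp, int flags, struct vnode **vpp)
    {
            struct vnode *vp;

            vp = mp->mnt_rootvnode;
            if (vp != NULL && (vp->v_iflag & VI_DOOMED) == 0) {
                    vrefact(vp);    /* hit: just bump the reference */
                    *vpp = vp;
                    return (vn_lock(vp, flags | LK_RETRY));
            }
            /* miss: do the costly fs-specific lookup (and recache) */
            return (VFS_CACHEDROOT(mp, flags, vpp));
    }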

svn commit: r353149 - head/sys/amd64/amd64

2019-10-06 Thread Mateusz Guzik
Author: mjg
Date: Sun Oct  6 22:13:35 2019
New Revision: 353149
URL: https://svnweb.freebsd.org/changeset/base/353149

Log:
  amd64 pmap: implement per-superpage locks
  
  The current 256-lock sized array is a problem in the following ways:
  - it's way too small
  - there are 2 locks per cacheline
  - it is not NUMA-aware
  
  Solve these issues by introducing per-superpage locks backed by pages
  allocated from respective domains.
  
  This significantly reduces contention e.g. during poudriere -j 104.
  See the review for results.
  
  Reviewed by:  kib
  Discussed with:   jeff
  Sponsored by: The FreeBSD Foundation
  Differential Revision:    https://reviews.freebsd.org/D21833

Modified:
  head/sys/amd64/amd64/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c    Sun Oct  6 20:36:25 2019    (r353148)
+++ head/sys/amd64/amd64/pmap.c    Sun Oct  6 22:13:35 2019    (r353149)
@@ -316,13 +316,25 @@ pmap_pku_mask_bit(pmap_t pmap)
 #define PV_STAT(x) do { } while (0)
 #endif
 
-#define pa_index(pa)           ((pa) >> PDRSHIFT)
+#undef pa_index
+#define pa_index(pa)   ({                                              \
+       KASSERT((pa) <= vm_phys_segs[vm_phys_nsegs - 1].end,            \
+           ("address %lx beyond the last segment", (pa)));             \
+       (pa) >> PDRSHIFT;                                               \
+})
+#if VM_NRESERVLEVEL > 0
+#define pa_to_pmdp(pa)         (&pv_table[pa_index(pa)])
+#define pa_to_pvh(pa)          (&(pa_to_pmdp(pa)->pv_page))
+#define PHYS_TO_PV_LIST_LOCK(pa)       \
+       (&(pa_to_pmdp(pa)->pv_lock))
+#else
 #define pa_to_pvh(pa)          (&pv_table[pa_index(pa)])
 
 #define NPV_LIST_LOCKS         MAXCPU
 
 #define PHYS_TO_PV_LIST_LOCK(pa)       \
        (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+#endif
 
 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)  do {   \
        struct rwlock **_lockp = (lockp);               \
@@ -400,14 +412,22 @@ static int pmap_initialized;
 
 /*
  * Data for the pv entry allocation mechanism.
- * Updates to pv_invl_gen are protected by the pv_list_locks[]
- * elements, but reads are not.
+ * Updates to pv_invl_gen are protected by the pv list lock but reads are not.
  */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
 static struct mtx __exclusive_cache_line pv_chunks_mutex;
+#if VM_NRESERVLEVEL > 0
+struct pmap_large_md_page {
+   struct rwlock   pv_lock;
+   struct md_page  pv_page;
+   u_long pv_invl_gen;
+};
+static struct pmap_large_md_page *pv_table;
+#else
 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
 static u_long pv_invl_gen[NPV_LIST_LOCKS];
 static struct md_page *pv_table;
+#endif
 static struct md_page pv_dummy;
 
 /*
@@ -918,12 +938,21 @@ SYSCTL_LONG(_vm_pmap, OID_AUTO, invl_wait_slow, CTLFLA
 "Number of slow invalidation waits for lockless DI");
 #endif
 
+#if VM_NRESERVLEVEL > 0
 static u_long *
 pmap_delayed_invl_genp(vm_page_t m)
 {
 
+       return (&pa_to_pmdp(VM_PAGE_TO_PHYS(m))->pv_invl_gen);
+}
+#else
+static u_long *
+pmap_delayed_invl_genp(vm_page_t m)
+{
+
        return (&pv_invl_gen[pa_index(VM_PAGE_TO_PHYS(m)) % NPV_LIST_LOCKS]);
 }
+#endif
 
 static void
 pmap_delayed_invl_callout_func(void *arg __unused)
@@ -1803,6 +1832,112 @@ pmap_page_init(vm_page_t m)
m->md.pat_mode = PAT_WRITE_BACK;
 }
 
+#if VM_NRESERVLEVEL > 0
+static void
+pmap_init_pv_table(void)
+{
+   struct pmap_large_md_page *pvd;
+   vm_size_t s;
+   long start, end, highest, pv_npg;
+   int domain, i, j, pages;
+
+   /*
+* We strongly depend on the size being a power of two, so the assert
+* is overzealous. However, should the struct be resized to a
+* different power of two, the code below needs to be revisited.
+*/
+   CTASSERT((sizeof(*pvd) == 64));
+
+   /*
+* Calculate the size of the array.
+*/
+   pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
+   s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
+   s = round_page(s);
+   pv_table = (struct pmap_large_md_page *)kva_alloc(s);
+   if (pv_table == NULL)
+   panic("%s: kva_alloc failed\n", __func__);
+
+   /*
+* Iterate physical segments to allocate space for respective pages.
+*/
+   highest = -1;
+   s = 0;
+   for (i = 0; i < vm_phys_nsegs; i++) {
+   start = vm_phys_segs[i].start / NBPDR;
+   end = vm_phys_segs[i].end / NBPDR;
+   domain = vm_phys_segs[i].domain;
+
+   if (highest >= end)
+   continue;
+
+   if (start < highest) {
+   start = highest + 1;
+                       pvd = &pv_table[start];
+   } else {
+   /*
+* The 
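
The data-structure change is easiest to see in isolation: instead of
hashing a physical address into a small shared lock array, every 2MB
superpage gets its own cacheline-sized metadata entry, and backing the
table with pages from each segment's own domain is what makes it
NUMA-aware. A toy model (field sizes are placeholders chosen to total
64 bytes, not the kernel's real types):

    #include <stdint.h>

    #define PDRSHIFT 21     /* log2 of the 2MB superpage size on amd64 */

    struct md_entry {
            char     pv_lock[32];   /* placeholder for struct rwlock */
            char     pv_page[24];   /* placeholder for struct md_page */
            uint64_t pv_invl_gen;
    };
    _Static_assert(sizeof(struct md_entry) == 64, "one cacheline per entry");

    static struct md_entry *md_table;       /* one entry per superpage */

    /* Direct index by superpage number: no modulo, no lock sharing. */
    static inline struct md_entry *
    pa_to_md(uint64_t pa)
    {
            return (&md_table[pa >> PDRSHIFT]);
    }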

svn commit: r353147 - head/sys/powerpc/aim

2019-10-06 Thread Justin Hibbits
Author: jhibbits
Date: Sun Oct  6 19:11:01 2019
New Revision: 353147
URL: https://svnweb.freebsd.org/changeset/base/353147

Log:
  powerpc/pmap64: Properly parenthesize PV_LOCK_COUNT macros
  
  As pointed out by mjg, without the parentheses the calculations done against
  these macros are incorrect, resulting in only 1/3 of locks being used.
  
  Reported by:  mjg

Modified:
  head/sys/powerpc/aim/mmu_oea64.c

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Sun Oct  6 18:38:58 2019    (r353146)
+++ head/sys/powerpc/aim/mmu_oea64.c    Sun Oct  6 19:11:01 2019    (r353147)
@@ -119,8 +119,8 @@ uintptr_t moea64_get_unique_vsid(void); 
  *
  */
 
-#define PV_LOCK_PER_DOM        PA_LOCK_COUNT*3
-#define PV_LOCK_COUNT          PV_LOCK_PER_DOM*MAXMEMDOM
+#define PV_LOCK_PER_DOM        (PA_LOCK_COUNT * 3)
+#define PV_LOCK_COUNT          (PV_LOCK_PER_DOM * MAXMEMDOM)
 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
  
 /*
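
The failure mode is textual macro expansion rebinding against operators
at the use site. An illustrative expansion (the use site below is
hypothetical, not the exact expression in mmu_oea64.c):

    #define PV_LOCK_PER_DOM  PA_LOCK_COUNT*3            /* old, broken */
    #define PV_LOCK_COUNT    PV_LOCK_PER_DOM*MAXMEMDOM

    idx = hash % PV_LOCK_COUNT;
    /* expands textually to: */
    idx = hash % PA_LOCK_COUNT*3*MAXMEMDOM;
    /* '%' and '*' share precedence and associate left-to-right, so: */
    idx = ((hash % PA_LOCK_COUNT) * 3) * MAXMEMDOM;

The modulo binds to PA_LOCK_COUNT alone, so only a fraction of pv_lock[]
is ever indexed; with the bodies parenthesized, the modulo covers the
whole lock count.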


svn commit: r353146 - head/stand/efi/libefi

2019-10-06 Thread Toomas Soome
Author: tsoome
Date: Sun Oct  6 18:38:58 2019
New Revision: 353146
URL: https://svnweb.freebsd.org/changeset/base/353146

Log:
  loader.efi: for text mode, use STM to scroll the whole screen
  
  Since the local UEFI console is implemented on top of a framebuffer,
  we need to avoid redrawing the whole screen ourselves and instead let
  Simple Text Mode do the scroll for us.

Modified:
  head/stand/efi/libefi/efi_console.c

Modified: head/stand/efi/libefi/efi_console.c
==============================================================================
--- head/stand/efi/libefi/efi_console.c    Sun Oct  6 08:47:10 2019    (r353145)
+++ head/stand/efi/libefi/efi_console.c    Sun Oct  6 18:38:58 2019    (r353146)
@@ -136,7 +136,7 @@ efi_text_cursor(void *s __unused, const teken_pos_t *p
 }
 
 static void
-efi_text_printchar(const teken_pos_t *p)
+efi_text_printchar(const teken_pos_t *p, bool autoscroll)
 {
UINTN a, attr;
struct text_pixel *px;
@@ -164,7 +164,8 @@ efi_text_printchar(const teken_pos_t *p)
conout->SetCursorPosition(conout, p->tp_col, p->tp_row);
 
        /* to prevent autoscroll, skip print of lower right char */
-   if (p->tp_row == tp.tp_row - 1 &&
+   if (!autoscroll &&
+   p->tp_row == tp.tp_row - 1 &&
p->tp_col == tp.tp_col - 1)
return;
 
@@ -183,7 +184,7 @@ efi_text_putchar(void *s __unused, const teken_pos_t *
idx = p->tp_col + p->tp_row * tp.tp_col;
buffer[idx].c = c;
buffer[idx].a = *a;
-   efi_text_printchar(p);
+   efi_text_printchar(p, false);
 }
 
 static void
@@ -226,6 +227,7 @@ efi_text_copy(void *ptr __unused, const teken_rect_t *
int srow, drow;
int nrow, ncol, x, y; /* Has to be signed - >= 0 comparison */
teken_pos_t d, s;
+   bool scroll = false;
 
/*
 * Copying is a little tricky. We must make sure we do it in
@@ -235,6 +237,13 @@ efi_text_copy(void *ptr __unused, const teken_rect_t *
nrow = r->tr_end.tp_row - r->tr_begin.tp_row;
ncol = r->tr_end.tp_col - r->tr_begin.tp_col;
 
+   /*
+* Check if we do copy whole screen.
+*/
+   if (p->tp_row == 0 && p->tp_col == 0 &&
+   nrow == tp.tp_row - 2 && ncol == tp.tp_col - 2)
+   scroll = true;
+
conout->EnableCursor(conout, FALSE);
if (p->tp_row < r->tr_begin.tp_row) {
/* Copy from bottom to top. */
@@ -252,7 +261,17 @@ efi_text_copy(void *ptr __unused, const teken_rect_t *
                        if (!is_same_pixel(&buffer[d.tp_col + drow],
                            &buffer[s.tp_col + srow])) {
                                buffer[d.tp_col + drow] =
                                    buffer[s.tp_col + srow];
-                               efi_text_printchar(&d);
+                               if (!scroll)
+                                       efi_text_printchar(&d, false);
+   } else if (scroll) {
+   /*
+* Draw last char and trigger
+* scroll.
+*/
+   if (y == nrow - 1 &&
+   x == ncol - 1) {
+                                       efi_text_printchar(&d, true);
+   }
}
}
}
@@ -274,7 +293,7 @@ efi_text_copy(void *ptr __unused, const teken_rect_t *
                        if (!is_same_pixel(&buffer[d.tp_col + drow],
                            &buffer[s.tp_col + srow])) {
                                buffer[d.tp_col + drow] =
                                    buffer[s.tp_col + srow];
-                               efi_text_printchar(&d);
+                               efi_text_printchar(&d, false);
}
}
}
@@ -294,7 +313,7 @@ efi_text_copy(void *ptr __unused, const teken_rect_t *
                        if (!is_same_pixel(&buffer[d.tp_col + drow],
                            &buffer[s.tp_col + srow])) {
                                buffer[d.tp_col + drow] =
                                    buffer[s.tp_col + srow];
-                               efi_text_printchar(&d);
+                               efi_text_printchar(&d, false);
}
}
}


Re: svn commit: r353057 - head/sys/net

2019-10-06 Thread Allan Jude
On 2019-10-04 08:57, Kyle Evans wrote:
> On Thu, Oct 3, 2019 at 12:54 PM Kyle Evans  wrote:
>>
>> Author: kevans
>> Date: Thu Oct  3 17:54:00 2019
>> New Revision: 353057
>> URL: https://svnweb.freebsd.org/changeset/base/353057
>>
>> Log:
>>   if_tuntap: create /dev aliases when a tuntap device gets renamed
>>
>>   Currently, if you do:
>>
>>   $ ifconfig tun0 create
>>   $ ifconfig tun0 name wg0
>>   $ ls -l /dev | egrep 'wg|tun'
>>
>>   You will see tun0, but no wg0. In fact, it's slightly more annoying to make
>>   the association between the new name and the old name in order to open the
>>   device (if it hadn't been opened during the rename).
>>
>>   Register an eventhandler for ifnet_arrival_events and catch interface
>>   renames. We can determine if the ifnet is a tun easily enough from the
>>   if_dname, which matches the cevsw.d_name from the associated tuntap_driver.
>>
>>   Some locking dance is required because renames don't require the device to
>>   be opened, so it could go away in the middle of handling the ioctl, but as
>>   soon as we've verified this isn't the case we can attempt to busy the tun
>>   and either bail out if the tun device is dying, or we can proceed with the
>>   rename.
>>
>>   We only create these aliases on a best-effort basis. Renaming a tun device
>>   to "usbctl", which doesn't exist as an ifnet but does as a /dev, is clearly
>>   not that disastrous, but we can't and won't create a /dev for that.
>>
> 
> It's been brought to my attention that I actually had a PR that I took
> six months ago that this should've belonged to.
> 
> PR: 219746
> 

Thanks for this. I was having similar problems trying to use wireguard
inside a VNET jail, where it was even harder to find and destroy the
correct interface.

-- 
Allan Jude





svn commit: r353145 - head/sys/netinet

2019-10-06 Thread Michael Tuexen
Author: tuexen
Date: Sun Oct  6 08:47:10 2019
New Revision: 353145
URL: https://svnweb.freebsd.org/changeset/base/353145

Log:
  Plug an mbuf leak in a code path that should not be taken. Also avoid
  taking that path at all by setting the tail pointer correctly.
  There is still a bug related to handling unordered, unfragmented messages
  which were delayed in deferred handling.
  This issue was found by OSS-Fuzz testing the usrsctp stack and reported in
  https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=17794
  
  MFC after:    3 days

Modified:
  head/sys/netinet/sctp_indata.c

Modified: head/sys/netinet/sctp_indata.c
==============================================================================
--- head/sys/netinet/sctp_indata.c    Sun Oct  6 04:36:53 2019    (r353144)
+++ head/sys/netinet/sctp_indata.c    Sun Oct  6 08:47:10 2019    (r353145)
@@ -716,6 +716,7 @@ sctp_add_to_tail_pointer(struct sctp_queued_to_read *c
}
if (control->tail_mbuf == NULL) {
/* TSNH */
+   sctp_m_freem(control->data);
control->data = m;
sctp_setup_tail_pointer(control);
return;
@@ -2119,10 +2120,13 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struc
struct mbuf *mm;
 
control->data = dmbuf;
+   control->tail_mbuf = NULL;
for (mm = control->data; mm; mm = mm->m_next) {
control->length += SCTP_BUF_LEN(mm);
+   if (SCTP_BUF_NEXT(mm) == NULL) {
+   control->tail_mbuf = mm;
+   }
}
-   control->tail_mbuf = NULL;
control->end_added = 1;
control->last_frag_seen = 1;
control->first_frag_seen = 1;
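
The second half of the fix is the loop change: compute the tail pointer
during the same walk that sums the chain length, instead of clearing it
afterwards. The pattern in isolation, on a toy chain type rather than
the real struct mbuf:

    #include <stddef.h>

    struct buf {                    /* toy stand-in for struct mbuf */
            struct buf *next;
            int         len;
    };

    static void
    set_length_and_tail(struct buf *head, int *lenp, struct buf **tailp)
    {
            struct buf *b, *tail;
            int len;

            len = 0;
            tail = NULL;
            for (b = head; b != NULL; b = b->next) {
                    len += b->len;
                    if (b->next == NULL)
                            tail = b;       /* remember the last element */
            }
            *lenp = len;
            *tailp = tail;  /* never NULL for a non-empty chain */
    }

With the tail pointer kept valid, sctp_add_to_tail_pointer() no longer
reaches its "TSNH" branch, and that branch itself now frees the old
chain instead of leaking it.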