Re: somewhat reproducible vimage panic

2020-07-21 Thread John-Mark Gurney
Peter Libassi wrote this message on Wed, Jul 22, 2020 at 06:54 +0200:
> Is this related to
> https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=234985 and
> https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238326 ?

Definitely not 234985.  I'm using ue interfaces, so they don't get
destroyed while the jail is going away.

I don't think it's 238326 either.  This one is 100% reproducible, and
it's in the IP multicast code.  It looks like in_multi isn't holding an
interface or address lock while waiting for things to free up.
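
To make that concrete, here is a minimal sketch (untested, and the
helper names inm_defer_release()/inm_free_deferred() are made up, not
the actual in_mcast.c code) of the shape such a fix could take: pin the
ifnet, and through it the vnet, for as long as a release is pending.
Whether an ifnet reference is really enough to keep the vnet alive here
is exactly the open question:

/*
 * Sketch only: hold an interface reference across the deferred release
 * so the inm's vnet cannot be torn down before inm_release() runs.
 * List/taskqueue names are stand-ins; locking is elided.
 */
static void
inm_defer_release(struct in_multi *inm)
{
	if_ref(inm->inm_ifp);		/* pin ifp (and, we hope, its vnet) */
	SLIST_INSERT_HEAD(&inm_free_list, inm, inm_nrele);
	taskqueue_enqueue(taskqueue_thread, &inm_free_task);
}

static void
inm_free_deferred(struct in_multi *inm)
{
	struct ifnet *ifp = inm->inm_ifp;

	CURVNET_SET(ifp->if_vnet);	/* valid while the reference is held */
	inm_release(inm);
	CURVNET_RESTORE();
	if_rele(ifp);			/* drop the pin taken above */
}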

> > On 21 July 2020, at 22:23, John-Mark Gurney wrote:
> > 
> > Marko Zec wrote this message on Tue, Jul 21, 2020 at 11:31 +0200:
> >> On Tue, 21 Jul 2020 02:16:55 -0700
> >> John-Mark Gurney  wrote:
> >> 
> >>> I'm running:
> >>> FreeBSD test 13.0-CURRENT FreeBSD 13.0-CURRENT #0 r362596: Thu Jun 25
> >>> 05:02:51 UTC 2020
> >>> r...@releng1.nyi.freebsd.org:/usr/obj/usr/src/amd64.amd64/sys/GENERIC
> >>> amd64
> >>> 
> >>> and I'm working on improving the if_ure driver.  I've put together a
> >>> little script (attached) that I'm using to test the driver.
> >>> It puts a couple of ue interfaces each into their own jail, configures
> >>> them, and tries to pass traffic.  This assumes that the two interfaces
> >>> are connected together.
> >>> 
> >>> Pretty regularly when destroying the jails, I get the following
> >>> panic: CURVNET_SET at /usr/src/sys/netinet/in_mcast.c:626
> >>> inm_release() curvnet=0 vnet=0xf80154c82a80
> >> 
> >> Perhaps the attached patch could help? (disclaimer: not even
> >> compile-tested)
> > 
> > The patch compiled, but it just moved the panic earlier than before.
> > 
> > #4  0x80bc2123 in panic (fmt=<optimized out>)
> >at ../../../kern/kern_shutdown.c:839
> > #5  0x80d61726 in inm_release_task (arg=<optimized out>,
> >pending=<optimized out>) at ../../../netinet/in_mcast.c:633
> > #6  0x80c2166a in taskqueue_run_locked (queue=0xf800033cfd00)
> >at ../../../kern/subr_taskqueue.c:476
> > #7  0x80c226e4 in taskqueue_thread_loop (arg=<optimized out>)
> >at ../../../kern/subr_taskqueue.c:793
> > 
> > Now it panics at the location of the new CURVNET_SET and not the
> > old one.
> > 
> > OK, I decided to dump the contents of the vnet, and it looks like a
> > use-after-free (0xdeadc0de is the kernel's freed-memory poison
> > pattern):
> > (kgdb) print/x *(struct vnet *)0xf8012a283140
> > $2 = {vnet_le = {le_next = 0xdeadc0dedeadc0de, le_prev = 0xdeadc0dedeadc0de},
> >   vnet_magic_n = 0xdeadc0de, vnet_ifcnt = 0xdeadc0de,
> >   vnet_sockcnt = 0xdeadc0de, vnet_state = 0xdeadc0de,
> >   vnet_data_mem = 0xdeadc0dedeadc0de,
> >   vnet_data_base = 0xdeadc0dedeadc0de, vnet_shutdown = 0xde}
> > 
> > The patch did seem to make it happen quicker, or maybe I was just
> > luckier this morning...
> > 
> >>> (kgdb) #0  __curthread () at /usr/src/sys/amd64/include/pcpu_aux.h:55
> >>> #1  doadump (textdump=1) at /usr/src/sys/kern/kern_shutdown.c:394
> >>> #2  0x80bc6250 in kern_reboot (howto=260)
> >>>at /usr/src/sys/kern/kern_shutdown.c:481
> >>> #3  0x80bc66aa in vpanic (fmt=<optimized out>, ap=<optimized out>)
> >>>at /usr/src/sys/kern/kern_shutdown.c:913
> >>> #4  0x80bc6403 in panic (fmt=<optimized out>)
> >>>at /usr/src/sys/kern/kern_shutdown.c:839
> >>> #5  0x80d6553b in inm_release (inm=0xf80029043700)
> >>>at /usr/src/sys/netinet/in_mcast.c:630
> >>> #6  inm_release_task (arg=<optimized out>, pending=<optimized out>)
> >>>at /usr/src/sys/netinet/in_mcast.c:312
> >>> #7  0x80c2521a in taskqueue_run_locked
> >>> (queue=0xf80003116b00) at /usr/src/sys/kern/subr_taskqueue.c:476
> >>> #8  0x80c26294 in taskqueue_thread_loop (arg=<optimized out>)
> >>>at /usr/src/sys/kern/subr_taskqueue.c:793
> >>> #9  0x80b830f0 in fork_exit (
> >>>callout=0x80c26200 , 
> >>>arg=0x81cf4f70 ,
> >>> frame=0xfe0049e99b80) at /usr/src/sys/kern/kern_fork.c:1052
> >>> #10 
> >>> (kgdb) 
> >>> 
> >>> I have the core files so I can get additional information.
> >>> 
> >>> Let me know if you need any additional information.
> >>> 
> >> 
> > 
> >> Index: sys/netinet/in_mcast.c
> >> ===
> >> --- sys/netinet/in_mcast.c (revision 363386)
> >> +++ sys/netinet/in_mcast.c (working copy)
> >> @@ -309,8 +309,10 @@
> >>IN_MULTI_LOCK();
> >>SLIST_FOREACH_SAFE(inm, &inm_free_tmp, inm_nrele, tinm) {
> >>SLIST_REMOVE_HEAD(&inm_free_tmp, inm_nrele);
> >> +  CURVNET_SET(inm->inm_ifp->if_vnet);
> >>MPASS(inm);
> >>inm_release(inm);
> >> +  CURVNET_RESTORE();
> >>}
> >>IN_MULTI_UNLOCK();
> >> }

-- 
  John-Mark Gurney  Voice: +1 415 225 5579

 "All that I will do, has been done, All that I have, has not."

Re: somewhat reproducible vimage panic

2020-07-21 Thread Peter Libassi
Is this related to
https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=234985 and
https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238326 ?


/Peter


> On 21 July 2020, at 22:23, John-Mark Gurney wrote:
> 
> Marko Zec wrote this message on Tue, Jul 21, 2020 at 11:31 +0200:
>> On Tue, 21 Jul 2020 02:16:55 -0700
>> John-Mark Gurney  wrote:
>> 
>>> I'm running:
>>> FreeBSD test 13.0-CURRENT FreeBSD 13.0-CURRENT #0 r362596: Thu Jun 25
>>> 05:02:51 UTC 2020
>>> r...@releng1.nyi.freebsd.org:/usr/obj/usr/src/amd64.amd64/sys/GENERIC
>>> amd64
>>> 
>>> and I'm working on improving the if_ure driver.  I've put together a
>>> little script (attached) that I'm using to test the driver.
>>> It puts a couple of ue interfaces each into their own jail, configures
>>> them, and tries to pass traffic.  This assumes that the two interfaces
>>> are connected together.
>>> 
>>> Pretty regularly when destroying the jails, I get the following
>>> panic: CURVNET_SET at /usr/src/sys/netinet/in_mcast.c:626
>>> inm_release() curvnet=0 vnet=0xf80154c82a80
>> 
>> Perhaps the attached patch could help? (disclaimer: not even
>> compile-tested)
> 
> The patch compiled, but it just moved the panic earlier than before.
> 
> #4  0x80bc2123 in panic (fmt=<optimized out>)
>at ../../../kern/kern_shutdown.c:839
> #5  0x80d61726 in inm_release_task (arg=<optimized out>,
>pending=<optimized out>) at ../../../netinet/in_mcast.c:633
> #6  0x80c2166a in taskqueue_run_locked (queue=0xf800033cfd00)
>at ../../../kern/subr_taskqueue.c:476
> #7  0x80c226e4 in taskqueue_thread_loop (arg=<optimized out>)
>at ../../../kern/subr_taskqueue.c:793
> 
> Now it panics at the location of the new CURVNET_SET and not the
> old one.
> 
> OK, I decided to dump the contents of the vnet, and it looks like a
> use-after-free (0xdeadc0de is the kernel's freed-memory poison pattern):
> (kgdb) print/x *(struct vnet *)0xf8012a283140
> $2 = {vnet_le = {le_next = 0xdeadc0dedeadc0de, le_prev = 0xdeadc0dedeadc0de},
>   vnet_magic_n = 0xdeadc0de, vnet_ifcnt = 0xdeadc0de, vnet_sockcnt = 0xdeadc0de,
>   vnet_state = 0xdeadc0de, vnet_data_mem = 0xdeadc0dedeadc0de,
>   vnet_data_base = 0xdeadc0dedeadc0de, vnet_shutdown = 0xde}
> 
> The patch did seem to make it happen quicker, or maybe I was just
> luckier this morning...
> 
>>> (kgdb) #0  __curthread () at /usr/src/sys/amd64/include/pcpu_aux.h:55
>>> #1  doadump (textdump=1) at /usr/src/sys/kern/kern_shutdown.c:394
>>> #2  0x80bc6250 in kern_reboot (howto=260)
>>>at /usr/src/sys/kern/kern_shutdown.c:481
>>> #3  0x80bc66aa in vpanic (fmt=<optimized out>, ap=<optimized out>)
>>>at /usr/src/sys/kern/kern_shutdown.c:913
>>> #4  0x80bc6403 in panic (fmt=<optimized out>)
>>>at /usr/src/sys/kern/kern_shutdown.c:839
>>> #5  0x80d6553b in inm_release (inm=0xf80029043700)
>>>at /usr/src/sys/netinet/in_mcast.c:630
>>> #6  inm_release_task (arg=<optimized out>, pending=<optimized out>)
>>>at /usr/src/sys/netinet/in_mcast.c:312
>>> #7  0x80c2521a in taskqueue_run_locked
>>> (queue=0xf80003116b00) at /usr/src/sys/kern/subr_taskqueue.c:476
>>> #8  0x80c26294 in taskqueue_thread_loop (arg=<optimized out>)
>>>at /usr/src/sys/kern/subr_taskqueue.c:793
>>> #9  0x80b830f0 in fork_exit (
>>>callout=0x80c26200 , 
>>>arg=0x81cf4f70 ,
>>> frame=0xfe0049e99b80) at /usr/src/sys/kern/kern_fork.c:1052
>>> #10 
>>> (kgdb) 
>>> 
>>> I have the core files so I can get additional information.
>>> 
>>> Let me know if you need any additional information.
>>> 
>> 
> 
>> Index: sys/netinet/in_mcast.c
>> ===
>> --- sys/netinet/in_mcast.c   (revision 363386)
>> +++ sys/netinet/in_mcast.c   (working copy)
>> @@ -309,8 +309,10 @@
>>  IN_MULTI_LOCK();
>>  SLIST_FOREACH_SAFE(inm, &inm_free_tmp, inm_nrele, tinm) {
>>  SLIST_REMOVE_HEAD(&inm_free_tmp, inm_nrele);
>> +CURVNET_SET(inm->inm_ifp->if_vnet);
>>  MPASS(inm);
>>  inm_release(inm);
>> +CURVNET_RESTORE();
>>  }
>>  IN_MULTI_UNLOCK();
>> }
> 
> 
> -- 
>  John-Mark Gurney Voice: +1 415 225 5579
> 
> "All that I will do, has been done, All that I have, has not."



Re: somewhat reproducible vimage panic

2020-07-21 Thread John-Mark Gurney
Marko Zec wrote this message on Tue, Jul 21, 2020 at 11:31 +0200:
> On Tue, 21 Jul 2020 02:16:55 -0700
> John-Mark Gurney  wrote:
> 
> > I'm running:
> > FreeBSD test 13.0-CURRENT FreeBSD 13.0-CURRENT #0 r362596: Thu Jun 25
> > 05:02:51 UTC 2020
> > r...@releng1.nyi.freebsd.org:/usr/obj/usr/src/amd64.amd64/sys/GENERIC
> >  amd64
> > 
> > and I'm working on improving the if_ure driver.  I've put together a
> > little script (attached) that I'm using to test the driver.
> > It puts a couple of ue interfaces each into their own jail, configures
> > them, and tries to pass traffic.  This assumes that the two interfaces
> > are connected together.
> > 
> > Pretty regularly when destroying the jails, I get the following
> > panic: CURVNET_SET at /usr/src/sys/netinet/in_mcast.c:626
> > inm_release() curvnet=0 vnet=0xf80154c82a80
> 
> Perhaps the attached patch could help? (disclaimer: not even
> compile-tested)

The patch compiled, but it just moved the panic earlier than before.

#4  0x80bc2123 in panic (fmt=<optimized out>)
at ../../../kern/kern_shutdown.c:839
#5  0x80d61726 in inm_release_task (arg=<optimized out>,
pending=<optimized out>) at ../../../netinet/in_mcast.c:633
#6  0x80c2166a in taskqueue_run_locked (queue=0xf800033cfd00)
at ../../../kern/subr_taskqueue.c:476
#7  0x80c226e4 in taskqueue_thread_loop (arg=<optimized out>)
at ../../../kern/subr_taskqueue.c:793

Now it panics at the location of the new CURVNET_SET and not the
old one.

OK, I decided to dump the contents of the vnet, and it looks like a
use-after-free (0xdeadc0de is the kernel's freed-memory poison pattern):
(kgdb) print/x *(struct vnet *)0xf8012a283140
$2 = {vnet_le = {le_next = 0xdeadc0dedeadc0de, le_prev = 0xdeadc0dedeadc0de},
  vnet_magic_n = 0xdeadc0de, vnet_ifcnt = 0xdeadc0de, vnet_sockcnt = 0xdeadc0de,
  vnet_state = 0xdeadc0de, vnet_data_mem = 0xdeadc0dedeadc0de,
  vnet_data_base = 0xdeadc0dedeadc0de, vnet_shutdown = 0xde}
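
That matches the panic string: it is CURVNET_SET()'s debug check that
fires, because the vnet's magic number has been overwritten by the
poison.  Roughly (a simplified sketch, not the verbatim sys/net/vnet.h
macro; the save/restore bookkeeping for CURVNET_RESTORE is omitted):

/*
 * Simplified sketch of CURVNET_SET()'s debug check; not the verbatim
 * sys/net/vnet.h definition.
 */
#define	CURVNET_SET(arg) do {						\
	VNET_ASSERT((arg) != NULL &&					\
	    (arg)->vnet_magic_n == VNET_MAGIC_N,			\
	    ("CURVNET_SET at %s:%d %s() curvnet=%p vnet=%p",		\
	    __FILE__, __LINE__, __func__, curvnet, (arg)));		\
	curvnet = (arg);						\
} while (0)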

The patch did seem to make it happen quicker, or maybe I was just
luckier this morning...

> > (kgdb) #0  __curthread () at /usr/src/sys/amd64/include/pcpu_aux.h:55
> > #1  doadump (textdump=1) at /usr/src/sys/kern/kern_shutdown.c:394
> > #2  0x80bc6250 in kern_reboot (howto=260)
> > at /usr/src/sys/kern/kern_shutdown.c:481
> > #3  0x80bc66aa in vpanic (fmt=<optimized out>, ap=<optimized out>)
> > at /usr/src/sys/kern/kern_shutdown.c:913
> > #4  0x80bc6403 in panic (fmt=<optimized out>)
> > at /usr/src/sys/kern/kern_shutdown.c:839
> > #5  0x80d6553b in inm_release (inm=0xf80029043700)
> > at /usr/src/sys/netinet/in_mcast.c:630
> > #6  inm_release_task (arg=<optimized out>, pending=<optimized out>)
> > at /usr/src/sys/netinet/in_mcast.c:312
> > #7  0x80c2521a in taskqueue_run_locked
> > (queue=0xf80003116b00) at /usr/src/sys/kern/subr_taskqueue.c:476
> > #8  0x80c26294 in taskqueue_thread_loop (arg=<optimized out>)
> > at /usr/src/sys/kern/subr_taskqueue.c:793
> > #9  0x80b830f0 in fork_exit (
> > callout=0x80c26200 , 
> > arg=0x81cf4f70 ,
> > frame=0xfe0049e99b80) at /usr/src/sys/kern/kern_fork.c:1052
> > #10 
> > (kgdb) 
> > 
> > I have the core files so I can get additional information.
> > 
> > Let me know if you need any additional information.
> > 
> 

> Index: sys/netinet/in_mcast.c
> ===
> --- sys/netinet/in_mcast.c(revision 363386)
> +++ sys/netinet/in_mcast.c(working copy)
> @@ -309,8 +309,10 @@
>   IN_MULTI_LOCK();
>   SLIST_FOREACH_SAFE(inm, &inm_free_tmp, inm_nrele, tinm) {
>   SLIST_REMOVE_HEAD(&inm_free_tmp, inm_nrele);
> + CURVNET_SET(inm->inm_ifp->if_vnet);
>   MPASS(inm);
>   inm_release(inm);
> + CURVNET_RESTORE();
>   }
>   IN_MULTI_UNLOCK();
>  }


-- 
  John-Mark Gurney  Voice: +1 415 225 5579

 "All that I will do, has been done, All that I have, has not."


[Bug 238478] TCP Cubic code bug in cubic_ack_received

2020-07-21 Thread bugzilla-noreply
https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238478

--- Comment #5 from commit-h...@freebsd.org ---
A commit references this bug:

Author: rscheff
Date: Tue Jul 21 16:21:53 UTC 2020
New revision: 363397
URL: https://svnweb.freebsd.org/changeset/base/363397

Log:
  Fix style and comment around concave/convex regions in TCP cubic.

  In cubic, the concave region is when snd_cwnd grows more and more
  slowly towards max_cwnd (the cwnd at the time of the congestion
  event), and the convex region is when snd_cwnd grows faster and
  faster, eventually resembling slow-start-like growth.

  PR:   238478
  Reviewed by:  tuexen (mentor), rgrimes (mentor)
  Approved by:  tuexen (mentor), rgrimes (mentor)
  MFC after:2 weeks
  Sponsored by: NetApp, Inc.
  Differential Revision:https://reviews.freebsd.org/D24657

Changes:
  head/sys/netinet/cc/cc_cubic.c
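
For context, RFC 8312 defines the cubic window as
W_cubic(t) = C * (t - K)^3 + W_max: the curve is concave for t < K
(flattening out toward W_max) and convex for t > K (accelerating
again).  A schematic floating-point sketch of that function, not
FreeBSD's fixed-point cc_cubic.c implementation:

#include <math.h>

#define	CUBIC_C		0.4	/* RFC 8312 scaling constant */
#define	CUBIC_BETA	0.7	/* multiplicative decrease factor */

/* w_max: cwnd at the last congestion event; t: seconds since then. */
static double
w_cubic(double w_max, double t)
{
	/* K: the time at which the curve climbs back to w_max. */
	double k = cbrt(w_max * (1.0 - CUBIC_BETA) / CUBIC_C);

	/* t < k: concave region; t > k: convex, slow-start-like growth. */
	return (CUBIC_C * pow(t - k, 3.0) + w_max);
}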



[Bug 238478] TCP Cubic code bug in cubic_ack_received

2020-07-21 Thread bugzilla-noreply
https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=238478

--- Comment #4 from Richard Scheffenegger  ---
Actually, the concave and convex regions are defined with respect to the
previous cwnd (max_cwnd in the code), not the currently used cwnd.

The conditional in that area is there to prevent cwnd from shrinking in
case it has outgrown the currently calculated w_cubic_next value (e.g.
during slow-start).

So, after a close inspection showing that cwnd properly follows both the
concave and convex trajectories, I believe the current code is correct.
As there are some style issues, and the comment around this could be
expanded, I will commit a patch against this PR nevertheless.
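
A minimal sketch of that no-shrink conditional (illustrative only; the
names follow the comment above, not necessarily cc_cubic.c):

/*
 * Advance cwnd along the cubic curve, but never shrink it just because
 * the curve is still below a cwnd we already reached, e.g. by growing
 * past it during slow-start.
 */
static unsigned long
cubic_clamp_cwnd(unsigned long snd_cwnd, unsigned long w_cubic_next)
{
	return (w_cubic_next > snd_cwnd ? w_cubic_next : snd_cwnd);
}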



Re: somewhat reproducible vimage panic

2020-07-21 Thread Marko Zec
On Tue, 21 Jul 2020 02:16:55 -0700
John-Mark Gurney  wrote:

> I'm running:
> FreeBSD test 13.0-CURRENT FreeBSD 13.0-CURRENT #0 r362596: Thu Jun 25
> 05:02:51 UTC 2020
> r...@releng1.nyi.freebsd.org:/usr/obj/usr/src/amd64.amd64/sys/GENERIC
>  amd64
> 
> and I'm working on improving the if_ure driver.  I've put together a
> little script (attached) that I'm using to test the driver.
> It puts a couple of ue interfaces each into their own jail, configures
> them, and tries to pass traffic.  This assumes that the two interfaces
> are connected together.
> 
> Pretty regularly when destroying the jails, I get the following
> panic: CURVNET_SET at /usr/src/sys/netinet/in_mcast.c:626
> inm_release() curvnet=0 vnet=0xf80154c82a80

Perhaps the attached patch could help? (disclaimer: not even
compile-tested)

Marko


> (kgdb) #0  __curthread () at /usr/src/sys/amd64/include/pcpu_aux.h:55
> #1  doadump (textdump=1) at /usr/src/sys/kern/kern_shutdown.c:394
> #2  0x80bc6250 in kern_reboot (howto=260)
> at /usr/src/sys/kern/kern_shutdown.c:481
> #3  0x80bc66aa in vpanic (fmt=<optimized out>, ap=<optimized out>)
> at /usr/src/sys/kern/kern_shutdown.c:913
> #4  0x80bc6403 in panic (fmt=<optimized out>)
> at /usr/src/sys/kern/kern_shutdown.c:839
> #5  0x80d6553b in inm_release (inm=0xf80029043700)
> at /usr/src/sys/netinet/in_mcast.c:630
> #6  inm_release_task (arg=<optimized out>, pending=<optimized out>)
> at /usr/src/sys/netinet/in_mcast.c:312
> #7  0x80c2521a in taskqueue_run_locked
> (queue=0xf80003116b00) at /usr/src/sys/kern/subr_taskqueue.c:476
> #8  0x80c26294 in taskqueue_thread_loop (arg=<optimized out>)
> at /usr/src/sys/kern/subr_taskqueue.c:793
> #9  0x80b830f0 in fork_exit (
> callout=0x80c26200 , 
> arg=0x81cf4f70 ,
> frame=0xfe0049e99b80) at /usr/src/sys/kern/kern_fork.c:1052
> #10 
> (kgdb) 
> 
> I have the core files so I can get additional information.
> 
> Let me know if you need any additional information.
> 

Index: sys/netinet/in_mcast.c
===
--- sys/netinet/in_mcast.c	(revision 363386)
+++ sys/netinet/in_mcast.c	(working copy)
@@ -309,8 +309,10 @@
 	IN_MULTI_LOCK();
 	SLIST_FOREACH_SAFE(inm, &inm_free_tmp, inm_nrele, tinm) {
 		SLIST_REMOVE_HEAD(&inm_free_tmp, inm_nrele);
+		CURVNET_SET(inm->inm_ifp->if_vnet);
 		MPASS(inm);
 		inm_release(inm);
+		CURVNET_RESTORE();
 	}
 	IN_MULTI_UNLOCK();
 }
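
(Note on the patch's shape: the deferred free list can accumulate inm
entries whose interfaces sit in different vnets, which is why
CURVNET_SET() is applied per entry, from inm->inm_ifp->if_vnet, inside
the loop rather than once around it.  It also assumes inm_ifp is still
valid by the time the task runs, which is exactly the assumption the
use-after-free reported elsewhere in this thread violates.)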


somewhat reproducible vimage panic

2020-07-21 Thread John-Mark Gurney
I'm running:
FreeBSD test 13.0-CURRENT FreeBSD 13.0-CURRENT #0 r362596: Thu Jun 25 05:02:51 
UTC 2020 
r...@releng1.nyi.freebsd.org:/usr/obj/usr/src/amd64.amd64/sys/GENERIC  amd64

and I'm working on improving the if_ure driver.  I've put together a
little script (attached) that I'm using to test the driver.
It puts a couple of ue interfaces each into their own jail, configures
them, and tries to pass traffic.  This assumes that the two interfaces
are connected together.

Pretty regularly when destroying the jails, I get the following
panic: CURVNET_SET at /usr/src/sys/netinet/in_mcast.c:626 inm_release() 
curvnet=0 vnet=0xf80154c82a80

(kgdb) #0  __curthread () at /usr/src/sys/amd64/include/pcpu_aux.h:55
#1  doadump (textdump=1) at /usr/src/sys/kern/kern_shutdown.c:394
#2  0x80bc6250 in kern_reboot (howto=260)
at /usr/src/sys/kern/kern_shutdown.c:481
#3  0x80bc66aa in vpanic (fmt=<optimized out>, ap=<optimized out>)
at /usr/src/sys/kern/kern_shutdown.c:913
#4  0x80bc6403 in panic (fmt=<optimized out>)
at /usr/src/sys/kern/kern_shutdown.c:839
#5  0x80d6553b in inm_release (inm=0xf80029043700)
at /usr/src/sys/netinet/in_mcast.c:630
#6  inm_release_task (arg=<optimized out>, pending=<optimized out>)
at /usr/src/sys/netinet/in_mcast.c:312
#7  0x80c2521a in taskqueue_run_locked (queue=0xf80003116b00)
at /usr/src/sys/kern/subr_taskqueue.c:476
#8  0x80c26294 in taskqueue_thread_loop (arg=<optimized out>)
at /usr/src/sys/kern/subr_taskqueue.c:793
#9  0x80b830f0 in fork_exit (
callout=0x80c26200 , 
arg=0x81cf4f70 , frame=0xfe0049e99b80)
at /usr/src/sys/kern/kern_fork.c:1052
#10 
(kgdb) 

I have the core files so I can get additional information.

Let me know if you need any additional information.

-- 
  John-Mark Gurney  Voice: +1 415 225 5579

 "All that I will do, has been done, All that I have, has not."
#!/bin/sh -
#
# ls testinterfaces.sh | entr sh -c 'fsync testinterfaces.sh && sh testinterfaces.sh ue0 ue1'
#

testiface="$1"
checkiface="$2"

echo "starting, test $1, check $2"

# Use "ifconfig -m" to list an interface's capability flags, one per
# line, by splitting out the capabilities=<...> field.
getcaps()
{
	ifconfig -m "$1" | awk '$1 ~ /^capabilities=/ { split($0, a, "<"); split(a[2], b, ">"); split(b[1], caps, ","); for (i in caps) print caps[i] }'
}
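# Example (output is hypothetical): "getcaps ue0" might print lines
# like RXCSUM, TXCSUM, VLAN_MTU and VLAN_HWTAGGING, one per line.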


# Create one persistent vnet jail per interface ("-i" prints the jail id).
if ! testjail=$(jail -i -c persist=1 path=/ name=testjail vnet=new vnet.interface="$testiface"); then
	echo "failed to start test jail"
	exit 1
fi

if ! checkjail=$(jail -i -c persist=1 path=/ name=checkjail vnet=new vnet.interface="$checkiface"); then
	echo "failed to start check jail"
	exit 1
fi

# Remove both jails on exit.
cleanup()
{
	jail -r "$testjail"
	jail -r "$checkjail"
}

trap cleanup EXIT

# run test|check cmd...: run a command inside the selected jail.
run()
{
	if [ x"$1" = x"check" ]; then
		jid="$checkjail"
	elif [ x"$1" = x"test" ]; then
		jid="$testjail"
	else
		echo Invalid: "$1" >&2
		exit 1
	fi

	shift
	jexec "$jid" "$@"
}

# ifjail test|check args...: run ifconfig on that jail's interface.
ifjail()
{
	if [ x"$1" = x"check" ]; then
		iface="$checkiface"
	elif [ x"$1" = x"test" ]; then
		iface="$testiface"
	else
		echo Invalid: "$1" >&2
		exit 1
	fi

	j="$1"
	shift

	run "$j" ifconfig "$iface" "$@"
}

# Poll both interfaces until they report 1000baseT media, i.e. carrier.
waitcarrier()
{
	local i

	for i in test check; do
		while :; do
			if ifjail "$i" | grep 1000baseT >/dev/null; then
				break
			fi
			sleep .5
		done
	done
}

# Toggle vlanhwtag on the test interface and verify that vlan-tagged
# ICMP still passes to the check side (tcpdump there shows the frames).
hwvlantest()
{
	run test ifconfig lo0 up
	run check ifconfig lo0 up

	for i in "vlanhwtag" "-vlanhwtag"; do
		ifjail test down
		ifjail check down

		ifjail test "$i"
		ifjail check -vlanhwtag

		ifjail test up
		ifjail check up

		sleep 2

		run test ifconfig "$testiface.42" create 172.30.5.5/24
		run check ifconfig "$checkiface.42" create 172.30.5.4/24

		waitcarrier

		run check tcpdump -p -q -c 2 -n -i "$checkiface" vlan 42 and icmp &
		sleep 1
		if ! run test ping -c 1 -t 2 172.30.5.4; then
			echo FAILED on "$i"
			exit 1
		fi
	done
}

hwvlantest