RE: using memory after freed in tcp_syncache (syncache_timer()) with ipfw: patch attached
From: Don Bowman [mailto:[EMAIL PROTECTED]]

> Synopsis: under some ipfw conditions, tcp_syncache has
> syncache_respond() call ip_output call ip_input call syncache_drop(),
> which drops the 'syncache' that is being worked on, or corrupts
> the list, etc. This is typically seen from syncache_timer or
> syncache_add.
>
> I've attached a patch that I believe corrects this problem.
> I'm observing it on 4.7, but I believe it equally affects RELENG_4
> and CURRENT.
>
> This seems to make the problem I was seeing go away. I'm
> currently running with 2K syn/second through the original condition,
> will let it go overnight like that. I think that will flush
> out if i've introduced a leak or other crash.
>
> Can someone who knows this code perhaps critique what I've done?
>
> Essentially I have made syncache_drop() instead defer the delete
> onto a different list. In the timer, I delete the syncache entries
> from the delete list. This costs some performance and memory, but
> was the best way I could come up with.

There was an error in the previous patch.

Index: tcp_syncache.c
===================================================================
RCS file: /usr/cvs/src/sys/netinet/tcp_syncache.c,v
retrieving revision 1.5.2.8.1000.3
diff -U5 -r1.5.2.8.1000.3 tcp_syncache.c
--- tcp_syncache.c  4 Feb 2003 01:52:03 -0000  1.5.2.8.1000.3
+++ tcp_syncache.c  1 Jul 2003 14:32:29 -0000
@@ -83,16 +83,18 @@
 #endif /*IPSEC*/
 
 #include
 #include
 
+static int syncache_delete_flag;
 static int tcp_syncookies = 1;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
     &tcp_syncookies, 0,
     "Use TCP SYN cookies if the syncache overflows");
 
 static void syncache_drop(struct syncache *, struct syncache_head *);
+static void syncache_delete(struct syncache *, struct syncache_head *);
 static void syncache_free(struct syncache *);
 static void syncache_insert(struct syncache *, struct syncache_head *);
 struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
 static int syncache_respond(struct syncache *, struct mbuf *);
 static struct socket *syncache_socket(struct syncache *, struct socket *);
@@ -125,10 +127,11 @@
     u_int next_reseed;
     TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
     struct callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
 };
 static struct tcp_syncache tcp_syncache;
+static TAILQ_HEAD(syncache_delete_list, syncache) sc_delete_list;
 
 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");
 
 SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RD,
     &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");
@@ -202,10 +205,13 @@
         rtrequest(RTM_DELETE, rt_key(rt),
             rt->rt_gateway, rt_mask(rt),
             rt->rt_flags, NULL);
         RTFREE(rt);
     }
+#if defined(DIAGNOSTIC)
+    memset(sc, 0xee, sizeof(struct syncache));
+#endif
     zfree(tcp_syncache.zone, sc);
 }
 
 void
 syncache_init(void)
@@ -256,10 +262,12 @@
      * older one.
      */
     tcp_syncache.cache_limit -= 1;
     tcp_syncache.zone = zinit("syncache", sizeof(struct syncache),
         tcp_syncache.cache_limit, ZONE_INTERRUPT, 0);
+
+    TAILQ_INIT(&sc_delete_list);
 }
 
 static void
 syncache_insert(sc, sch)
     struct syncache *sc;
@@ -312,12 +320,28 @@
 static void
 syncache_drop(sc, sch)
     struct syncache *sc;
     struct syncache_head *sch;
 {
+    if ((sc->sc_flags & SCF_DELETE) == 0) {
+        sc->sc_flags |= SCF_DELETE;
+        syncache_delete_flag = 1;
+        TAILQ_INSERT_TAIL(&sc_delete_list, sc, sc_delete);
+    }
+}
+
+static void
+syncache_delete(sc, sch)
+    struct syncache *sc;
+    struct syncache_head *sch;
+{
     int s;
 
+    if ((sc->sc_flags & SCF_DELETE) == 0) {
+        printf("ERROR ERROR ERROR: SCF_DELETE == 0\n");
+        return;
+    }
     if (sch == NULL) {
 #ifdef INET6
         if (sc->sc_inc.inc_isipv6) {
             sch = &tcp_syncache.hashbase[
                 SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
@@ -329,10 +353,12 @@
         }
     }
 
     s = splnet();
 
+    TAILQ_REMOVE(&sc_delete_list, sc, sc_delete);
+
     TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
     sch->sch_length--;
     tcp_syncache.cache_count--;
 
     TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
@@ -357,10 +383,12 @@
     int s;
 
     s = splnet();
     if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
         !callout_active(&tcp_syncache.tt_timerq[slot])) {
+        if (syncache_delete_flag)
+            goto delete_cleanup;
         splx(s);
         return;
     }
     callout_deactivate(&tcp_syncache.tt_timerq[slot]);
 
@@ -390,10 +418,
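As a rough userland sketch of the deferred-delete idea in the patch above (the names entry, drop_entry, cleanup_deferred and respond are invented for illustration, not kernel identifiers): drop only marks an entry and queues it on a separate delete list, and the real unlink and free happen later from a single cleanup point, so a re-entrant drop during a list walk can never free the entry the walker is still holding.

/*
 * Sketch of deferred deletion, loosely modelled on the patch above.
 * All names here are made up; this is not the kernel code.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define F_DELETE 0x01                   /* entry is queued for deletion */

struct entry {
        int id;
        int flags;
        TAILQ_ENTRY(entry) e_hash;      /* main list linkage */
        TAILQ_ENTRY(entry) e_delete;    /* deferred-delete linkage */
};

static TAILQ_HEAD(, entry) main_list = TAILQ_HEAD_INITIALIZER(main_list);
static TAILQ_HEAD(, entry) delete_list = TAILQ_HEAD_INITIALIZER(delete_list);

/* Like the patched syncache_drop(): never frees, only defers. */
static void
drop_entry(struct entry *e)
{
        if ((e->flags & F_DELETE) == 0) {
                e->flags |= F_DELETE;
                TAILQ_INSERT_TAIL(&delete_list, e, e_delete);
        }
}

/* Like the patched syncache_delete(): the only place an entry dies. */
static void
cleanup_deferred(void)
{
        struct entry *e, *next;

        for (e = TAILQ_FIRST(&delete_list); e != NULL; e = next) {
                next = TAILQ_NEXT(e, e_delete);
                TAILQ_REMOVE(&delete_list, e, e_delete);
                TAILQ_REMOVE(&main_list, e, e_hash);
                free(e);
        }
}

/*
 * Stand-in for syncache_respond(): may "re-enter" the subsystem and drop
 * any entry, including the one the caller is currently working on.
 */
static void
respond(struct entry *e)
{
        drop_entry(e);
}

int
main(void)
{
        struct entry *e, *next;
        int i;

        for (i = 0; i < 3; i++) {
                e = calloc(1, sizeof(*e));
                if (e == NULL)
                        exit(1);
                e->id = i;
                TAILQ_INSERT_TAIL(&main_list, e, e_hash);
        }

        /* The walk stays safe: respond() can only defer, never free. */
        for (e = TAILQ_FIRST(&main_list); e != NULL; e = next) {
                respond(e);
                next = TAILQ_NEXT(e, e_hash);   /* 'e' is still valid here */
        }
        cleanup_deferred();
        printf("main list empty: %d\n", TAILQ_EMPTY(&main_list));
        return (0);
}

The same shape appears in the patch: syncache_drop() becomes the deferring half, syncache_delete() the destructive half, and the delete_cleanup label in syncache_timer() drains the list.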
RE: using memory after freed in tcp_syncache (syncache_timer()) with ipfw: patch attached
Synopsis: under some ipfw conditions, tcp_syncache has
syncache_respond() call ip_output call ip_input call syncache_drop(),
which drops the 'syncache' that is being worked on, or corrupts
the list, etc. This is typically seen from syncache_timer or
syncache_add.

I've attached a patch that I believe corrects this problem.
I'm observing it on 4.7, but I believe it equally affects RELENG_4
and CURRENT.

This seems to make the problem I was seeing go away. I'm
currently running with 2K syn/second through the original condition,
will let it go overnight like that. I think that will flush
out if i've introduced a leak or other crash.

Can someone who knows this code perhaps critique what I've done?

Essentially I have made syncache_drop() instead defer the delete
onto a different list. In the timer, I delete the syncache entries
from the delete list. This costs some performance and memory, but
was the best way I could come up with.

--don

Index: tcp_syncache.c
===================================================================
RCS file: /usr/cvs/src/sys/netinet/tcp_syncache.c,v
retrieving revision 1.5.2.8.1000.3
diff -U3 -r1.5.2.8.1000.3 tcp_syncache.c
--- tcp_syncache.c  4 Feb 2003 01:52:03 -0000  1.5.2.8.1000.3
+++ tcp_syncache.c  1 Jul 2003 03:05:22 -0000
@@ -85,6 +85,7 @@
 #include
 #include
 
+static int syncache_delete;
 static int tcp_syncookies = 1;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
     &tcp_syncookies, 0,
@@ -127,6 +128,7 @@
     struct callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
 };
 static struct tcp_syncache tcp_syncache;
+static TAILQ_HEAD(syncache_delete_list, syncache) sc_delete_list;
 
 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");
 
@@ -204,6 +206,9 @@
             rt->rt_flags, NULL);
         RTFREE(rt);
     }
+#if defined(DIAGNOSTIC)
+    memset(sc, 0xee, sizeof(struct syncache));
+#endif
     zfree(tcp_syncache.zone, sc);
 }
 
@@ -258,6 +263,8 @@
     tcp_syncache.cache_limit -= 1;
     tcp_syncache.zone = zinit("syncache", sizeof(struct syncache),
         tcp_syncache.cache_limit, ZONE_INTERRUPT, 0);
+
+    TAILQ_INIT(&sc_delete_list);
 }
 
 static void
@@ -331,6 +338,18 @@
 
     s = splnet();
 
+    if ((sc->sc_flags & SCF_DELETE) == 0) {
+        sc->sc_flags |= SCF_DELETE;
+        syncache_delete = 1;
+        TAILQ_INSERT_TAIL(&sc_delete_list, sc, sc_delete);
+
+        splx(s);
+        return;
+    }
+    if (sc->sc_delete.tqe_next || sc->sc_delete.tqe_prev) {
+        TAILQ_REMOVE(&sc_delete_list, sc, sc_delete);
+    }
+
     TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
     sch->sch_length--;
     tcp_syncache.cache_count--;
@@ -359,6 +378,8 @@
     s = splnet();
     if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
         !callout_active(&tcp_syncache.tt_timerq[slot])) {
+        if (syncache_delete)
+            goto delete_cleanup;
         splx(s);
         return;
     }
@@ -392,6 +413,17 @@
     if (nsc != NULL)
         callout_reset(&tcp_syncache.tt_timerq[slot],
             nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
+
+delete_cleanup:
+    sc = TAILQ_FIRST(&sc_delete_list);
+    while (sc != NULL) {
+        nsc = TAILQ_NEXT(sc, sc_delete);
+        syncache_drop(sc, NULL);
+        sc = nsc;
+    }
+    TAILQ_INIT(&sc_delete_list);
+    syncache_delete = 0;
+
     splx(s);
 }
 
@@ -1335,6 +1367,7 @@
     sc = zalloc(tcp_syncache.zone);
     if (sc == NULL)
         return (NULL);
+    bzero(sc, sizeof(*sc));
 
     /*
      * Fill in the syncache values.
      * XXX duplicate code from syncache_add

Index: tcp_var.h
===================================================================
RCS file: /usr/cvs/src/sys/netinet/tcp_var.h,v
retrieving revision 1.56.2.12
diff -U3 -r1.56.2.12 tcp_var.h
--- tcp_var.h  24 Aug 2002 18:40:26 -0000  1.56.2.12
+++ tcp_var.h  1 Jul 2003 02:33:57 -0000
@@ -224,8 +224,10 @@
 #define SCF_CC          0x08    /* negotiated CC */
 #define SCF_UNREACH     0x10    /* icmp unreachable received */
 #define SCF_KEEPROUTE   0x20    /* keep cloned route */
+#define SCF_DELETE      0x40    /* I'm being deleted */
     TAILQ_ENTRY(syncache) sc_hash;
     TAILQ_ENTRY(syncache) sc_timerq;
+    TAILQ_ENTRY(syncache) sc_delete;
 };
 
 struct syncache_head {
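The #if defined(DIAGNOSTIC) memset to 0xee in syncache_free() and the bzero() after zalloc() in the patch above are a poison-on-free / clear-on-alloc debugging aid: a use-after-free then reads an obvious 0xee pattern instead of stale but plausible-looking data. A minimal standalone illustration of the idea, with made-up wrapper names (my_alloc/my_free stand in for the zone allocator; compile with -DDIAGNOSTIC to enable the poisoning):

/* Poison-on-free illustration; not the kernel zone allocator. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item {
        void *ptr;
        int   value;
};

static void
my_free(struct item *it)
{
#if defined(DIAGNOSTIC)
        /* Stomp the memory so a later use-after-free is unmistakable. */
        memset(it, 0xee, sizeof(*it));
#endif
        free(it);
}

static struct item *
my_alloc(void)
{
        struct item *it = malloc(sizeof(*it));

        if (it != NULL)
                memset(it, 0, sizeof(*it));     /* like the added bzero() */
        return (it);
}

int
main(void)
{
        struct item *it = my_alloc();

        if (it == NULL)
                return (1);
        it->value = 42;
        printf("before free: %d\n", it->value);
        my_free(it);
        /*
         * Any touch of 'it' after this point is a bug; with DIAGNOSTIC the
         * 0xee pattern makes such a bug show up quickly in a debugger.
         */
        return (0);
}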
RE: using memory after freed in tcp_syncache (syncache_timer())
From: Don Bowman
...

It appears this may also occur in syncache_add(): in this case,
syncache_respond() alters the list.

    sc->sc_tp = tp;
    sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
    if (syncache_respond(sc, m) == 0) {
        s = splnet();
        TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
            sc, sc_timerq);
        SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
        splx(s);
        tcpstat.tcps_sndacks++;
        tcpstat.tcps_sndtotal++;
    }
    *sop = NULL;
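With deferred deletion in place, one defensive pattern for this syncache_add() path (not something the posted patch itself adds) is to re-check the entry after the call that can re-enter the stack, since the entry is then guaranteed to still be allocated but may already be condemned. A hypothetical sketch with invented names (entry, F_DELETE, send_response, add_path):

/* Invented names throughout; this only illustrates the re-check pattern. */
#include <stdio.h>

#define F_DELETE 0x01

struct entry {
        int flags;
};

/* Stand-in for syncache_respond(): pretend it re-entered and dropped 'e'. */
static int
send_response(struct entry *e)
{
        e->flags |= F_DELETE;           /* deferred-delete mark, as in the patch */
        return (0);                     /* 0 == response sent */
}

static void
add_path(struct entry *e)
{
        if (send_response(e) == 0) {
                if (e->flags & F_DELETE) {
                        /*
                         * Entry was dropped underneath us; leave it for the
                         * deferred cleanup instead of re-arming its timer.
                         */
                        printf("entry condemned, not rescheduling\n");
                        return;
                }
                /* ...otherwise it would be safe to reschedule here... */
        }
}

int
main(void)
{
        struct entry e = { 0 };

        add_path(&e);
        return (0);
}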
using memory after freed in tcp_syncache (syncache_timer())
syncache_timer()
...
        /*
         * syncache_respond() may call back into the syncache to
         * to modify another entry, so do not obtain the next
         * entry on the timer chain until it has completed.
         */
        (void) syncache_respond(sc, NULL);
        nsc = TAILQ_NEXT(sc, sc_timerq);
        tcpstat.tcps_sc_retransmitted++;
        TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);

so what happens is that syncache_respond() calls ip_output, which ends up
calling ip_input, which ends up doing something that causes 'sc' to be
freed.

Now 'sc' is freed, we return to syncache_timer(), and then we use it in
the nsc = TAILQ_NEXT(...) line.

This particular part of the problem was introduced in 1.23 of
tcp_syncache.c in response to another bug that i had found.

Does anyone have a suggestion on a proper fix?
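A small userland simulation of the hazard described above, with invented names; drop() marks entries dead instead of freeing them, so the "use after free" can be reported safely rather than crashing. It mirrors the rev 1.23 ordering, where the next pointer is read from the entry only after the re-entrant call may already have dropped it:

/* Simulation only; names are made up and nothing is actually freed. */
#include <sys/queue.h>
#include <stdio.h>

struct entry {
        int id;
        int dead;                       /* stands in for "memory was freed" */
        TAILQ_ENTRY(entry) e_timerq;
};

static TAILQ_HEAD(, entry) timerq = TAILQ_HEAD_INITIALIZER(timerq);

/* Immediate removal, like the unpatched syncache_drop(). */
static void
drop(struct entry *e)
{
        TAILQ_REMOVE(&timerq, e, e_timerq);
        e->dead = 1;                    /* real code would zfree() here */
}

/*
 * Stand-in for syncache_respond() -> ip_output() -> ip_input(): the call
 * re-enters the subsystem and drops the entry it was handed.
 */
static void
respond(struct entry *e)
{
        drop(e);
}

int
main(void)
{
        static struct entry pool[3];
        struct entry *e, *next;
        int i;

        for (i = 0; i < 3; i++) {
                pool[i].id = i;
                TAILQ_INSERT_TAIL(&timerq, &pool[i], e_timerq);
        }

        for (e = TAILQ_FIRST(&timerq); e != NULL; e = next) {
                respond(e);
                /*
                 * The next pointer is read from 'e' only after respond()
                 * may already have dropped it; in the kernel this linkage
                 * would live in freed memory.
                 */
                next = TAILQ_NEXT(e, e_timerq);
                if (e->dead)
                        printf("entry %d: read linkage of a dropped entry\n",
                            e->id);
        }
        return (0);
}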