We experienced a "quirk" with persistence and multiple listen addresses on 
redirects, such as listening to both port 21 and a passive port range for load 
balancing FTP, because the session's stickiness seems to operate per-pass-rule. 
One solution would be to group all rules as matches under one pass. The following 
diff is a bit rough, but applies to current (with a little fuzz). I submit it 
so that you can reflect upon it. 

--- pfe_filter.c.orig   Wed Nov 14 09:31:47 2012
+++ pfe_filter.c        Wed Nov 14 16:09:47 2012
@@ -395,11 +395,20 @@
                return;
        }

+       /* In case we have multiple >listen on< we need to aggregate them into one policy which does
+        * the rdr-to (this makes stat counters and src-tracking work for multi-port protocols).
+        * e.g. 21 and passive ftp ports (50000:50500).
+        *
+        * match <listen on...>
+        * match <listen on...>
+        * pass ... rdr-to <table>
+        */
+       int multiple_listen = TAILQ_NEXT(TAILQ_FIRST(&rdr->virts), entry) != NULL;
        TAILQ_FOREACH(address, &rdr->virts, entry) {
                memset(&rio, 0, sizeof(rio));
                (void)strlcpy(rio.anchor, anchor, sizeof(rio.anchor));

-               if (rdr->conf.flags & F_MATCH) {
+               if (rdr->conf.flags & F_MATCH || multiple_listen) {
                        rio.rule.action = PF_MATCH;
                        rio.rule.quick = 0;
                } else {
@@ -444,7 +453,10 @@
                        rio.rule.timeout[PFTM_TCP_ESTABLISHED] =
                            rdr->conf.timeout.tv_sec;

-               if (strlen(rdr->conf.tag))
+               if (multiple_listen)
+                       (void)strlcpy(rio.rule.tagname, anchor,
+                           sizeof(rio.rule.tagname));
+               else if (strlen(rdr->conf.tag))
                        (void)strlcpy(rio.rule.tagname, rdr->conf.tag,
                            sizeof(rio.rule.tagname));
                if (strlen(address->ifname))
@@ -467,6 +479,9 @@
                }

                rio.rule.nat.addr.type = PF_ADDR_NONE;
+               if (multiple_listen)
+                       rio.rule.rdr.addr.type = PF_ADDR_NONE;
+               else {
                rio.rule.rdr.addr.type = PF_ADDR_TABLE;
                if (strlen(t->conf.ifname))
                        (void)strlcpy(rio.rule.rdr.ifname, t->conf.ifname,
@@ -491,6 +506,68 @@
                           sizeof(rio.rule.route));
                        rio.rule.rdr.addr.type = PF_ADDR_NONE;
                }
+               }
+
+               rio.rule.prio[0] = PF_PRIO_NOTSET;
+               rio.rule.prio[1] = PF_PRIO_NOTSET;
+
+               if (ioctl(env->sc_pf->dev, DIOCADDRULE, &rio) == -1)
+                       fatal("cannot add rule");
+               log_debug("%s: rule added to anchor \"%s\"", __func__, anchor);
+       }
+       if (multiple_listen) {
+               memset(&rio, 0, sizeof(rio));
+               (void)strlcpy(rio.anchor, anchor, sizeof(rio.anchor));
+
+               if (rdr->conf.flags & F_MATCH) {
+                       rio.rule.action = PF_MATCH;
+                       rio.rule.quick = 0;
+               } else {
+                       rio.rule.action = PF_PASS;
+                       rio.rule.quick = 1; /* force first match */
+               }
+               rio.rule.direction = PF_IN;
+               rio.rule.keep_state = PF_STATE_NORMAL;
+               rio.ticket = env->sc_pf->pfte.ticket;
+               rio.rule.af = 0;
+               rio.rule.proto = 0;
+               rio.rule.src.addr.type = PF_ADDR_ADDRMASK;
+               rio.rule.dst.addr.type = PF_ADDR_ADDRMASK;
+               rio.rule.dst.port_op = PF_OP_NONE;
+               rio.rule.rtableid = -1;
+               (void)strlcpy(rio.rule.match_tagname, anchor,
+                               sizeof(rio.rule.match_tagname));
+               if (strlen(rdr->conf.tag))
+                       (void)strlcpy(rio.rule.tagname, rdr->conf.tag,
+                           sizeof(rio.rule.tagname));
+
+               rio.rule.nat.addr.type = PF_ADDR_NONE;
+               rio.rule.rdr.addr.type = PF_ADDR_TABLE;
+               if (strlen(t->conf.ifname))
+                       (void)strlcpy(rio.rule.rdr.ifname, t->conf.ifname,
+                           sizeof(rio.rule.rdr.ifname));
+               if (strlcpy(rio.rule.rdr.addr.v.tblname, rdr->conf.name,
+                   sizeof(rio.rule.rdr.addr.v.tblname)) >=
+                   sizeof(rio.rule.rdr.addr.v.tblname))
+                       fatal("sync_ruleset: table name too long");
+
+               if (rdr->table->conf.flags & F_PORT) {
+                       rio.rule.rdr.proxy_port[0] =
+                           ntohs(rdr->table->conf.port);
+                       rio.rule.rdr.port_op = PF_OP_EQ;
+               }
+               switch (rdr->conf.mode) {
+               case RELAY_DSTMODE_ROUNDROBIN:
+                       rio.rule.rdr.opts = PF_POOL_ROUNDROBIN;
+                       break;
+               case RELAY_DSTMODE_LEASTSTATES:
+                       rio.rule.rdr.opts = PF_POOL_LEASTSTATES;
+                       break;
+               default:
+                       fatalx("sync_ruleset: unsupported mode");
+                       /* NOTREACHED */
+               }
+               if (rdr->conf.flags & F_STICKY)
+                       rio.rule.rdr.opts |= PF_POOL_STICKYADDR;
+
+               if (rio.rule.rt == PF_ROUTETO) {
+                       memcpy(&rio.rule.route, &rio.rule.rdr,
+                          sizeof(rio.rule.route));
+                       rio.rule.rdr.addr.type = PF_ADDR_NONE;
+               }
+
+               rio.rule.prio[0] = PF_PRIO_NOTSET;
+               rio.rule.prio[1] = PF_PRIO_NOTSET;

                if (ioctl(env->sc_pf->dev, DIOCADDRULE, &rio) == -1)
                        fatal("cannot add rule");


Reply via email to