Hello, I'm re-sending an updated version of the patch. I'd like to get it in so I can start moving forward with other things.
Any OKs? Thanks and regards, sasha On Sat, Aug 08, 2015 at 12:16:26PM +0200, Alexandr Nedvedicky wrote: > Hello, > > I've reworked the anchor handling so the traversal uses true recursion now. > Using recursion here will allow us to implement ruleset locking in a nicer > fashion. The idea is to split the current pf_test_rule() into two functions: > pf_test_rule() and pf_match_rule(). > > pf_step_into_anchor() is changed to drive the recursive anchor traversal. It calls > pf_match_rule() to match rules in nested rulesets. pf_step_out_of_anchor() > has > been merged into the new pf_step_into_anchor(). > > To minimize the stack frame size, a pf_test_ctx is introduced. Its members are > the various variables which used to be local in the former pf_test_rule(). The > pf_test_ctx instance is a local variable of the new pf_test_rule(). > pf_match_rule() receives a pointer to pf_test_ctx as its argument so it can > reach > all the variables it needs. The goal is to move as many local variables as possible out of > pf_match_rule() and pf_step_into_anchor() to save memory. > > To minimize the amount of differences, macros to access members in pf_test_ctx > are introduced. Once consensus on the proposed approach is reached, we > can polish the patch a bit. > > I did some basic testing with rules as follows: > > pass all > anchor "ap" self to 10.0.0.0/8 { > block proto tcp from self to 10.0.0.138 port 23 > pass proto tcp from self to 10.0.0.138 port 23 once > } > > and a wildcard variant. It seems to me that it works, but I'll be glad for any > further testing tips. 
> > regards > sasha > --------8<---------------8<---------------8<------------------8<-------- Index: pf.c =================================================================== RCS file: /cvs/src/sys/net/pf.c,v retrieving revision 1.941 diff -u -p -r1.941 pf.c --- pf.c 11 Sep 2015 15:21:31 -0000 1.941 +++ pf.c 12 Sep 2015 16:31:24 -0000 @@ -114,13 +114,37 @@ u_char pf_tcp_secret[16]; int pf_tcp_secret_init; int pf_tcp_iss_off; -struct pf_anchor_stackframe { - struct pf_ruleset *rs; - struct pf_rule *r; - struct pf_anchor_node *parent; - struct pf_anchor *child; -} pf_anchor_stack[64]; +struct pf_test_ctx { + int test_status; + struct pf_pdesc *pd; + struct pf_rule_actions act; + u_int8_t icmpcode; + u_int8_t icmptype; + int icmp_dir; + int state_icmp; + int tag; + u_short reason; + struct pf_rule_item *ri; + struct pf_src_node *sns[PF_SN_MAX]; + struct pf_rule_slist rules; + struct pf_rule *nr; + struct pf_rule **rm; + struct pf_rule *a; + struct pf_rule **am; + struct pf_ruleset **rsm; + struct pf_ruleset *arsm; + struct pf_ruleset *aruleset; + struct tcphdr *th; + int depth; +}; + +#define PF_ANCHOR_STACK_MAX 64 +enum { + PF_TEST_FAIL = -1, + PF_TEST_OK, + PF_TEST_QUICK +}; /* * Cannot fold into pf_pdesc directly, unknown storage size outside pf.c. * Keep in sync with union pf_headers in pflog_bpfcopy() in if_pflog.c. 
@@ -225,11 +249,8 @@ struct pf_state *pf_find_state(struct p struct pf_state_key_cmp *, u_int, struct mbuf *); int pf_src_connlimit(struct pf_state **); int pf_match_rcvif(struct mbuf *, struct pf_rule *); -void pf_step_into_anchor(int *, struct pf_ruleset **, - struct pf_rule **, struct pf_rule **); -int pf_step_out_of_anchor(int *, struct pf_ruleset **, - struct pf_rule **, struct pf_rule **, - int *); +int pf_step_into_anchor(struct pf_test_ctx *, struct pf_rule *); +int pf_match_rule(struct pf_test_ctx *, struct pf_ruleset *); void pf_counters_inc(int, struct pf_pdesc *, struct pf_state *, struct pf_rule *, struct pf_rule *); @@ -2628,74 +2649,37 @@ pf_tag_packet(struct mbuf *m, int tag, i m->m_pkthdr.ph_rtableid = (u_int)rtableid; } -void -pf_step_into_anchor(int *depth, struct pf_ruleset **rs, - struct pf_rule **r, struct pf_rule **a) +int +pf_step_into_anchor(struct pf_test_ctx *cx, struct pf_rule *r) { - struct pf_anchor_stackframe *f; + int rv; - if (*depth >= sizeof(pf_anchor_stack) / - sizeof(pf_anchor_stack[0])) { + if (cx->depth >= PF_ANCHOR_STACK_MAX) { log(LOG_ERR, "pf_step_into_anchor: stack overflow\n"); - *r = TAILQ_NEXT(*r, entries); - return; - } else if (a != NULL) - *a = *r; - f = pf_anchor_stack + (*depth)++; - f->rs = *rs; - f->r = *r; - if ((*r)->anchor_wildcard) { - f->parent = &(*r)->anchor->children; - if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == NULL) { - *r = NULL; - return; - } - *rs = &f->child->ruleset; - } else { - f->parent = NULL; - f->child = NULL; - *rs = &(*r)->anchor->ruleset; + return (PF_TEST_FAIL); } - *r = TAILQ_FIRST((*rs)->rules.active.ptr); -} -int -pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, - struct pf_rule **r, struct pf_rule **a, int *match) -{ - struct pf_anchor_stackframe *f; - int quick = 0; + cx->depth++; - do { - if (*depth <= 0) - break; - f = pf_anchor_stack + *depth - 1; - if (f->parent != NULL && f->child != NULL) { - f->child = RB_NEXT(pf_anchor_node, f->parent, f->child); - if 
(f->child != NULL) { - *rs = &f->child->ruleset; - *r = TAILQ_FIRST((*rs)->rules.active.ptr); - if (*r == NULL) - continue; - else - break; + if (r->anchor_wildcard) { + struct pf_anchor *child; + rv = PF_TEST_OK; + RB_FOREACH(child, pf_anchor_node, &r->anchor->children) { + rv = pf_match_rule(cx, &child->ruleset); + if (rv != 0) { + /* + * break on quick rule or failure + */ + break; } } - (*depth)--; - if (*depth == 0 && a != NULL) - *a = NULL; - else if (a != NULL) - *a = f->r; - *rs = f->rs; - if (*match > *depth) { - *match = *depth; - if (f->r->quick) - quick = 1; - } - *r = TAILQ_NEXT(f->r, entries); - } while (*r == NULL); + } else { + rv = pf_match_rule(cx, &r->anchor->ruleset); + } + + cx->depth--; - return (quick); + return (rv); } void @@ -3069,100 +3053,38 @@ pf_rule_to_actions(struct pf_rule *r, st } while (0) int -pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm, - struct pf_rule **am, struct pf_ruleset **rsm) +pf_match_rule(struct pf_test_ctx *cx, struct pf_ruleset *ruleset) { - struct pf_rule *r; - struct pf_rule *nr = NULL; - struct pf_rule *a = NULL; - struct pf_ruleset *arsm = NULL; - struct pf_ruleset *aruleset = NULL; - struct pf_ruleset *ruleset = NULL; - struct pf_rule_slist rules; - struct pf_rule_item *ri; - struct pf_src_node *sns[PF_SN_MAX]; - struct tcphdr *th = pd->hdr.tcp; - struct pf_state_key *skw = NULL, *sks = NULL; - struct pf_rule_actions act; - u_short reason; - int rewrite = 0; - int tag = -1; - int asd = 0; - int match = 0; - int state_icmp = 0, icmp_dir = 0; - u_int16_t virtual_type, virtual_id; - u_int8_t icmptype = 0, icmpcode = 0; - int action = PF_DROP; - - bzero(&act, sizeof(act)); - bzero(sns, sizeof(sns)); - act.rtableid = pd->rdomain; - SLIST_INIT(&rules); - - if (pd->dir == PF_IN && if_congested()) { - REASON_SET(&reason, PFRES_CONGEST); - return (PF_DROP); - } - - switch (pd->virtual_proto) { - case IPPROTO_ICMP: - icmptype = pd->hdr.icmp->icmp_type; - icmpcode = pd->hdr.icmp->icmp_code; 
- state_icmp = pf_icmp_mapping(pd, icmptype, - &icmp_dir, &virtual_id, &virtual_type); - if (icmp_dir == PF_IN) { - pd->osport = pd->nsport = virtual_id; - pd->odport = pd->ndport = virtual_type; - } else { - pd->osport = pd->nsport = virtual_type; - pd->odport = pd->ndport = virtual_id; - } - break; -#ifdef INET6 - case IPPROTO_ICMPV6: - icmptype = pd->hdr.icmp6->icmp6_type; - icmpcode = pd->hdr.icmp6->icmp6_code; - state_icmp = pf_icmp_mapping(pd, icmptype, - &icmp_dir, &virtual_id, &virtual_type); - if (icmp_dir == PF_IN) { - pd->osport = pd->nsport = virtual_id; - pd->odport = pd->ndport = virtual_type; - } else { - pd->osport = pd->nsport = virtual_type; - pd->odport = pd->ndport = virtual_id; - } - break; -#endif /* INET6 */ - } + struct pf_rule *r; - ruleset = &pf_main_ruleset; - r = TAILQ_FIRST(pf_main_ruleset.rules.active.ptr); + r = TAILQ_FIRST(ruleset->rules.active.ptr); while (r != NULL) { r->evaluations++; - PF_TEST_ATTRIB((pfi_kif_match(r->kif, pd->kif) == r->ifnot), + PF_TEST_ATTRIB((pfi_kif_match(r->kif, cx->pd->kif) == r->ifnot), r->skip[PF_SKIP_IFP].ptr); - PF_TEST_ATTRIB((r->direction && r->direction != pd->dir), + PF_TEST_ATTRIB((r->direction && r->direction != cx->pd->dir), r->skip[PF_SKIP_DIR].ptr); PF_TEST_ATTRIB((r->onrdomain >= 0 && - (r->onrdomain == pd->rdomain) == r->ifnot), + (r->onrdomain == cx->pd->rdomain) == r->ifnot), r->skip[PF_SKIP_RDOM].ptr); - PF_TEST_ATTRIB((r->af && r->af != pd->af), + PF_TEST_ATTRIB((r->af && r->af != cx->pd->af), r->skip[PF_SKIP_AF].ptr); - PF_TEST_ATTRIB((r->proto && r->proto != pd->proto), + PF_TEST_ATTRIB((r->proto && r->proto != cx->pd->proto), r->skip[PF_SKIP_PROTO].ptr); - PF_TEST_ATTRIB((PF_MISMATCHAW(&r->src.addr, &pd->nsaddr, - pd->naf, r->src.neg, pd->kif, act.rtableid)), + PF_TEST_ATTRIB((PF_MISMATCHAW(&r->src.addr, &cx->pd->nsaddr, + cx->pd->naf, r->src.neg, cx->pd->kif, cx->act.rtableid)), r->skip[PF_SKIP_SRC_ADDR].ptr); - PF_TEST_ATTRIB((PF_MISMATCHAW(&r->dst.addr, &pd->ndaddr, pd->af, - 
r->dst.neg, NULL, act.rtableid)), + PF_TEST_ATTRIB((PF_MISMATCHAW(&r->dst.addr, &cx->pd->ndaddr, + cx->pd->af, r->dst.neg, NULL, cx->act.rtableid)), r->skip[PF_SKIP_DST_ADDR].ptr); - switch (pd->virtual_proto) { + switch (cx->pd->virtual_proto) { case PF_VPROTO_FRAGMENT: /* tcp/udp only. port_op always 0 in other cases */ PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op), TAILQ_NEXT(r, entries)); - PF_TEST_ATTRIB((pd->proto == IPPROTO_TCP && r->flagset), + PF_TEST_ATTRIB((cx->pd->proto == IPPROTO_TCP && + r->flagset), TAILQ_NEXT(r, entries)); /* icmp only. type/code always 0 in other cases */ PF_TEST_ATTRIB((r->type || r->code), @@ -3173,11 +3095,11 @@ pf_test_rule(struct pf_pdesc *pd, struct break; case IPPROTO_TCP: - PF_TEST_ATTRIB(((r->flagset & th->th_flags) != + PF_TEST_ATTRIB(((r->flagset & cx->th->th_flags) != r->flags), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB((r->os_fingerprint != PF_OSFP_ANY && - !pf_osfp_match(pf_osfp_fingerprint(pd), + !pf_osfp_match(pf_osfp_fingerprint(cx->pd), r->os_fingerprint)), TAILQ_NEXT(r, entries)); /* FALLTHROUGH */ @@ -3186,40 +3108,40 @@ pf_test_rule(struct pf_pdesc *pd, struct /* tcp/udp only. port_op always 0 in other cases */ PF_TEST_ATTRIB((r->src.port_op && !pf_match_port(r->src.port_op, r->src.port[0], - r->src.port[1], pd->nsport)), + r->src.port[1], cx->pd->nsport)), r->skip[PF_SKIP_SRC_PORT].ptr); PF_TEST_ATTRIB((r->dst.port_op && !pf_match_port(r->dst.port_op, r->dst.port[0], - r->dst.port[1], pd->ndport)), + r->dst.port[1], cx->pd->ndport)), r->skip[PF_SKIP_DST_PORT].ptr); /* tcp/udp only. uid.op always 0 in other cases */ - PF_TEST_ATTRIB((r->uid.op && (pd->lookup.done || - (pd->lookup.done = - pf_socket_lookup(pd), 1)) && + PF_TEST_ATTRIB((r->uid.op && (cx->pd->lookup.done || + (cx->pd->lookup.done = + pf_socket_lookup(cx->pd), 1)) && !pf_match_uid(r->uid.op, r->uid.uid[0], - r->uid.uid[1], pd->lookup.uid)), + r->uid.uid[1], cx->pd->lookup.uid)), TAILQ_NEXT(r, entries)); /* tcp/udp only. 
gid.op always 0 in other cases */ - PF_TEST_ATTRIB((r->gid.op && (pd->lookup.done || - (pd->lookup.done = - pf_socket_lookup(pd), 1)) && + PF_TEST_ATTRIB((r->gid.op && (cx->pd->lookup.done || + (cx->pd->lookup.done = + pf_socket_lookup(cx->pd), 1)) && !pf_match_gid(r->gid.op, r->gid.gid[0], - r->gid.gid[1], pd->lookup.gid)), + r->gid.gid[1], cx->pd->lookup.gid)), TAILQ_NEXT(r, entries)); break; case IPPROTO_ICMP: case IPPROTO_ICMPV6: /* icmp only. type always 0 in other cases */ - PF_TEST_ATTRIB((r->type && r->type != icmptype + 1), + PF_TEST_ATTRIB((r->type && r->type != cx->icmptype + 1), TAILQ_NEXT(r, entries)); /* icmp only. type always 0 in other cases */ - PF_TEST_ATTRIB((r->code && r->code != icmpcode + 1), + PF_TEST_ATTRIB((r->code && r->code != cx->icmpcode + 1), TAILQ_NEXT(r, entries)); /* icmp only. don't create states on replies */ - PF_TEST_ATTRIB((r->keep_state && !state_icmp && + PF_TEST_ATTRIB((r->keep_state && !cx->state_icmp && (r->rule_flag & PFRULE_STATESLOPPY) == 0 && - icmp_dir != PF_IN), + cx->icmp_dir != PF_IN), TAILQ_NEXT(r, entries)); break; @@ -3228,95 +3150,186 @@ pf_test_rule(struct pf_pdesc *pd, struct } PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT && - pd->virtual_proto != PF_VPROTO_FRAGMENT), + cx->pd->virtual_proto != PF_VPROTO_FRAGMENT), TAILQ_NEXT(r, entries)); - PF_TEST_ATTRIB((r->tos && !(r->tos == pd->tos)), + PF_TEST_ATTRIB((r->tos && !(r->tos == cx->pd->tos)), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB((r->prob && r->prob <= arc4random_uniform(UINT_MAX - 1) + 1), TAILQ_NEXT(r, entries)); - PF_TEST_ATTRIB((r->match_tag && !pf_match_tag(pd->m, r, &tag)), + PF_TEST_ATTRIB((r->match_tag && + !pf_match_tag(cx->pd->m, r, &cx->tag)), TAILQ_NEXT(r, entries)); - PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(pd->m, r) == + PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(cx->pd->m, r) == r->rcvifnot), TAILQ_NEXT(r, entries)); PF_TEST_ATTRIB((r->prio && - (r->prio == PF_PRIO_ZERO ? 
0 : r->prio) != pd->m->m_pkthdr.pf.prio), + (r->prio == PF_PRIO_ZERO ? 0 : r->prio) != + cx->pd->m->m_pkthdr.pf.prio), TAILQ_NEXT(r, entries)); /* FALLTHROUGH */ if (r->tag) - tag = r->tag; + cx->tag = r->tag; if (r->anchor == NULL) { if (r->action == PF_MATCH) { - if ((ri = pool_get(&pf_rule_item_pl, + if ((cx->ri = pool_get(&pf_rule_item_pl, PR_NOWAIT)) == NULL) { - REASON_SET(&reason, PFRES_MEMORY); - goto cleanup; + REASON_SET(&cx->reason, PFRES_MEMORY); + cx->test_status = PF_TEST_FAIL; + break; } - ri->r = r; + cx->ri->r = r; /* order is irrelevant */ - SLIST_INSERT_HEAD(&rules, ri, entry); - pf_rule_to_actions(r, &act); + SLIST_INSERT_HEAD(&cx->rules, cx->ri, entry); + cx->ri = NULL; + pf_rule_to_actions(r, &cx->act); if (r->rule_flag & PFRULE_AFTO) - pd->naf = r->naf; - if (pf_get_transaddr(r, pd, sns, &nr) == -1) { - REASON_SET(&reason, PFRES_TRANSLATE); - goto cleanup; + cx->pd->naf = r->naf; + if (pf_get_transaddr(r, cx->pd, cx->sns, + &cx->nr) == -1) { + REASON_SET(&cx->reason, PFRES_TRANSLATE); + cx->test_status = PF_TEST_FAIL; + break; } #if NPFLOG > 0 if (r->log) { - REASON_SET(&reason, PFRES_MATCH); - PFLOG_PACKET(pd, reason, r, a, ruleset, - NULL); + REASON_SET(&cx->reason, PFRES_MATCH); + PFLOG_PACKET(cx->pd, cx->reason, r, + cx->a, ruleset, NULL); } #endif /* NPFLOG > 0 */ } else { - match = asd; - *rm = r; - *am = a; - *rsm = ruleset; - arsm = aruleset; + /* + * found matching r + */ + *cx->rm = r; + /* + * anchor, with ruleset, where r belongs to + */ + *cx->am = cx->a; + /* + * ruleset where r belongs to + */ + *cx->rsm = ruleset; + /* + * ruleset, where anchor belongs to. 
+ */ + cx->arsm = cx->aruleset; } #if NPFLOG > 0 - if (act.log & PF_LOG_MATCHES) - pf_log_matches(pd, r, a, ruleset, &rules); + if (cx->act.log & PF_LOG_MATCHES) + pf_log_matches(cx->pd, r, cx->a, ruleset, + &cx->rules); #endif /* NPFLOG > 0 */ - if (r->quick) + if (r->quick) { + cx->test_status = PF_TEST_QUICK; break; - r = TAILQ_NEXT(r, entries); + } } else { - aruleset = ruleset; - pf_step_into_anchor(&asd, &ruleset, &r, &a); + cx->a = r; /* remember anchor */ + cx->aruleset = ruleset; /* and its ruleset */ + if (pf_step_into_anchor(cx, r) != PF_TEST_OK) { + break; + } } + r = TAILQ_NEXT(r, entries); +nextrule:; + } - nextrule: - if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, - &r, &a, &match)) - break; + return (cx->test_status); +} + +int +pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm, + struct pf_rule **am, struct pf_ruleset **rsm) +{ + struct pf_rule *r = NULL; + struct pf_rule *a = NULL; + struct pf_ruleset *ruleset = NULL; + struct pf_state_key *skw = NULL, *sks = NULL; + int rewrite = 0; + u_int16_t virtual_type, virtual_id; + int action = PF_DROP; + struct pf_test_ctx cx; + int rv; + + bzero(&cx, sizeof(cx)); + cx.pd = pd; + cx.rm = rm; + cx.am = am; + cx.rsm = rsm; + cx.th = pd->hdr.tcp; + cx.act.rtableid = pd->rdomain; + SLIST_INIT(&cx.rules); + + if (pd->dir == PF_IN && if_congested()) { + REASON_SET(&cx.reason, PFRES_CONGEST); + return (PF_DROP); + } + + switch (pd->virtual_proto) { + case IPPROTO_ICMP: + cx.icmptype = pd->hdr.icmp->icmp_type; + cx.icmpcode = pd->hdr.icmp->icmp_code; + cx.state_icmp = pf_icmp_mapping(pd, cx.icmptype, + &cx.icmp_dir, &virtual_id, &virtual_type); + if (cx.icmp_dir == PF_IN) { + pd->osport = pd->nsport = virtual_id; + pd->odport = pd->ndport = virtual_type; + } else { + pd->osport = pd->nsport = virtual_type; + pd->odport = pd->ndport = virtual_id; + } + break; +#ifdef INET6 + case IPPROTO_ICMPV6: + cx.icmptype = pd->hdr.icmp6->icmp6_type; + cx.icmpcode = pd->hdr.icmp6->icmp6_code; 
+ cx.state_icmp = pf_icmp_mapping(pd, cx.icmptype, + &cx.icmp_dir, &virtual_id, &virtual_type); + if (cx.icmp_dir == PF_IN) { + pd->osport = pd->nsport = virtual_id; + pd->odport = pd->ndport = virtual_type; + } else { + pd->osport = pd->nsport = virtual_type; + pd->odport = pd->ndport = virtual_id; + } + break; +#endif /* INET6 */ } - r = *rm; /* matching rule */ - a = *am; /* rule that defines an anchor containing 'r' */ - ruleset = *rsm; /* ruleset of the anchor defined by the rule 'a' */ - aruleset = arsm;/* ruleset of the 'a' rule itself */ + + ruleset = &pf_main_ruleset; + rv = pf_match_rule(&cx, ruleset); + if (rv == PF_TEST_FAIL) { + /* + * Reason has been set in pf_match_rule() already. + */ + goto cleanup; + } + + r = *cx.rm; /* matching rule */ + a = *cx.am; /* rule that defines an anchor containing 'r' */ + ruleset = *cx.rsm;/* ruleset of the anchor defined by the rule 'a' */ + cx.aruleset = cx.arsm;/* ruleset of the 'a' rule itself */ /* apply actions for last matching pass/block rule */ - pf_rule_to_actions(r, &act); + pf_rule_to_actions(r, &cx.act); if (r->rule_flag & PFRULE_AFTO) pd->naf = r->naf; - if (pf_get_transaddr(r, pd, sns, &nr) == -1) { - REASON_SET(&reason, PFRES_TRANSLATE); + if (pf_get_transaddr(r, pd, cx.sns, &cx.nr) == -1) { + REASON_SET(&cx.reason, PFRES_TRANSLATE); goto cleanup; } - REASON_SET(&reason, PFRES_MATCH); + REASON_SET(&cx.reason, PFRES_MATCH); #if NPFLOG > 0 if (r->log) - PFLOG_PACKET(pd, reason, r, a, ruleset, NULL); - if (act.log & PF_LOG_MATCHES) - pf_log_matches(pd, r, a, ruleset, &rules); + PFLOG_PACKET(pd, cx.reason, r, cx.a, ruleset, NULL); + if (cx.act.log & PF_LOG_MATCHES) + pf_log_matches(pd, r, cx.a, ruleset, &cx.rules); #endif /* NPFLOG > 0 */ if (pd->virtual_proto != PF_VPROTO_FRAGMENT && @@ -3327,30 +3340,30 @@ pf_test_rule(struct pf_pdesc *pd, struct if (pd->proto == IPPROTO_TCP && ((r->rule_flag & PFRULE_RETURNRST) || (r->rule_flag & PFRULE_RETURN)) && - !(th->th_flags & TH_RST)) { - u_int32_t ack = 
ntohl(th->th_seq) + pd->p_len; + !(cx.th->th_flags & TH_RST)) { + u_int32_t ack = ntohl(cx.th->th_seq) + pd->p_len; if (pf_check_proto_cksum(pd, pd->off, pd->tot_len - pd->off, IPPROTO_TCP, pd->af)) - REASON_SET(&reason, PFRES_PROTCKSUM); + REASON_SET(&cx.reason, PFRES_PROTCKSUM); else { - if (th->th_flags & TH_SYN) + if (cx.th->th_flags & TH_SYN) ack++; - if (th->th_flags & TH_FIN) + if (cx.th->th_flags & TH_FIN) ack++; pf_send_tcp(r, pd->af, pd->dst, - pd->src, th->th_dport, th->th_sport, - ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, - r->return_ttl, 1, 0, pd->rdomain); + pd->src, cx.th->th_dport, cx.th->th_sport, + ntohl(cx.th->th_ack), ack, TH_RST|TH_ACK, + 0, 0, r->return_ttl, 1, 0, pd->rdomain); } } else if ((pd->proto != IPPROTO_ICMP || - ICMP_INFOTYPE(icmptype)) && pd->af == AF_INET && + ICMP_INFOTYPE(cx.icmptype)) && pd->af == AF_INET && r->return_icmp) pf_send_icmp(pd->m, r->return_icmp >> 8, r->return_icmp & 255, pd->af, r, pd->rdomain); else if ((pd->proto != IPPROTO_ICMPV6 || - (icmptype >= ICMP6_ECHO_REQUEST && - icmptype != ND_REDIRECT)) && pd->af == AF_INET6 && + (cx.icmptype >= ICMP6_ECHO_REQUEST && + cx.icmptype != ND_REDIRECT)) && pd->af == AF_INET6 && r->return_icmp6) pf_send_icmp(pd->m, r->return_icmp6 >> 8, r->return_icmp6 & 255, pd->af, r, pd->rdomain); @@ -3359,13 +3372,13 @@ pf_test_rule(struct pf_pdesc *pd, struct if (r->action == PF_DROP) goto cleanup; - pf_tag_packet(pd->m, tag, act.rtableid); - if (act.rtableid >= 0 && - rtable_l2(act.rtableid) != pd->rdomain) + pf_tag_packet(pd->m, cx.tag, cx.act.rtableid); + if (cx.act.rtableid >= 0 && + rtable_l2(cx.act.rtableid) != pd->rdomain) pd->destchg = 1; if (r->action == PF_PASS && pd->badopts && ! 
r->allow_opts) { - REASON_SET(&reason, PFRES_IPOPTIONS); + REASON_SET(&cx.reason, PFRES_IPOPTIONS); #if NPFLOG > 0 pd->pflog |= PF_LOG_FORCE; #endif /* NPFLOG > 0 */ @@ -3375,23 +3388,23 @@ pf_test_rule(struct pf_pdesc *pd, struct } if (pd->virtual_proto != PF_VPROTO_FRAGMENT - && !state_icmp && r->keep_state) { + && !cx.state_icmp && r->keep_state) { if (r->rule_flag & PFRULE_SRCTRACK && - pf_insert_src_node(&sns[PF_SN_NONE], r, PF_SN_NONE, pd->af, - pd->src, NULL, 0) != 0) { - REASON_SET(&reason, PFRES_SRCLIMIT); + pf_insert_src_node(&cx.sns[PF_SN_NONE], r, PF_SN_NONE, + pd->af, pd->src, NULL, 0) != 0) { + REASON_SET(&cx.reason, PFRES_SRCLIMIT); goto cleanup; } if (r->max_states && (r->states_cur >= r->max_states)) { pf_status.lcounters[LCNT_STATES]++; - REASON_SET(&reason, PFRES_MAXSTATES); + REASON_SET(&cx.reason, PFRES_MAXSTATES); goto cleanup; } - action = pf_create_state(pd, r, a, nr, &skw, &sks, &rewrite, - sm, tag, &rules, &act, sns); + action = pf_create_state(pd, r, a, cx.nr, &skw, &sks, &rewrite, + sm, cx.tag, &cx.rules, &cx.act, cx.sns); if (action != PF_PASS) goto cleanup; @@ -3407,12 +3420,12 @@ pf_test_rule(struct pf_pdesc *pd, struct sk->port[pd->af == pd->naf ? pd->sidx : pd->didx], &sk->addr[pd->af == pd->naf ? pd->didx : pd->sidx], sk->port[pd->af == pd->naf ? 
pd->didx : pd->sidx], - virtual_type, icmp_dir); + virtual_type, cx.icmp_dir); } } else { - while ((ri = SLIST_FIRST(&rules))) { - SLIST_REMOVE_HEAD(&rules, entry); - pool_put(&pf_rule_item_pl, ri); + while ((cx.ri = SLIST_FIRST(&cx.rules))) { + SLIST_REMOVE_HEAD(&cx.rules, entry); + pool_put(&pf_rule_item_pl, cx.ri); } } @@ -3437,7 +3450,7 @@ pf_test_rule(struct pf_pdesc *pd, struct #endif /* NPFSYNC > 0 */ if (r->rule_flag & PFRULE_ONCE) - pf_purge_rule(ruleset, r, aruleset, a); + pf_purge_rule(ruleset, r, cx.aruleset, cx.a); #ifdef INET6 if (rewrite && skw->af != sks->af) @@ -3447,9 +3460,9 @@ pf_test_rule(struct pf_pdesc *pd, struct return (PF_PASS); cleanup: - while ((ri = SLIST_FIRST(&rules))) { - SLIST_REMOVE_HEAD(&rules, entry); - pool_put(&pf_rule_item_pl, ri); + while ((cx.ri = SLIST_FIRST(&cx.rules))) { + SLIST_REMOVE_HEAD(&cx.rules, entry); + pool_put(&pf_rule_item_pl, cx.ri); } return (action);