On Wed, Oct 16, 2013 at 09:16:43AM +0200, Martin Matuska wrote:
M> Hi, I have encountered the same mtag panic Craig had with VIMAGE + PF
M> and have reported it in PR 182964:
M> http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/182964
M>
M> Here are two possible solutions I would like to discuss; both make the
M> panic go away:
M>
M> 1.) De-virtualize the variable, as Marco suggested. This is the more
M> intrusive change to pf.c:
M> http://people.freebsd.org/~mm/patches/pf_mtag.patch
M>
M> 2.) Add vnet context to struct m_tag. This is less intrusive to pf.c,
M> and the uma zone remains virtualized:
M> http://people.freebsd.org/~mm/patches/pf_mtag.2.patch
M>
M> Which of these approaches should we take, or is this to be solved in a
M> completely different way?
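To recap the failure mode: the memory behind pf's mbuf tags comes from a per-vnet UMA zone, but the tag's free callback can run while a different vnet, or none at all, is current. That is what option 1 sidesteps by making the zone an ordinary global. Roughly, assuming the pf_mtag_z / pf_mtag_free names from pf.c (declarations abbreviated; this is only a sketch):

/*
 * pf.c today (abbreviated): the tag zone is virtualized,
 *
 *	static VNET_DEFINE(uma_zone_t, pf_mtag_z);
 *	#define V_pf_mtag_z VNET(pf_mtag_z)
 *
 * and pf_mtag_free() does uma_zfree(V_pf_mtag_z, t), which dereferences
 * whatever vnet happens to be current when the mbuf is finally freed.
 * De-virtualized, the zone is shared and the callback needs no context:
 */
static uma_zone_t pf_mtag_z;		/* one zone for all vnets */

static void
pf_mtag_free(struct m_tag *t)
{
	/* No curvnet needed, no matter where the mbuf dies. */
	uma_zfree(pf_mtag_z, t);
}
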
As I already said, both patches look incorrect. The correct way is to separate the things we want to be global from the things we want to be virtualized, and then hook the latter to VNET_SYSINIT(). There is work in progress on that by Nikos. Unfortunately, he is now very busy with real life and his hacking is on hiatus. You can find his WIP patches in the attachments. Although they aren't finished, the approach is correct. I also checked in some of his code to projects/pf:

http://svnweb.freebsd.org/base?view=revision&revision=251993

-- 
Totus tuus, Glebius.
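In practice the split looks roughly like the sketch below. The names follow the attached diffs, but the prototypes, error handling, and SYSINIT ordering are simplified here; treat it as an illustration of the idiom, not the final code.

/* Truly global: one lock for the whole kernel, initialized exactly once. */
struct mtx pf_unlnkdrules_mtx;

/* Per-vnet: every vnet gets its own queue of unlinked rules. */
VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
#define	V_pf_unlinked_rules	VNET(pf_unlinked_rules)

static int
pf_load(void)
{
	/* Module load path: touch only the globals. */
	rw_init(&pf_rules_lock, "pf rulesets");
	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
	pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
	return (0);
}

static void
vnet_pf_init(void *unused)
{
	/*
	 * Per-vnet path: runs once in every vnet (both those existing at
	 * module load time and those created later), with curvnet set by
	 * the VNET sysinit glue.
	 */
	TAILQ_INIT(&V_pf_unlinked_rules);
	(void)pfattach();	/* error handling omitted in this sketch */
}
VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY - 255,
    vnet_pf_init, NULL);
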
Index: sys/net/pfvar.h =================================================================== --- sys/net/pfvar.h (revision 251294) +++ sys/net/pfvar.h (working copy) @@ -901,7 +901,6 @@ struct pf_ruleset *, struct pf_pdesc *, int); extern pflog_packet_t *pflog_packet_ptr; -#define V_pf_end_threads VNET(pf_end_threads) #endif /* _KERNEL */ #define PFSYNC_FLAG_SRCNODE 0x04 Index: sys/netpfil/pf/pf.c =================================================================== --- sys/netpfil/pf/pf.c (revision 251294) +++ sys/netpfil/pf/pf.c (working copy) @@ -300,8 +300,6 @@ int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len); -VNET_DECLARE(int, pf_end_threads); - VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]); #define PACKET_LOOPED(pd) ((pd)->pf_mtag && \ @@ -359,15 +357,13 @@ SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)"); -VNET_DEFINE(u_long, pf_hashsize); -#define V_pf_hashsize VNET(pf_hashsize) -SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN, - &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable"); +u_long pf_hashsize; +SYSCTL_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN, + &pf_hashsize, 0, "Size of pf(4) states hashtable"); -VNET_DEFINE(u_long, pf_srchashsize); -#define V_pf_srchashsize VNET(pf_srchashsize) -SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN, - &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable"); +u_long pf_srchashsize; +SYSCTL_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN, + &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable"); VNET_DEFINE(void *, pf_swi_cookie); @@ -698,12 +694,12 @@ struct pf_srchash *sh; u_int i; - TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize); - if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize)) - V_pf_hashsize = PF_HASHSIZ; - TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize); - if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize)) - V_pf_srchashsize = PF_HASHSIZ / 4; + TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize); + if (pf_hashsize == 0 || !powerof2(pf_hashsize)) + pf_hashsize = PF_HASHSIZ; + TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &pf_srchashsize); + if (pf_srchashsize == 0 || !powerof2(pf_srchashsize)) + pf_srchashsize = PF_HASHSIZ / 4; V_pf_hashseed = arc4random(); @@ -717,11 +713,11 @@ V_pf_state_key_z = uma_zcreate("pf state keys", sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash), + V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO); - V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash), + V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO); - V_pf_hashmask = V_pf_hashsize - 1; + V_pf_hashmask = pf_hashsize - 1; for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask; i++, kh++, ih++) { mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF); @@ -735,9 +731,9 @@ V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z; uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT); uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached"); - V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash), + V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash), M_PFHASH, M_WAITOK|M_ZERO); - V_pf_srchashmask = V_pf_srchashsize - 1; + V_pf_srchashmask = pf_srchashsize - 1; for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) mtx_init(&sh->lock, 
"pf_srchash", NULL, MTX_DEF); @@ -757,13 +753,17 @@ STAILQ_INIT(&V_pf_sendqueue); SLIST_INIT(&V_pf_overloadqueue); TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, &V_pf_overloadqueue); - mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF); - mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL, - MTX_DEF); + if (IS_DEFAULT_VNET(curvnet)) { + mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF); + mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL, + MTX_DEF); + } /* Unlinked, but may be referenced rules. */ TAILQ_INIT(&V_pf_unlinked_rules); - mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF); + if (IS_DEFAULT_VNET(curvnet)) + mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF); + } void @@ -1309,68 +1309,35 @@ pf_purge_thread(void *v) { u_int idx = 0; + VNET_ITERATOR_DECL(vnet_iter); - CURVNET_SET((struct vnet *)v); - for (;;) { - PF_RULES_RLOCK(); - rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10); + tsleep(pf_purge_thread, PWAIT, "pftm", hz / 10); + VNET_LIST_RLOCK(); + VNET_FOREACH(vnet_iter) { + CURVNET_SET(vnet_iter); - if (V_pf_end_threads) { - /* - * To cleanse up all kifs and rules we need - * two runs: first one clears reference flags, - * then pf_purge_expired_states() doesn't - * raise them, and then second run frees. - */ - PF_RULES_RUNLOCK(); - pf_purge_unlinked_rules(); - pfi_kif_purge(); - - /* - * Now purge everything. - */ - pf_purge_expired_states(0, V_pf_hashmask); - pf_purge_expired_fragments(); - pf_purge_expired_src_nodes(); - - /* - * Now all kifs & rules should be unreferenced, - * thus should be successfully freed. - */ - pf_purge_unlinked_rules(); - pfi_kif_purge(); - - /* - * Announce success and exit. - */ - PF_RULES_RLOCK(); - V_pf_end_threads++; - PF_RULES_RUNLOCK(); - wakeup(pf_purge_thread); - kproc_exit(0); - } - PF_RULES_RUNLOCK(); - /* Process 1/interval fraction of the state table every run. */ idx = pf_purge_expired_states(idx, V_pf_hashmask / - (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10)); + (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10)); /* Purge other expired types every PFTM_INTERVAL seconds. 
*/ if (idx == 0) { - /* - * Order is important: - * - states and src nodes reference rules - * - states and rules reference kifs - */ - pf_purge_expired_fragments(); - pf_purge_expired_src_nodes(); - pf_purge_unlinked_rules(); - pfi_kif_purge(); + /* + * Order is important: + * - states and src nodes reference rules + * - states and rules reference kifs + */ + pf_purge_expired_fragments(); + pf_purge_expired_src_nodes(); + pf_purge_unlinked_rules(); + pfi_kif_purge(); } + CURVNET_RESTORE(); + } + VNET_LIST_RUNLOCK(); } /* not reached */ - CURVNET_RESTORE(); } u_int32_t Index: sys/netpfil/pf/pf_if.c =================================================================== --- sys/netpfil/pf/pf_if.c (revision 251294) +++ sys/netpfil/pf/pf_if.c (working copy) @@ -110,7 +110,8 @@ V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer), PFI_MTYPE, M_WAITOK); - mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF); + if (IS_DEFAULT_VNET(curvnet)) + mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF); kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); PF_RULES_WLOCK(); @@ -124,18 +125,20 @@ pfi_attach_ifnet(ifp); IFNET_RUNLOCK(); - pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event, - pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY); - pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event, - pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY); - pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event, - pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY); - pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event, - pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY); - pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event, - pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY); - pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event, - pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY); + if (IS_DEFAULT_VNET(curvnet)) { + pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event, + pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY); + pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event, + pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY); + pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event, + pfi_attach_group_event, curvnet, EVENTHANDLER_PRI_ANY); + pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event, + pfi_change_group_event, curvnet, EVENTHANDLER_PRI_ANY); + pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event, + pfi_detach_group_event, curvnet, EVENTHANDLER_PRI_ANY); + pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event, + pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY); + } } void Index: sys/netpfil/pf/pf_ioctl.c =================================================================== --- sys/netpfil/pf/pf_ioctl.c (revision 251294) +++ sys/netpfil/pf/pf_ioctl.c (working copy) @@ -183,7 +183,6 @@ static volatile VNET_DEFINE(int, pf_pfil_hooked); #define V_pf_pfil_hooked VNET(pf_pfil_hooked) -VNET_DEFINE(int, pf_end_threads); struct rwlock pf_rules_lock; @@ -254,10 +253,13 @@ /* XXX do our best to avoid a conflict */ V_pf_status.hostid = arc4random(); - if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0, - "pf purge")) != 0) - /* XXXGL: leaked all above. */ - return (error); + if (IS_DEFAULT_VNET(curvnet)) { + if ((error = kproc_create(pf_purge_thread, curvnet, NULL, 0, 0, + "pf purge")) != 0) { + /* XXXGL: leaked all above. 
*/ + return (error); + } + } if ((error = swi_add(NULL, "pf send", pf_intr, curvnet, SWI_NET, INTR_MPSAFE, &V_pf_swi_cookie)) != 0) /* XXXGL: leaked all above. */ @@ -3631,24 +3633,22 @@ static int pf_load(void) { - int error; - VNET_ITERATOR_DECL(vnet_iter); + rw_init(&pf_rules_lock, "pf rulesets"); + pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); - VNET_LIST_RLOCK(); - VNET_FOREACH(vnet_iter) { - CURVNET_SET(vnet_iter); - V_pf_pfil_hooked = 0; - V_pf_end_threads = 0; - TAILQ_INIT(&V_pf_tags); - TAILQ_INIT(&V_pf_qids); - CURVNET_RESTORE(); - } - VNET_LIST_RUNLOCK(); + return (0); +} - rw_init(&pf_rules_lock, "pf rulesets"); +static int +vnet_pf_init(void) +{ + int error; - pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); + V_pf_pfil_hooked = 0; + TAILQ_INIT(&V_pf_tags); + TAILQ_INIT(&V_pf_qids); + if ((error = pfattach()) != 0) return (error); @@ -3676,11 +3676,6 @@ } PF_RULES_WLOCK(); shutdown_pf(); - V_pf_end_threads = 1; - while (V_pf_end_threads < 2) { - wakeup_one(pf_purge_thread); - rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftmo", 0); - } pf_normalize_cleanup(); pfi_cleanup(); pfr_cleanup(); @@ -3727,3 +3722,6 @@ DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST); MODULE_VERSION(pf, PF_MODVER); + +VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY - 255, + vnet_pf_init, NULL); Index: sys/netpfil/pf/pf_norm.c =================================================================== --- sys/netpfil/pf/pf_norm.c (revision 251294) +++ sys/netpfil/pf/pf_norm.c (working copy) @@ -163,7 +163,8 @@ uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT); uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached"); - mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF); + if (IS_DEFAULT_VNET(curvnet)) + mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF); TAILQ_INIT(&V_pf_fragqueue); TAILQ_INIT(&V_pf_cachequeue); Index: sys/netpfil/pf/pf_table.c =================================================================== --- sys/netpfil/pf/pf_table.c (revision 251294) +++ sys/netpfil/pf/pf_table.c (working copy) @@ -183,10 +183,14 @@ static RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); static RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); -struct pfr_ktablehead pfr_ktables; +VNET_DEFINE(struct pfr_ktablehead, pfr_ktables); +#define V_pfr_ktables VNET(pfr_ktables) + struct pfr_table pfr_nulltable; -int pfr_ktable_cnt; +VNET_DEFINE(int, pfr_ktable_cnt); +#define V_pfr_ktable_cnt VNET(pfr_ktable_cnt) + void pfr_initialize(void) { @@ -1082,7 +1086,7 @@ return (ENOENT); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { if (pfr_skip_table(filter, p, flags)) continue; if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) @@ -1117,7 +1121,7 @@ flags & PFR_FLAG_USERIOCTL)) senderr(EINVAL); key.pfrkt_flags |= PFR_TFLAG_ACTIVE; - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); if (p == NULL) { p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); if (p == NULL) @@ -1133,7 +1137,7 @@ /* find or create root table */ bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); - r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + r = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); if (r != NULL) { p->pfrkt_root = r; goto _skip; @@ -1189,7 +1193,7 @@ if (pfr_validate_table(&key.pfrkt_t, 0, flags & PFR_FLAG_USERIOCTL)) return (EINVAL); - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RB_FIND(pfr_ktablehead, 
&V_pfr_ktables, &key); if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { SLIST_FOREACH(q, &workq, pfrkt_workq) if (!pfr_ktable_compare(p, q)) @@ -1228,7 +1232,7 @@ *size = n; return (0); } - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { if (pfr_skip_table(filter, p, flags)) continue; if (n-- <= 0) @@ -1263,7 +1267,7 @@ return (0); } SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { if (pfr_skip_table(filter, p, flags)) continue; if (n-- <= 0) @@ -1295,7 +1299,7 @@ bcopy(tbl + i, &key.pfrkt_t, sizeof(key.pfrkt_t)); if (pfr_validate_table(&key.pfrkt_t, 0, 0)) return (EINVAL); - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); if (p != NULL) { SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); xzero++; @@ -1327,7 +1331,7 @@ if (pfr_validate_table(&key.pfrkt_t, 0, flags & PFR_FLAG_USERIOCTL)) return (EINVAL); - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { p->pfrkt_nflags = (p->pfrkt_flags | setflag) & ~clrflag; @@ -1369,7 +1373,7 @@ if (rs == NULL) return (ENOMEM); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || pfr_skip_table(trs, p, 0)) continue; @@ -1414,7 +1418,7 @@ return (EBUSY); tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; SLIST_INIT(&tableq); - kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl); + kt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl); if (kt == NULL) { kt = pfr_create_ktable(tbl, 0, 1); if (kt == NULL) @@ -1427,7 +1431,7 @@ /* find or create root table */ bzero(&key, sizeof(key)); strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); - rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + rt = RB_FIND(pfr_ktablehead, &V_pfr_ktables, &key); if (rt != NULL) { kt->pfrkt_root = rt; goto _skip; @@ -1504,7 +1508,7 @@ if (rs == NULL || !rs->topen || ticket != rs->tticket) return (0); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || pfr_skip_table(trs, p, 0)) continue; @@ -1540,7 +1544,7 @@ return (EBUSY); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RB_FOREACH(p, pfr_ktablehead, &V_pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || pfr_skip_table(trs, p, 0)) continue; @@ -1686,7 +1690,7 @@ PF_RULES_ASSERT(); if (flags & PFR_FLAG_ALLRSETS) - return (pfr_ktable_cnt); + return (V_pfr_ktable_cnt); if (filter->pfrt_anchor[0]) { rs = pf_find_ruleset(filter->pfrt_anchor); return ((rs != NULL) ? 
rs->tables : -1); @@ -1719,8 +1723,8 @@ PF_RULES_WASSERT(); - RB_INSERT(pfr_ktablehead, &pfr_ktables, kt); - pfr_ktable_cnt++; + RB_INSERT(pfr_ktablehead, &V_pfr_ktables, kt); + V_pfr_ktable_cnt++; if (kt->pfrkt_root != NULL) if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) pfr_setflags_ktable(kt->pfrkt_root, @@ -1751,14 +1755,14 @@ if (!(newf & PFR_TFLAG_ACTIVE)) newf &= ~PFR_TFLAG_USRMASK; if (!(newf & PFR_TFLAG_SETMASK)) { - RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt); + RB_REMOVE(pfr_ktablehead, &V_pfr_ktables, kt); if (kt->pfrkt_root != NULL) if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) pfr_setflags_ktable(kt->pfrkt_root, kt->pfrkt_root->pfrkt_flags & ~PFR_TFLAG_REFDANCHOR); pfr_destroy_ktable(kt, 1); - pfr_ktable_cnt--; + V_pfr_ktable_cnt--; return; } if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) { @@ -1883,7 +1887,7 @@ pfr_lookup_table(struct pfr_table *tbl) { /* struct pfr_ktable start like a struct pfr_table */ - return (RB_FIND(pfr_ktablehead, &pfr_ktables, + return (RB_FIND(pfr_ktablehead, &V_pfr_ktables, (struct pfr_ktable *)tbl)); }
Index: sys/net/pfvar.h =================================================================== --- sys/net/pfvar.h (revision 252856) +++ sys/net/pfvar.h (working copy) @@ -1701,6 +1701,11 @@ VNET_DECLARE(struct pf_rulequeue, pf_unlinked_rule void pf_initialize(void); void pf_cleanup(void); +void pf_overloadqueue_mtx_init(void); +void pf_sendqueue_mtx_init(void); +void pfi_unlnkdkifs_mtx_init(void); +void pf_unlnkdrules_mtx_init(void); +void pf_frag_mtx_init(void); struct pf_mtag *pf_get_mtag(struct mbuf *); Index: sys/netpfil/pf/pf.c =================================================================== --- sys/netpfil/pf/pf.c (revision 252856) +++ sys/netpfil/pf/pf.c (working copy) @@ -755,16 +755,34 @@ pf_initialize() STAILQ_INIT(&V_pf_sendqueue); SLIST_INIT(&V_pf_overloadqueue); TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, &V_pf_overloadqueue); - mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF); - mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL, - MTX_DEF); /* Unlinked, but may be referenced rules. */ TAILQ_INIT(&V_pf_unlinked_rules); +} + +void +pf_unlnkdrules_mtx_init() +{ + mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF); } void +pf_overloadqueue_mtx_init() +{ + + mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL, + MTX_DEF); +} + +void +pf_sendqueue_mtx_init() +{ + + mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF); +} + +void pf_cleanup() { struct pf_keyhash *kh; Index: sys/netpfil/pf/pf_if.c =================================================================== --- sys/netpfil/pf/pf_if.c (revision 252856) +++ sys/netpfil/pf/pf_if.c (working copy) @@ -100,6 +100,13 @@ static VNET_DEFINE(struct pfi_list, pfi_unlinked_k static struct mtx pfi_unlnkdkifs_mtx; void +pfi_unlnkdkifs_mtx_init() +{ + + mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF); +} + +void pfi_initialize(void) { struct ifg_group *ifg; @@ -110,8 +117,6 @@ pfi_initialize(void) V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer), PFI_MTYPE, M_WAITOK); - mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF); - kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); PF_RULES_WLOCK(); V_pfi_all = pfi_kif_attach(kif, IFG_ALL); Index: sys/netpfil/pf/pf_ioctl.c =================================================================== --- sys/netpfil/pf/pf_ioctl.c (revision 252856) +++ sys/netpfil/pf/pf_ioctl.c (working copy) @@ -3629,28 +3629,31 @@ dehook_pf(void) } static int -pf_load(void) +vnet_pf_init(void) { int error; - VNET_ITERATOR_DECL(vnet_iter); + TAILQ_INIT(&V_pf_tags); + TAILQ_INIT(&V_pf_qids); - VNET_LIST_RLOCK(); - VNET_FOREACH(vnet_iter) { - CURVNET_SET(vnet_iter); - V_pf_pfil_hooked = 0; - V_pf_end_threads = 0; - TAILQ_INIT(&V_pf_tags); - TAILQ_INIT(&V_pf_qids); - CURVNET_RESTORE(); - } - VNET_LIST_RUNLOCK(); + if ((error = pfattach()) != 0) + return (error); + return (0); +} + +static int +pf_load(void) +{ + rw_init(&pf_rules_lock, "pf rulesets"); + pf_sendqueue_mtx_init(); + pf_overloadqueue_mtx_init(); + pf_unlnkdrules_mtx_init(); + pfi_unlnkdkifs_mtx_init(); + pf_frag_mtx_init(); pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); - if ((error = pfattach()) != 0) - return (error); return (0); } @@ -3727,3 +3730,5 @@ static moduledata_t pf_mod = { DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST); MODULE_VERSION(pf, PF_MODVER); + +VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY - 255, vnet_pf_init, NULL); Index: sys/netpfil/pf/pf_norm.c 
=================================================================== --- sys/netpfil/pf/pf_norm.c (revision 252856) +++ sys/netpfil/pf/pf_norm.c (working copy) @@ -163,8 +163,6 @@ pf_normalize_init(void) uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT); uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached"); - mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF); - TAILQ_INIT(&V_pf_fragqueue); TAILQ_INIT(&V_pf_cachequeue); } @@ -180,6 +178,13 @@ pf_normalize_cleanup(void) mtx_destroy(&pf_frag_mtx); } +void +pf_frag_mtx_init() +{ + + mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF); +} + static int pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b) {
Index: sys/net/pfvar.h =================================================================== --- sys/net/pfvar.h (revision 252114) +++ sys/net/pfvar.h (working copy) @@ -227,6 +227,17 @@ extern struct rwlock pf_rules_lock; #define PF_RULES_RASSERT() rw_assert(&pf_rules_lock, RA_RLOCKED) #define PF_RULES_WASSERT() rw_assert(&pf_rules_lock, RA_WLOCKED) +extern struct mtx pf_sendqueue_mtx; +#define PF_SENDQ_LOCK() mtx_lock(&pf_sendqueue_mtx) +#define PF_SENDQ_UNLOCK() mtx_unlock(&pf_sendqueue_mtx) + +extern struct mtx pf_overloadqueue_mtx; +#define PF_OVERLOADQ_LOCK() mtx_lock(&pf_overloadqueue_mtx) +#define PF_OVERLOADQ_UNLOCK() mtx_unlock(&pf_overloadqueue_mtx) + +extern struct mtx pfi_unlnkdkifs_mtx; +extern struct mtx pf_frag_mtx; + #define PF_MODVER 1 #define PFLOG_MODVER 1 #define PFSYNC_MODVER 1 Index: sys/netpfil/pf/pf.c =================================================================== --- sys/netpfil/pf/pf.c (revision 252114) +++ sys/netpfil/pf/pf.c (working copy) @@ -157,9 +157,7 @@ STAILQ_HEAD(pf_send_head, pf_send_entry); static VNET_DEFINE(struct pf_send_head, pf_sendqueue); #define V_pf_sendqueue VNET(pf_sendqueue) -static struct mtx pf_sendqueue_mtx; -#define PF_SENDQ_LOCK() mtx_lock(&pf_sendqueue_mtx) -#define PF_SENDQ_UNLOCK() mtx_unlock(&pf_sendqueue_mtx) +struct mtx pf_sendqueue_mtx; /* * Queue for pf_overload_task() tasks. @@ -178,9 +176,7 @@ static VNET_DEFINE(struct pf_overload_head, pf_ove static VNET_DEFINE(struct task, pf_overloadtask); #define V_pf_overloadtask VNET(pf_overloadtask) -static struct mtx pf_overloadqueue_mtx; -#define PF_OVERLOADQ_LOCK() mtx_lock(&pf_overloadqueue_mtx) -#define PF_OVERLOADQ_UNLOCK() mtx_unlock(&pf_overloadqueue_mtx) +struct mtx pf_overloadqueue_mtx; VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules); struct mtx pf_unlnkdrules_mtx; @@ -755,13 +751,9 @@ pf_initialize() STAILQ_INIT(&V_pf_sendqueue); SLIST_INIT(&V_pf_overloadqueue); TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, &V_pf_overloadqueue); - mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF); - mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL, - MTX_DEF); /* Unlinked, but may be referenced rules. 
*/ TAILQ_INIT(&V_pf_unlinked_rules); - mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF); } void Index: sys/netpfil/pf/pf_if.c =================================================================== --- sys/netpfil/pf/pf_if.c (revision 252114) +++ sys/netpfil/pf/pf_if.c (working copy) @@ -97,7 +97,7 @@ MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf(4) interf LIST_HEAD(pfi_list, pfi_kif); static VNET_DEFINE(struct pfi_list, pfi_unlinked_kifs); #define V_pfi_unlinked_kifs VNET(pfi_unlinked_kifs) -static struct mtx pfi_unlnkdkifs_mtx; +struct mtx pfi_unlnkdkifs_mtx; void pfi_initialize(void) @@ -110,8 +110,6 @@ pfi_initialize(void) V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer), PFI_MTYPE, M_WAITOK); - mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF); - kif = malloc(sizeof(*kif), PFI_MTYPE, M_WAITOK); PF_RULES_WLOCK(); V_pfi_all = pfi_kif_attach(kif, IFG_ALL); Index: sys/netpfil/pf/pf_ioctl.c =================================================================== --- sys/netpfil/pf/pf_ioctl.c (revision 252114) +++ sys/netpfil/pf/pf_ioctl.c (working copy) @@ -3629,28 +3629,32 @@ dehook_pf(void) } static int -pf_load(void) +vnet_pf_init(void) { int error; - VNET_ITERATOR_DECL(vnet_iter); + V_pf_pfil_hooked = 0; + TAILQ_INIT(&V_pf_tags); + TAILQ_INIT(&V_pf_qids); - VNET_LIST_RLOCK(); - VNET_FOREACH(vnet_iter) { - CURVNET_SET(vnet_iter); - V_pf_pfil_hooked = 0; - V_pf_end_threads = 0; - TAILQ_INIT(&V_pf_tags); - TAILQ_INIT(&V_pf_qids); - CURVNET_RESTORE(); - } - VNET_LIST_RUNLOCK(); + if ((error = pfattach()) != 0) + return (error); + return (0); +} + +static int +pf_load(void) +{ + rw_init(&pf_rules_lock, "pf rulesets"); - pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); - if ((error = pfattach()) != 0) - return (error); + mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF); + mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL, + MTX_DEF); + mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF); + mtx_init(&pfi_unlnkdkifs_mtx, "pf unlinked interfaces", NULL, MTX_DEF); + mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF); return (0); } @@ -3727,3 +3731,5 @@ static moduledata_t pf_mod = { DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST); MODULE_VERSION(pf, PF_MODVER); + +VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY - 255, vnet_pf_init, NULL); Index: sys/netpfil/pf/pf_norm.c =================================================================== --- sys/netpfil/pf/pf_norm.c (revision 252114) +++ sys/netpfil/pf/pf_norm.c (working copy) @@ -92,7 +92,7 @@ struct pf_fragment { LIST_HEAD(, pf_frent) fr_queue; }; -static struct mtx pf_frag_mtx; +struct mtx pf_frag_mtx; #define PF_FRAG_LOCK() mtx_lock(&pf_frag_mtx) #define PF_FRAG_UNLOCK() mtx_unlock(&pf_frag_mtx) #define PF_FRAG_ASSERT() mtx_assert(&pf_frag_mtx, MA_OWNED) @@ -163,8 +163,6 @@ pf_normalize_init(void) uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT); uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached"); - mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF); - TAILQ_INIT(&V_pf_fragqueue); TAILQ_INIT(&V_pf_cachequeue); }
Index: sys/net/pfvar.h =================================================================== --- sys/net/pfvar.h (revision 251794) +++ sys/net/pfvar.h (working copy) @@ -1659,19 +1659,17 @@ struct pf_idhash { struct mtx lock; }; +extern u_long pf_hashmask; +extern u_long pf_srchashmask; #define PF_HASHSIZ (32768) VNET_DECLARE(struct pf_keyhash *, pf_keyhash); VNET_DECLARE(struct pf_idhash *, pf_idhash); -VNET_DECLARE(u_long, pf_hashmask); #define V_pf_keyhash VNET(pf_keyhash) #define V_pf_idhash VNET(pf_idhash) -#define V_pf_hashmask VNET(pf_hashmask) VNET_DECLARE(struct pf_srchash *, pf_srchash); -VNET_DECLARE(u_long, pf_srchashmask); #define V_pf_srchash VNET(pf_srchash) -#define V_pf_srchashmask VNET(pf_srchashmask) -#define PF_IDHASH(s) (be64toh((s)->id) % (V_pf_hashmask + 1)) +#define PF_IDHASH(s) (be64toh((s)->id) % (pf_hashmask + 1)) VNET_DECLARE(void *, pf_swi_cookie); #define V_pf_swi_cookie VNET(pf_swi_cookie) Index: sys/netpfil/pf/if_pfsync.c =================================================================== --- sys/netpfil/pf/if_pfsync.c (revision 251794) +++ sys/netpfil/pf/if_pfsync.c (working copy) @@ -683,7 +683,7 @@ pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf pfi_kif_find(clr[i].ifname) == NULL) continue; - for (int i = 0; i <= V_pf_hashmask; i++) { + for (int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_state *s; relock: @@ -2045,7 +2045,7 @@ pfsync_bulk_update(void *arg) else i = sc->sc_bulk_hashid; - for (; i <= V_pf_hashmask; i++) { + for (; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; if (s != NULL) Index: sys/netpfil/pf/pf.c =================================================================== --- sys/netpfil/pf/pf.c (revision 251794) +++ sys/netpfil/pf/pf.c (working copy) @@ -353,21 +353,19 @@ VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MA static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures"); VNET_DEFINE(struct pf_keyhash *, pf_keyhash); VNET_DEFINE(struct pf_idhash *, pf_idhash); -VNET_DEFINE(u_long, pf_hashmask); VNET_DEFINE(struct pf_srchash *, pf_srchash); -VNET_DEFINE(u_long, pf_srchashmask); SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)"); -VNET_DEFINE(u_long, pf_hashsize); -#define V_pf_hashsize VNET(pf_hashsize) -SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN, - &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable"); +u_long pf_hashmask; +u_long pf_srchashmask; +static u_long pf_hashsize; +static u_long pf_srchashsize; -VNET_DEFINE(u_long, pf_srchashsize); -#define V_pf_srchashsize VNET(pf_srchashsize) -SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN, - &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable"); +SYSCTL_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN, + &pf_hashsize, 0, "Size of pf(4) states hashtable"); +SYSCTL_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN, + &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable"); VNET_DEFINE(void *, pf_swi_cookie); @@ -383,7 +381,7 @@ pf_hashkey(struct pf_state_key *sk) sizeof(struct pf_state_key_cmp)/sizeof(uint32_t), V_pf_hashseed); - return (h & V_pf_hashmask); + return (h & pf_hashmask); } static __inline uint32_t @@ -404,7 +402,7 @@ pf_hashsrc(struct pf_addr *addr, sa_family_t af) panic("%s: unknown address family %u", __func__, af); } - return (h & V_pf_srchashmask); + return (h & pf_srchashmask); } #ifdef INET6 @@ -566,7 +564,7 @@ pf_overload_task(void *c, int pending) if (SLIST_EMPTY(&queue)) return; - for (int i = 0; 
i <= V_pf_hashmask; i++) { + for (int i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; struct pf_state_key *sk; struct pf_state *s; @@ -698,12 +696,12 @@ pf_initialize() struct pf_srchash *sh; u_int i; - TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize); - if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize)) - V_pf_hashsize = PF_HASHSIZ; - TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize); - if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize)) - V_pf_srchashsize = PF_HASHSIZ / 4; + TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize); + if (pf_hashsize == 0 || !powerof2(pf_hashsize)) + pf_hashsize = PF_HASHSIZ; + TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &pf_srchashsize); + if (pf_srchashsize == 0 || !powerof2(pf_srchashsize)) + pf_srchashsize = PF_HASHSIZ / 4; V_pf_hashseed = arc4random(); @@ -717,12 +715,12 @@ pf_initialize() V_pf_state_key_z = uma_zcreate("pf state keys", sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash), + V_pf_keyhash = malloc(pf_hashsize * sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO); - V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash), + V_pf_idhash = malloc(pf_hashsize * sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO); - V_pf_hashmask = V_pf_hashsize - 1; - for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask; + pf_hashmask = pf_hashsize - 1; + for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask; i++, kh++, ih++) { mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF); @@ -735,10 +733,10 @@ pf_initialize() V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z; uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT); uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached"); - V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash), + V_pf_srchash = malloc(pf_srchashsize * sizeof(struct pf_srchash), M_PFHASH, M_WAITOK|M_ZERO); - V_pf_srchashmask = V_pf_srchashsize - 1; - for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) + pf_srchashmask = pf_srchashsize - 1; + for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF); /* ALTQ */ @@ -775,7 +773,7 @@ pf_cleanup() struct pf_send_entry *pfse, *next; u_int i; - for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask; + for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask; i++, kh++, ih++) { KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty", __func__)); @@ -787,7 +785,7 @@ pf_cleanup() free(V_pf_keyhash, M_PFHASH); free(V_pf_idhash, M_PFHASH); - for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) { + for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { KASSERT(LIST_EMPTY(&sh->nodes), ("%s: source node hash not empty", __func__)); mtx_destroy(&sh->lock); @@ -1177,7 +1175,7 @@ pf_find_state_byid(uint64_t id, uint32_t creatorid V_pf_status.fcounters[FCNT_STATE_SEARCH]++; - ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))]; + ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))]; PF_HASHROW_LOCK(ih); LIST_FOREACH(s, &ih->states, entry) @@ -1373,7 +1371,7 @@ pf_purge_thread(void *v) /* * Now purge everything. 
*/ - pf_purge_expired_states(0, V_pf_hashmask); + pf_purge_expired_states(0, pf_hashmask); pf_purge_expired_fragments(); pf_purge_expired_src_nodes(); @@ -1396,7 +1394,7 @@ pf_purge_thread(void *v) PF_RULES_RUNLOCK(); /* Process 1/interval fraction of the state table every run. */ - idx = pf_purge_expired_states(idx, V_pf_hashmask / + idx = pf_purge_expired_states(idx, pf_hashmask / (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10)); /* Purge other expired types every PFTM_INTERVAL seconds. */ @@ -1462,7 +1460,7 @@ pf_purge_expired_src_nodes() struct pf_src_node *cur, *next; int i; - for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) { + for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next) if (cur->states <= 0 && cur->expire <= time_uptime) { @@ -1614,7 +1612,7 @@ relock: PF_HASHROW_UNLOCK(ih); /* Return when we hit end of hash. */ - if (++i > V_pf_hashmask) { + if (++i > pf_hashmask) { V_pf_status.states = uma_zone_get_cur(V_pf_state_z); return (0); } Index: sys/netpfil/pf/pf_ioctl.c =================================================================== --- sys/netpfil/pf/pf_ioctl.c (revision 251794) +++ sys/netpfil/pf/pf_ioctl.c (working copy) @@ -1577,7 +1577,7 @@ DIOCCHANGERULE_error: struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; u_int i, killed = 0; - for (i = 0; i <= V_pf_hashmask; i++) { + for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; relock_DIOCCLRSTATES: @@ -1622,7 +1622,7 @@ relock_DIOCCLRSTATES: break; } - for (i = 0; i <= V_pf_hashmask; i++) { + for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; relock_DIOCKILLSTATES: @@ -1726,7 +1726,7 @@ relock_DIOCKILLSTATES: p = pstore = malloc(ps->ps_len, M_TEMP, M_WAITOK); nr = 0; - for (i = 0; i <= V_pf_hashmask; i++) { + for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; PF_HASHROW_LOCK(ih); @@ -3078,7 +3078,7 @@ DIOCCHANGEADDR_error: uint32_t i, nr = 0; if (psn->psn_len == 0) { - for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask; + for (i = 0, sh = V_pf_srchash; i < pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) @@ -3090,7 +3090,7 @@ DIOCCHANGEADDR_error: } p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK); - for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask; + for (i = 0, sh = V_pf_srchash; i < pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) { @@ -3147,7 +3147,7 @@ DIOCCHANGEADDR_error: struct pf_src_node *sn; u_int i, killed = 0; - for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask; + for (i = 0, sh = V_pf_srchash; i < pf_srchashmask; i++, sh++) { /* * XXXGL: we don't ever acquire sources hash lock @@ -3331,7 +3331,7 @@ pf_clear_states(void) struct pf_state *s; u_int i; - for (i = 0; i <= V_pf_hashmask; i++) { + for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; relock: PF_HASHROW_LOCK(ih); @@ -3366,7 +3366,7 @@ pf_clear_srcnodes(struct pf_src_node *n) struct pf_state *s; int i; - for (i = 0; i <= V_pf_hashmask; i++) { + for (i = 0; i <= pf_hashmask; i++) { struct pf_idhash *ih = &V_pf_idhash[i]; PF_HASHROW_LOCK(ih); @@ -3382,7 +3382,7 @@ pf_clear_srcnodes(struct pf_src_node *n) if (n == NULL) { struct pf_srchash *sh; - for (i = 0, sh = V_pf_srchash; i < V_pf_srchashmask; + for (i = 0, sh = V_pf_srchash; i < pf_srchashmask; i++, sh++) { PF_HASHROW_LOCK(sh); LIST_FOREACH(n, &sh->nodes, entry) {