On Fri, Jun 23, 2017 at 03:00:07PM +0200, Alexander Bluhm wrote:
> Adjusted timeouts will follow in the next diff.
To prevent fragments for a single connection that reuse the
fragment id from being reassembled into the wrong packet, throw away
stale fragments. With the default timeout this happens after 18,000
newer fragments have been seen.
ok?
bluhm
Index: net/pf_norm.c
===================================================================
RCS file: /data/mirror/openbsd/cvs/src/sys/net/pf_norm.c,v
retrieving revision 1.207
diff -u -p -r1.207 pf_norm.c
--- net/pf_norm.c 24 Jun 2017 20:32:39 -0000 1.207
+++ net/pf_norm.c 25 Jun 2017 20:21:51 -0000
@@ -84,6 +84,7 @@ struct pf_frnode {
u_int8_t fn_proto; /* protocol for fragments in fn_tree */
u_int8_t fn_direction; /* pf packet direction */
u_int32_t fn_fragments; /* number of entries in fn_tree */
+ u_int32_t fn_gen; /* fr_gen of newest entry in fn_tree */
RB_ENTRY(pf_frnode) fn_entry;
struct pf_frag_tree fn_tree; /* matching fragments, lookup by id */
@@ -96,6 +97,7 @@ struct pf_fragment {
TAILQ_ENTRY(pf_fragment) frag_next;
TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
int32_t fr_timeout;
+ u_int32_t fr_gen; /* generation number (per pf_frnode) */
u_int16_t fr_maxlen; /* maximum length of single fragment */
struct pf_frnode *fr_node; /* ip src/dst/proto/af for fragments */
};
@@ -268,11 +270,13 @@ pf_free_fragment(struct pf_fragment *fra
pool_put(&pf_frag_pl, frag);
}
+#define PF_FRAG_STALE 200
struct pf_fragment *
pf_find_fragment(struct pf_frnode *key, u_int32_t id)
{
struct pf_fragment *frag, idkey;
struct pf_frnode *frnode;
+ u_int32_t stale;
frnode = RB_FIND(pf_frnode_tree, &pf_frnode_tree, key);
if (frnode == NULL)
@@ -282,6 +286,24 @@ pf_find_fragment(struct pf_frnode *key,
frag = RB_FIND(pf_frag_tree, &frnode->fn_tree, &idkey);
if (frag == NULL)
return (NULL);
+ /*
+ * Limit the number of fragments we accept for each (proto,src,dst,af)
+ * combination (aka pf_frnode), so we can deal better with a high rate
+ * of fragments.
+ * Store the current generation for each pf_frnode in fn_gen and on
+ * lookup discard 'stale' fragments (pf_fragment, based on the fr_gen
+ * member). Instead of adding another button interpret the pf fragment
+ * timeout in multiples of 200 fragments. This way the default of 60s
+ * means: pf_fragment objects older than 60*200 = 18,000 generations
+ * are considered stale.
+ */
+ stale = pf_default_rule.timeout[PFTM_FRAG] * PF_FRAG_STALE;
+ if ((frnode->fn_gen - frag->fr_gen) >= stale) {
+ DPFPRINTF(LOG_NOTICE, "stale fragment %d(%p), gen %u, num %u",
+ frag->fr_id, frag, frag->fr_gen, frnode->fn_fragments);
+ pf_free_fragment(frag);
+ return (NULL);
+ }
TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
@@ -369,9 +391,11 @@ pf_fillup_fragment(struct pf_frnode *key
*frnode = *key;
RB_INIT(&frnode->fn_tree);
frnode->fn_fragments = 0;
+ frnode->fn_gen = 0;
}
TAILQ_INIT(&frag->fr_queue);
frag->fr_timeout = time_uptime;
+ frag->fr_gen = frnode->fn_gen++;
frag->fr_maxlen = frent->fe_len;
frag->fr_id = id;
frag->fr_node = frnode;
Index: net/pfvar.h
===================================================================
RCS file: /data/mirror/openbsd/cvs/src/sys/net/pfvar.h,v
retrieving revision 1.457
diff -u -p -r1.457 pfvar.h
--- net/pfvar.h 30 May 2017 19:40:54 -0000 1.457
+++ net/pfvar.h 25 Jun 2017 19:43:40 -0000
@@ -109,6 +109,8 @@ enum { PFTM_TCP_FIRST_PACKET, PFTM_TCP_O
#define PFTM_SRC_NODE_VAL 0 /* Source tracking */
#define PFTM_TS_DIFF_VAL 30 /* Allowed TS diff */
+#define PF_FRAG_STALE 200 /* Limit fragments per second per connection */
+
enum { PF_NOPFROUTE, PF_ROUTETO, PF_DUPTO, PF_REPLYTO };
enum { PF_LIMIT_STATES, PF_LIMIT_SRC_NODES, PF_LIMIT_FRAGS,
PF_LIMIT_TABLES, PF_LIMIT_TABLE_ENTRIES, PF_LIMIT_MAX };