The branch stable/12 has been updated by kp:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=8de214ad4d225418c8c7befe975637e236ddf93d

commit 8de214ad4d225418c8c7befe975637e236ddf93d
Author:     Kristof Provost <k...@freebsd.org>
AuthorDate: 2018-11-02 15:32:04 +0000
Commit:     Kristof Provost <k...@freebsd.org>
CommitDate: 2021-02-28 15:36:22 +0000

    pf: Limit the fragment entry queue length to 64 per bucket.
    
    So we still have a global limit of 1024 fragments, but it is now
    fine-grained per region of the packet.  Smaller packets may have
    fewer fragments.  This costs another 16 bytes of memory per
    reassembly and divides the worst case for searching by 8.
    
    Obtained from:  OpenBSD
    Differential Revision:  https://reviews.freebsd.org/D17734
    
    (cherry picked from commit 790194cd472b1d17e08940e9f839322abcf14ec9)
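
    For readers checking the arithmetic: below is a minimal standalone
    sketch (an illustration, not part of the patch) that verifies the
    numbers, using the PF_FRAG_ENTRY_POINTS value already in pfvar.h and
    the PF_FRAG_ENTRY_LIMIT this commit introduces.

    /* Sketch only: the arithmetic behind the per-bucket limit. */
    #include <assert.h>

    #define PF_FRAG_ENTRY_POINTS    16      /* already in pfvar.h */
    #define PF_FRAG_ENTRY_LIMIT     64      /* added by this commit */

    int
    main(void)
    {
            /* A packet has at most 65536 octets. */
            int region = 0x10000 / PF_FRAG_ENTRY_POINTS;

            /* Each entry point covers a 4096-octet range of offsets. */
            assert(region == 4096);
            /* 16 buckets x 64 entries cap a reassembly at 1024 fragments. */
            assert(PF_FRAG_ENTRY_POINTS * PF_FRAG_ENTRY_LIMIT == 1024);
            /* A full bucket thus averages at least 64 octets per fragment. */
            assert(region / PF_FRAG_ENTRY_LIMIT == 64);
            return (0);
    }
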
---
 sys/net/pfvar.h          |  7 +++++++
 sys/netpfil/pf/pf_norm.c | 34 +++++++++++++++++++++++++++++-----
 2 files changed, 36 insertions(+), 5 deletions(-)

diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index d6c2bf9120e9..3a535d04f12f 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1011,6 +1011,13 @@ struct pf_divert {
  */
 #define PF_FRAG_ENTRY_POINTS           16
 
+/*
+ * The number of entries in the fragment queue must be limited
+ * to avoid DoS by linear searching.  Instead of a global limit,
+ * use a limit per entry point.  For large packets these sum up.
+ */
+#define PF_FRAG_ENTRY_LIMIT            64
+
 /*
  * ioctl parameter structures
  */
diff --git a/sys/netpfil/pf/pf_norm.c b/sys/netpfil/pf/pf_norm.c
index 0e2fdca4c2ce..eb310e27b9ae 100644
--- a/sys/netpfil/pf/pf_norm.c
+++ b/sys/netpfil/pf/pf_norm.c
@@ -87,7 +87,10 @@ struct pf_fragment {
 #define fr_af  fr_key.frc_af
 #define fr_proto       fr_key.frc_proto
 
+       /* pointers to queue element */
        struct pf_frent *fr_firstoff[PF_FRAG_ENTRY_POINTS];
+       /* count entries between pointers */
+       uint8_t fr_entries[PF_FRAG_ENTRY_POINTS];
        RB_ENTRY(pf_fragment) fr_entry;
        TAILQ_ENTRY(pf_fragment) frag_next;
        uint32_t        fr_timeout;
@@ -138,7 +141,7 @@ static int  pf_frent_holes(struct pf_frent *frent);
 static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
                    struct pf_frag_tree *tree);
 static inline int      pf_frent_index(struct pf_frent *);
-static void    pf_frent_insert(struct pf_fragment *,
+static int     pf_frent_insert(struct pf_fragment *,
                            struct pf_frent *, struct pf_frent *);
 void                   pf_frent_remove(struct pf_fragment *,
                            struct pf_frent *);
@@ -392,12 +395,24 @@ pf_frent_index(struct pf_frent *frent)
        return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
 }
 
-static void
+static int
 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
     struct pf_frent *prev)
 {
        int index;
 
+       CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);
+
+       /*
+        * A packet has at most 65536 octets.  With 16 entry points, each one
+        * spans 4096 octets.  We limit these to 64 fragments each, which
+        * means on average every fragment must have at least 64 octets.
+        */
+       index = pf_frent_index(frent);
+       if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
+               return ENOBUFS;
+       frag->fr_entries[index]++;
+
        if (prev == NULL) {
                TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
        } else {
@@ -406,7 +421,6 @@ pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
                TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
        }
 
-       index = pf_frent_index(frent);
        if (frag->fr_firstoff[index] == NULL) {
                KASSERT(prev == NULL || pf_frent_index(prev) < index,
                    ("prev == NULL || pf_frent_index(prev) < index"));
@@ -424,6 +438,8 @@ pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
        }
 
        frag->fr_holes += pf_frent_holes(frent);
+
+       return 0;
 }
 
 void
@@ -460,6 +476,9 @@ pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
        }
 
        TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
+
+       KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
+       frag->fr_entries[index]--;
 }
 
 struct pf_frent *
@@ -567,6 +586,7 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
 
                *(struct pf_fragment_cmp *)frag = *key;
                memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
+               memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
                frag->fr_timeout = time_uptime;
                frag->fr_maxlen = frent->fe_len;
                frag->fr_holes = 1;
@@ -575,7 +595,7 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
                RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
                TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
 
-               /* We do not have a previous fragment. */
+               /* We do not have a previous fragment; this cannot fail. */
                pf_frent_insert(frag, frent, NULL);
 
                return (frag);
@@ -646,7 +666,11 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
                uma_zfree(V_pf_frent_z, after);
        }
 
-       pf_frent_insert(frag, frent, prev);
+       /* If part of the queue gets too long, there is no way to recover. */
+       if (pf_frent_insert(frag, frent, prev)) {
+               DPFPRINTF(("fragment queue limit exceeded"));
+               goto bad_fragment;
+       }
 
        return (frag);
 
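To see the new failure path end to end, here is a standalone userspace
sketch of the same bounded-bucket pattern.  struct frag_state, bucket_of()
and frag_insert() are hypothetical stand-ins for illustration, not pf's
API; only the two constants and the ENOBUFS convention are taken from the
patch above.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS        16      /* stands in for PF_FRAG_ENTRY_POINTS */
#define BUCKET_LIMIT    64      /* stands in for PF_FRAG_ENTRY_LIMIT */

/* Hypothetical reassembly state: one entry counter per offset region. */
struct frag_state {
        uint8_t entries[NBUCKETS];
};

/* Map a fragment offset to its bucket, as pf_frent_index() does. */
static int
bucket_of(uint32_t off)
{
        return (off / (0x10000 / NBUCKETS));
}

/* Admit a fragment, or refuse up front once its bucket is full. */
static int
frag_insert(struct frag_state *fs, uint32_t off)
{
        int idx = bucket_of(off);

        if (fs->entries[idx] >= BUCKET_LIMIT)
                return (ENOBUFS);       /* caller drops the fragment */
        fs->entries[idx]++;
        return (0);
}

int
main(void)
{
        struct frag_state fs = { { 0 } };
        uint32_t off;
        int dropped = 0;

        /* Flood one 4096-octet region with tiny 8-octet fragments. */
        for (off = 0; off < 4096; off += 8)
                if (frag_insert(&fs, off) != 0)
                        dropped++;

        /* 512 attempts: 64 admitted, 448 refused. */
        printf("dropped %d of 512\n", dropped);
        return (0);
}

Counting at insert time keeps the overflow check O(1); the KASSERT added
in pf_frent_remove() above guards the per-bucket counter against underflow
on the removal side.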