[NETFILTER] nf_conntrack: fix possibility of infinite loop while evicting nf_ct_frag6_queue

This synchronizes nf_ct_reasm with the ipv6 reassembly code, and fixes a
possible infinite loop when one CPU evicts nf_ct_frag6_queue entries while
other CPUs create them in parallel: the evictor used to loop until
nf_ct_frag6_mem dropped below the low threshold, which may never happen under
sustained allocation. It now computes a fixed work budget up front and stops
once that much memory has been freed.
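
The essential change is in nf_ct_frag6_evictor(): instead of re-reading
nf_ct_frag6_mem on every pass, the evictor takes a snapshot, subtracts the
low threshold, and passes the remaining budget down so each freed skb and
queue is charged against it. Below is a minimal user-space sketch of that
pattern; LOW_THRESH, OBJ_SIZE, mem, evict_one() and evictor() are
illustrative stand-ins, not the kernel identifiers:

#include <stdio.h>

#define LOW_THRESH  192         /* stands in for nf_ct_frag6_low_thresh */
#define OBJ_SIZE     64         /* per-object cost; divides evenly here */

static unsigned int mem = 512;  /* stands in for nf_ct_frag6_mem */

/* Free one object and charge its size to the caller's budget, as the
 * patched frag_kfree_skb()/frag_free_queue() do; callers that do not
 * perform budgeted eviction pass NULL, as fq_put(fq, NULL) does. */
static void evict_one(unsigned int *work)
{
        if (work)
                *work -= OBJ_SIZE;
        mem -= OBJ_SIZE;
}

static void evictor(void)
{
        unsigned int work = mem;

        if (work <= LOW_THRESH)
                return;

        /* Fixed budget: the loop runs at most work/OBJ_SIZE times even
         * if other CPUs raise mem concurrently, whereas re-reading mem
         * each pass (the old code) can loop forever under sustained
         * allocation. */
        work -= LOW_THRESH;
        while (work > 0)
                evict_one(&work);
}

int main(void)
{
        evictor();
        printf("mem after eviction: %u\n", mem);  /* prints 192 */
        return 0;
}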

Signed-off-by: Yasuyuki Kozakai <[EMAIL PROTECTED]>
Signed-off-by: Harald Welte <[EMAIL PROTECTED]>

---
commit 46f4ae4ee9d28279e33c6ba6c95f2921f1b39446
tree d3951189bab4bea2286665f96c3e7ef44d30baba
parent 8b4ddf3aaa558931e62d62fda1ee3db2ef138c89
author Yasuyuki Kozakai <[EMAIL PROTECTED]> Sun, 13 Nov 2005 11:18:23 +0100
committer Harald Welte <[EMAIL PROTECTED]> Sun, 13 Nov 2005 11:18:23 +0100

 net/ipv6/netfilter/nf_conntrack_reasm.c |   42 +++++++++++++++++++------------
 1 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -190,8 +190,10 @@ static void nf_ct_frag6_secret_rebuild(u
 atomic_t nf_ct_frag6_mem = ATOMIC_INIT(0);
 
 /* Memory Tracking Functions. */
-static inline void frag_kfree_skb(struct sk_buff *skb)
+static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
 {
+       if (work)
+               *work -= skb->truesize;
        atomic_sub(skb->truesize, &nf_ct_frag6_mem);
        if (NFCT_FRAG6_CB(skb)->orig)
                kfree_skb(NFCT_FRAG6_CB(skb)->orig);
@@ -199,8 +201,11 @@ static inline void frag_kfree_skb(struct
        kfree_skb(skb);
 }
 
-static inline void frag_free_queue(struct nf_ct_frag6_queue *fq)
+static inline void frag_free_queue(struct nf_ct_frag6_queue *fq,
+                                  unsigned int *work)
 {
+       if (work)
+               *work -= sizeof(struct nf_ct_frag6_queue);
        atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem);
        kfree(fq);
 }
@@ -218,7 +223,8 @@ static inline struct nf_ct_frag6_queue *
 /* Destruction primitives. */
 
 /* Complete destruction of fq. */
-static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq)
+static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq,
+                               unsigned int *work)
 {
        struct sk_buff *fp;
 
@@ -230,17 +236,17 @@ static void nf_ct_frag6_destroy(struct n
        while (fp) {
                struct sk_buff *xp = fp->next;
 
-               frag_kfree_skb(fp);
+               frag_kfree_skb(fp, work);
                fp = xp;
        }
 
-       frag_free_queue(fq);
+       frag_free_queue(fq, work);
 }
 
-static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
+static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work)
 {
        if (atomic_dec_and_test(&fq->refcnt))
-               nf_ct_frag6_destroy(fq);
+               nf_ct_frag6_destroy(fq, work);
 }
 
 /* Kill fq entry. It is not destroyed immediately,
@@ -262,10 +268,14 @@ static void nf_ct_frag6_evictor(void)
 {
        struct nf_ct_frag6_queue *fq;
        struct list_head *tmp;
+       unsigned int work;
 
-       for (;;) {
-               if (atomic_read(&nf_ct_frag6_mem) <= nf_ct_frag6_low_thresh)
-                       return;
+       work = atomic_read(&nf_ct_frag6_mem);
+       if (work <= nf_ct_frag6_low_thresh)
+               return;
+
+       work -= nf_ct_frag6_low_thresh;
+       while (work > 0) {
                read_lock(&nf_ct_frag6_lock);
                if (list_empty(&nf_ct_frag6_lru_list)) {
                        read_unlock(&nf_ct_frag6_lock);
@@ -281,7 +291,7 @@ static void nf_ct_frag6_evictor(void)
                        fq_kill(fq);
                spin_unlock(&fq->lock);
 
-               fq_put(fq);
+               fq_put(fq, &work);
        }
 }
 
@@ -298,7 +308,7 @@ static void nf_ct_frag6_expire(unsigned 
 
 out:
        spin_unlock(&fq->lock);
-       fq_put(fq);
+       fq_put(fq, NULL);
 }
 
 /* Creation primitives. */
@@ -318,7 +328,7 @@ static struct nf_ct_frag6_queue *nf_ct_f
                        atomic_inc(&fq->refcnt);
                        write_unlock(&nf_ct_frag6_lock);
                        fq_in->last_in |= COMPLETE;
-                       fq_put(fq_in);
+                       fq_put(fq_in, NULL);
                        return fq;
                }
        }
@@ -535,7 +545,7 @@ static int nf_ct_frag6_queue(struct nf_c
                                fq->fragments = next;
 
                        fq->meat -= free_it->len;
-                       frag_kfree_skb(free_it);
+                       frag_kfree_skb(free_it, NULL);
                }
        }
 
@@ -811,7 +821,7 @@ struct sk_buff *nf_ct_frag6_gather(struc
        if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
                spin_unlock(&fq->lock);
                DEBUGP("Can't insert skb to queue\n");
-               fq_put(fq);
+               fq_put(fq, NULL);
                goto ret_orig;
        }
 
@@ -822,7 +832,7 @@ struct sk_buff *nf_ct_frag6_gather(struc
        }
        spin_unlock(&fq->lock);
 
-       fq_put(fq);
+       fq_put(fq, NULL);
        return ret_skb;
 
 ret_orig:
