Make a new function sfq_q_enqueue() that operates directly on the queue data. This will be useful for implementing sfq_change() in a later patch. A pleasant side-effect is reducing most of the duplicate code in sfq_enqueue() and sfq_requeue().
Similarly, make a new function sfq_q_dequeue(). Signed-off-by: Corey Hickey <[EMAIL PROTECTED]> --- net/sched/sch_sfq.c | 72 +++++++++++++++++++++++++++------------------------ 1 files changed, 38 insertions(+), 34 deletions(-) diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 3a23e30..57485ef 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -77,6 +77,9 @@ #define SFQ_DEPTH 128 #define SFQ_HASH_DIVISOR 1024 +#define SFQ_HEAD 0 +#define SFQ_TAIL 1 + /* This type should contain at least SFQ_DEPTH*2 values */ typedef unsigned char sfq_index; @@ -244,10 +247,9 @@ static unsigned int sfq_drop(struct Qdisc *sch) return 0; } -static int -sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) +static void +sfq_q_enqueue(struct sk_buff *skb, struct sfq_sched_data *q, unsigned int end) { - struct sfq_sched_data *q = qdisc_priv(sch); unsigned hash = sfq_hash(q, skb); sfq_index x; @@ -256,8 +258,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) q->ht[hash] = x = q->dep[SFQ_DEPTH].next; q->hash[x] = hash; } - sch->qstats.backlog += skb->len; - __skb_queue_tail(&q->qs[x], skb); + + if (end == SFQ_TAIL) + __skb_queue_tail(&q->qs[x], skb); + else + __skb_queue_head(&q->qs[x], skb); + sfq_inc(q, x); if (q->qs[x].qlen == 1) { /* The flow is new */ if (q->tail == SFQ_DEPTH) { /* It is the first flow */ @@ -270,6 +276,15 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) q->tail = x; } } +} + +static int +sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) +{ + struct sfq_sched_data *q = qdisc_priv(sch); + + sfq_q_enqueue(skb, q, SFQ_TAIL); + sch->qstats.backlog += skb->len; if (++sch->q.qlen <= q->limit) { sch->bstats.bytes += skb->len; sch->bstats.packets++; @@ -284,45 +299,21 @@ static int sfq_requeue(struct sk_buff *skb, struct Qdisc* sch) { struct sfq_sched_data *q = qdisc_priv(sch); - unsigned hash = sfq_hash(q, skb); - sfq_index x; - x = q->ht[hash]; - if (x == SFQ_DEPTH) { - q->ht[hash] = x = q->dep[SFQ_DEPTH].next; - q->hash[x] = hash; - } + 
sfq_q_enqueue(skb, q, SFQ_HEAD); sch->qstats.backlog += skb->len; - __skb_queue_head(&q->qs[x], skb); - sfq_inc(q, x); - if (q->qs[x].qlen == 1) { /* The flow is new */ - if (q->tail == SFQ_DEPTH) { /* It is the first flow */ - q->tail = x; - q->next[x] = x; - q->allot[x] = q->quantum; - } else { - q->next[x] = q->next[q->tail]; - q->next[q->tail] = x; - q->tail = x; - } - } if (++sch->q.qlen <= q->limit) { sch->qstats.requeues++; return 0; } - sch->qstats.drops++; sfq_drop(sch); return NET_XMIT_CN; } - - - -static struct sk_buff * -sfq_dequeue(struct Qdisc* sch) +static struct sk_buff * +sfq_q_dequeue(struct sfq_sched_data *q) { - struct sfq_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; sfq_index a, old_a; @@ -335,8 +326,6 @@ sfq_dequeue(struct Qdisc* sch) /* Grab packet */ skb = __skb_dequeue(&q->qs[a]); sfq_dec(q, a); - sch->q.qlen--; - sch->qstats.backlog -= skb->len; /* Is the slot empty? */ if (q->qs[a].qlen == 0) { @@ -353,6 +342,21 @@ sfq_dequeue(struct Qdisc* sch) a = q->next[a]; q->allot[a] += q->quantum; } + + return skb; +} + +static struct sk_buff * +sfq_dequeue(struct Qdisc* sch) +{ + struct sfq_sched_data *q = qdisc_priv(sch); + struct sk_buff *skb; + + skb = sfq_q_dequeue(q); + if (skb == NULL) + return NULL; + sch->q.qlen--; + sch->qstats.backlog -= skb->len; return skb; } -- 1.5.3 - To unsubscribe from this list: send the line "unsubscribe netdev" in the body of a message to [EMAIL PROTECTED] More majordomo info at http://vger.kernel.org/majordomo-info.html