/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.

	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.

	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive
	traffic to a higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128;
	max mtu to 2^18-1; max 128 flows; number of hash buckets to 1024.
	The only goal of these restrictions was that all data
	fit into one 4K page on 32-bit arches.

	It is easy to increase these values, but not in flight.  */

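/* Illustrative userspace usage (not part of this file; the device name and
 * period below are arbitrary examples):
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * 'perturb 10' re-seeds the hash every 10 seconds (see sfq_perturbation()
 * below), so flows that collide in one bucket are periodically split up.
 */
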
#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

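/* Worked example of the scaling above (1514 is just a typical Ethernet
 * frame size): SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190, and a
 * maximal 65535-byte packet scales to 8192, so values stay well inside
 * the 16-bit 'allot' field even after several quantum refills.
 */
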
/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We don't use pointers, to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */

struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */
};

struct sfq_sched_data {
/* Parameters */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;
	unsigned int	divisor;	/* number of slots in hash table */
/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot	*tail;		/* current slot in round */
	sfq_index	*ht;		/* Hash table (divisor slots) */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}

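/* Example of the index encoding: with SFQ_SLOTS = 128, val = 5 denotes
 * &q->slots[5].dep, while val = 130 denotes &q->dep[2].
 */
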
static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
}

static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (ip_is_fragment(iph))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		const struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}

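/* Example: for an unfragmented IPv4 TCP segment the fold combines daddr,
 * saddr ^ IPPROTO_TCP and the 32-bit word carrying both ports, so the two
 * directions of a connection normally hash to different buckets, and the
 * per-bucket mapping changes whenever q->perturbation is re-seeded.
 */
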
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p

static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)

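/* Design note: the slot itself serves as the list head. slot_queue_init()
 * points both skblist pointers at the (cast) slot, so an empty queue needs
 * no NULL checks and head/tail splices stay branch-free, mirroring the
 * kernel's circular list_head idiom.
 */
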
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}

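/* Note on the congestion-notification path above: when the qdisc-wide limit
 * overflows, sfq_drop() removes a packet from the *longest* slot. Only if
 * that slot happened to belong to this flow do we return NET_XMIT_CN;
 * otherwise the innocent sender sees NET_XMIT_SUCCESS while the hog pays.
 */
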
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

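/* Scheduling sketch: a slot whose allot is exhausted (<= 0) becomes the new
 * tail and is recharged by scaled_quantum before it may send again, so a
 * flow that overdrew its credit with a large packet waits out the deficit.
 * This is the Deficit Round Robin scheme from the Shreedhar/Varghese paper
 * cited at the top, with allot pre-scaled by SFQ_ALLOT_SHIFT.
 */
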
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
	if (ctl->divisor)
		q->divisor = ctl->divisor;
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	size_t sz;
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	sz = sizeof(q->ht[0]) * q->divisor;
	q->ht = kmalloc(sz, GFP_KERNEL);
	if (!q->ht && sz > PAGE_SIZE)
		q->ht = vmalloc(sz);
	if (!q->ht)
		return -ENOMEM;
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	if (is_vmalloc_addr(q->ht))
		vfree(q->ht);
	else
		kfree(q->ht);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = q->divisor;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}

module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");