/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF
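
/* Worked example (added for clarity, not in the original source): with
 * SFB_BUCKET_SHIFT = 4, a 32bit hash such as 0x1234ABCD is consumed four
 * bits at a time, least significant nibble first : level 0 uses bucket 0xD,
 * level 1 bucket 0xC, level 2 bucket 0xB, ... level 7 bucket 0x1. A flow
 * thus maps to one bucket per level, and only flows sharing all eight
 * nibbles collide in every level.
 */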
/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};
/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};
/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		     sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}
/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}
/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
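
/* Worked example (added for clarity, not in the original source): in Q0.16,
 * 0x8000 is 0.5 and 0x4000 is 0.25.
 *   prob_plus(0x8000, 0x4000)  -> 0xC000 (0.75)
 *   prob_plus(0xC000, 0x8000)  -> 0xFFFF (saturates at SFB_MAX_PROB)
 *   prob_minus(0x4000, 0x8000) -> 0      (saturates at zero)
 */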
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}
static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}
static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}
static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}
static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}
static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}
/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = net_random();
}
static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
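
/* Timeline sketch (added for clarity, not in the original source), using the
 * defaults below (rehash_interval = 600 s, warmup_time = 60 s) : during the
 * last 60 s before a rehash, double_buffering is set and packets are also
 * hashed into the inactive slot, pre-warming its qlen/p_mark state; at
 * rehash time sfb_swap_slot() re-seeds the old slot's perturbation and
 * makes the pre-warmed slot active.
 */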
/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
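
/* Worked example (added for clarity, not in the original source): with
 * penalty_rate = 10 and penalty_burst = 20, an inelastic flow earns one
 * token per 100 ms (capped at 20), so it can still send short 20-packet
 * bursts but is held to roughly 10 packets per second on average; every
 * packet beyond that budget makes sfb_rate_limit() return true and is
 * dropped by the caller.
 */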
static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, slot, salt, sfbhash;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		sch->qstats.overlimits++;
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies,
					       limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, q, &ret, &salt))
			goto other_drop;
	} else {
		salt = skb_get_rxhash(skb);
	}

	slot = q->slot;

	sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		sch->qstats.overlimits++;
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = jhash_1word(salt,
					      q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			sch->qstats.overlimits++;
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = net_random() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		sch->qstats.drops++;
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}
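
/* Worked example of the marking decision above (added for clarity, not in
 * the original source): suppose p_min = 0xC000 (0.75). A uniform r in
 * [0, 0xFFFF] falls below p_min with probability 0.75; since p_min exceeds
 * SFB_MAX_PROB / 2, the packet is dropped when
 * r < (0xC000 - 0x8000) * 2 = 0x8000 (probability 0.5) and ECN-marked when
 * 0x8000 <= r < 0xC000 (probability 0.25, assuming the packet is ECN
 * capable), leaving one packet in four untouched.
 */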
static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}
static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */
static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}
static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}
static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};
static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
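
/* Usage sketch (added for clarity, not in the original source; assumes the
 * iproute2 tc-sfb front end) :
 *
 *   tc qdisc add dev eth0 root sfb
 *
 * loads this module with the defaults above; tc-sfb(8) exposes the knobs
 * carried in struct tc_sfb_qopt (rehash interval, limit, max, bin size,
 * penalty_rate, penalty_burst, ...).
 */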
static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}
static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}
static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}
static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}
static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}
static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}
static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		=	sfb_graft,
	.leaf		=	sfb_leaf,
	.get		=	sfb_get,
	.put		=	sfb_put,
	.change		=	sfb_change_class,
	.delete		=	sfb_delete,
	.walk		=	sfb_walk,
	.tcf_chain	=	sfb_find_tcf,
	.bind_tcf	=	sfb_bind,
	.unbind_tcf	=	sfb_put,
	.dump		=	sfb_dump_class,
};
static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		=	"sfb",
	.priv_size	=	sizeof(struct sfb_sched_data),
	.cl_ops		=	&sfb_class_ops,
	.enqueue	=	sfb_enqueue,
	.dequeue	=	sfb_dequeue,
	.peek		=	sfb_peek,
	.init		=	sfb_init,
	.reset		=	sfb_reset,
	.destroy	=	sfb_destroy,
	.change		=	sfb_change,
	.dump		=	sfb_dump,
	.dump_stats	=	sfb_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}
module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");