/*
 * BFQ, or Budget Fair Queueing, disk scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
 *
 * BFQ is a proportional share disk scheduling algorithm based on the
 * slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
 * measured in number of sectors, to tasks instead of time slices.
 * The disk is not granted to the active task for a given time slice,
 * but until it has exhausted its assigned budget. This change from
 * the time to the service domain allows BFQ to distribute the disk
 * bandwidth among tasks as desired, without any distortion due to
 * ZBR, workload fluctuations or other factors. BFQ uses an ad hoc
 * internal scheduler, called B-WF2Q+, to schedule tasks according to
 * their budgets. Thanks to this accurate scheduler, BFQ can afford
 * to assign high budgets to disk-bound non-seeky tasks (to boost the
 * throughput), and yet guarantee low latencies to interactive and
 * soft real-time applications.
 *
 * BFQ was introduced in [1], where the interested reader can
 * find an accurate description of the algorithm, of the bandwidth
 * distribution and latency guarantees it provides, plus formal proofs
 * of all the properties. With respect to the algorithm presented in
 * the paper, this implementation adds several little heuristics, and
 * a hierarchical extension, based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
 * complexity derives from the one introduced with EEVDF in [3].
 *
 * [1] P. Valente and F. Checconi, ``High Throughput Disk Scheduling
 *     with Deterministic Guarantees on Bandwidth Distribution,''
 *     IEEE Transactions on Computers, May 2010.
 *
 *     http://algo.ing.unimo.it/people/paolo/disk_sched/bfq-techreport.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
 *     Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation,'' technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
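/*
 * Illustrative example (not from the references above): consider two
 * equal-weight readers, one confined to the fast outer zones and one
 * to the slow inner zones of a ZBR disk. Because budgets are measured
 * in sectors rather than in time, both readers are granted the same
 * number of sectors per scheduling round, so each still receives
 * about half of the delivered throughput; the slow-zone reader simply
 * holds the disk for longer. A pure time-slice scheduler would give
 * both the same disk time, and hence unequal shares of the bandwidth.
 */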
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
/* Max number of dispatches in one round of service. */
static const int bfq_quantum = 4;

/* Expiration time of sync (0) and async (1) requests, in jiffies. */
static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };

/* Maximum backwards seek, in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in jiffies. */
static int bfq_slice_idle = HZ / 125;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;
static const int bfq_max_budget_async_rq = 4;
/*
 * Async to sync throughput distribution is controlled as follows:
 * when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below.
 */
static const int bfq_async_charge_factor = 10;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
static const int bfq_timeout_sync = HZ / 8;
static int bfq_timeout_async = HZ / 25;
struct kmem_cache *bfq_pool;
struct kmem_cache *bfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, bfq_ioc_count);
static struct completion *bfq_ioc_gone;
static DEFINE_SPINLOCK(bfq_ioc_gone_lock);

static DEFINE_SPINLOCK(cic_index_lock);
static DEFINE_IDA(cic_index_ida);

/* Below this threshold (in ms), we consider thinktime immediate. */

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR	 (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)

/* Min samples used for peak rate estimation (for autotuning). */
#define BFQ_PEAK_RATE_SAMPLES	32

/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT		16
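/*
 * Illustrative note on the fixed-point representation: with
 * BFQ_RATE_SHIFT == 16, a rate of 0.5 sectors/usec is stored as
 * 0.5 * 2^16 = 32768, while a stored value of 65536 corresponds to
 * 1 sector/usec, i.e. roughly 500 MB/s with 512-byte sectors.
 */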
/*
 * The duration of the weight raising for interactive applications is
 * computed automatically (as default behaviour), using the following
 * formula: duration = (R / r) * T, where r is the peak rate of the
 * disk, and R and T are two reference parameters. In particular, R is
 * the peak rate of a reference disk, and T is about the maximum time
 * for starting popular large applications on that disk, under BFQ and
 * while reading two files in parallel. Finally, BFQ uses two
 * different pairs (R, T) depending on whether the disk is rotational
 * or not.
 */
#define T_rot			(msecs_to_jiffies(5500))
#define T_nonrot		(msecs_to_jiffies(2000))
/* Next two quantities are in sectors/usec, left-shifted by BFQ_RATE_SHIFT */
#define R_nonrot		34791
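/*
 * Illustrative example (hypothetical device): on a rotational disk
 * whose estimated peak rate r is half the reference rate R, the
 * weight-raising duration becomes (R / r) * T_rot = 2 * 5500 ms =
 * 11 s; on a device exactly as fast as the reference one it is
 * simply T_rot.
 */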
#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private[0])
#define RQ_BFQQ(rq)		((rq)->elevator_private[1])
#include "bfq-sched.c"
#include "bfq-cgroup.c"

#define bfq_class_idle(bfqq)	((bfqq)->entity.ioprio_class ==\
				 IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->entity.ioprio_class ==\
				 IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)
/*
 * We regard a request as SYNC, if either it's a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int bfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
		return 1;

	return 0;
}
/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work);
	}
}
/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
185
static struct request *bfq_choose_req(struct bfq_data *bfqd,
190
sector_t s1, s2, d1 = 0, d2 = 0;
191
unsigned long back_max;
192
#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
193
#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
194
unsigned wrap = 0; /* bit mask: requests behind the disk head? */
196
if (rq1 == NULL || rq1 == rq2)
201
if (rq_is_sync(rq1) && !rq_is_sync(rq2))
203
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
205
if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
207
else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
210
s1 = blk_rq_pos(rq1);
211
s2 = blk_rq_pos(rq2);
214
* By definition, 1KiB is 2 sectors.
216
back_max = bfqd->bfq_back_max * 2;
219
* Strict one way elevator _except_ in the case where we allow
220
* short backward seeks which are biased as twice the cost of a
221
* similar forward seek.
225
else if (s1 + back_max >= last)
226
d1 = (last - s1) * bfqd->bfq_back_penalty;
228
wrap |= BFQ_RQ1_WRAP;
232
else if (s2 + back_max >= last)
233
d2 = (last - s2) * bfqd->bfq_back_penalty;
235
wrap |= BFQ_RQ2_WRAP;
237
/* Found required data */
240
* By doing switch() on the bit mask "wrap" we avoid having to
241
* check two variables for all permutations: --> faster!
244
case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
260
case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
263
* Since both rqs are wrapped,
264
* start with the one that's further behind head
265
* (--> only *one* back seek required),
266
* since back seek takes more time than forward.
275
static struct bfq_queue *
276
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
277
sector_t sector, struct rb_node **ret_parent,
278
struct rb_node ***rb_link)
280
struct rb_node **p, *parent;
281
struct bfq_queue *bfqq = NULL;
289
bfqq = rb_entry(parent, struct bfq_queue, pos_node);
292
* Sort strictly based on sector. Smallest to the left,
293
* largest to the right.
295
if (sector > blk_rq_pos(bfqq->next_rq))
297
else if (sector < blk_rq_pos(bfqq->next_rq))
305
*ret_parent = parent;
309
bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
310
(long long unsigned)sector,
311
bfqq != NULL ? bfqq->pid : 0);
316
static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
318
struct rb_node **p, *parent;
319
struct bfq_queue *__bfqq;
321
if (bfqq->pos_root != NULL) {
322
rb_erase(&bfqq->pos_node, bfqq->pos_root);
323
bfqq->pos_root = NULL;
326
if (bfq_class_idle(bfqq))
331
bfqq->pos_root = &bfqd->rq_pos_tree;
332
__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
333
blk_rq_pos(bfqq->next_rq), &parent, &p);
334
if (__bfqq == NULL) {
335
rb_link_node(&bfqq->pos_node, parent, p);
336
rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
338
bfqq->pos_root = NULL;
341
static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
342
struct bfq_queue *bfqq,
343
struct request *last)
345
struct rb_node *rbnext = rb_next(&last->rb_node);
346
struct rb_node *rbprev = rb_prev(&last->rb_node);
347
struct request *next = NULL, *prev = NULL;
349
BUG_ON(RB_EMPTY_NODE(&last->rb_node));
352
prev = rb_entry_rq(rbprev);
355
next = rb_entry_rq(rbnext);
357
rbnext = rb_first(&bfqq->sort_list);
358
if (rbnext && rbnext != &last->rb_node)
359
next = rb_entry_rq(rbnext);
362
return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
365
static void bfq_del_rq_rb(struct request *rq)
367
struct bfq_queue *bfqq = RQ_BFQQ(rq);
368
struct bfq_data *bfqd = bfqq->bfqd;
369
const int sync = rq_is_sync(rq);
371
BUG_ON(bfqq->queued[sync] == 0);
372
bfqq->queued[sync]--;
375
elv_rb_del(&bfqq->sort_list, rq);
377
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
378
if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->active_queue)
379
bfq_del_bfqq_busy(bfqd, bfqq, 1);
381
* Remove queue from request-position tree as it is empty.
383
if (bfqq->pos_root != NULL) {
384
rb_erase(&bfqq->pos_node, bfqq->pos_root);
385
bfqq->pos_root = NULL;
390
/* see the definition of bfq_async_charge_factor for details */
391
static inline unsigned long bfq_serv_to_charge(struct request *rq,
392
struct bfq_queue *bfqq)
394
return blk_rq_sectors(rq) *
395
(1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->raising_coeff == 1) *
396
bfq_async_charge_factor));
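/*
 * Illustrative numbers for the expression above: a 64-sector request
 * from an async, non weight-raised queue is charged
 * 64 * (1 + bfq_async_charge_factor) = 64 * 11 = 704 sectors of
 * budget, whereas a sync request (or a request from a weight-raised
 * async queue) of the same size is charged exactly 64 sectors.
 */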
400
* bfq_updated_next_req - update the queue after a new next_rq selection.
401
* @bfqd: the device data the queue belongs to.
402
* @bfqq: the queue to update.
404
* If the first request of a queue changes we make sure that the queue
405
* has enough budget to serve at least its first request (if the
406
* request has grown). We do this because if the queue has not enough
407
* budget for its first request, it has to go through two dispatch
408
* rounds to actually get it dispatched.
410
static void bfq_updated_next_req(struct bfq_data *bfqd,
411
struct bfq_queue *bfqq)
413
struct bfq_entity *entity = &bfqq->entity;
414
struct bfq_service_tree *st = bfq_entity_service_tree(entity);
415
struct request *next_rq = bfqq->next_rq;
416
unsigned long new_budget;
421
if (bfqq == bfqd->active_queue)
423
* In order not to break guarantees, budgets cannot be
424
* changed after an entity has been selected.
428
BUG_ON(entity->tree != &st->active);
429
BUG_ON(entity == entity->sched_data->active_entity);
431
new_budget = max_t(unsigned long, bfqq->max_budget,
432
bfq_serv_to_charge(next_rq, bfqq));
433
entity->budget = new_budget;
434
bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget);
435
bfq_activate_bfqq(bfqd, bfqq);
438
static inline unsigned int bfq_wrais_duration(struct bfq_data *bfqd)
442
if (bfqd->bfq_raising_max_time > 0)
443
return bfqd->bfq_raising_max_time;
446
do_div(dur, bfqd->peak_rate);
451
static void bfq_add_rq_rb(struct request *rq)
453
struct bfq_queue *bfqq = RQ_BFQQ(rq);
454
struct bfq_entity *entity = &bfqq->entity;
455
struct bfq_data *bfqd = bfqq->bfqd;
456
struct request *next_rq, *prev;
457
unsigned long old_raising_coeff = bfqq->raising_coeff;
458
int idle_for_long_time = bfqq->budget_timeout +
459
bfqd->bfq_raising_min_idle_time < jiffies;
461
bfq_log_bfqq(bfqd, bfqq, "add_rq_rb %d", rq_is_sync(rq));
462
bfqq->queued[rq_is_sync(rq)]++;
465
elv_rb_add(&bfqq->sort_list, rq);
468
* Check if this request is a better next-serve candidate.
470
prev = bfqq->next_rq;
471
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
472
BUG_ON(next_rq == NULL);
473
bfqq->next_rq = next_rq;
476
* Adjust priority tree position, if next_rq changes.
478
if (prev != bfqq->next_rq)
479
bfq_rq_pos_tree_add(bfqd, bfqq);
481
if (!bfq_bfqq_busy(bfqq)) {
482
int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 &&
483
bfqq->soft_rt_next_start < jiffies;
484
entity->budget = max_t(unsigned long, bfqq->max_budget,
485
bfq_serv_to_charge(next_rq, bfqq));
487
if (!bfqd->low_latency)
491
* If the queue is not being boosted and has been idle
492
* for long enough, start a weight-raising period
494
if (old_raising_coeff == 1 && (idle_for_long_time || soft_rt)) {
495
bfqq->raising_coeff = bfqd->bfq_raising_coeff;
496
if (idle_for_long_time)
497
bfqq->raising_cur_max_time =
498
bfq_wrais_duration(bfqd);
500
bfqq->raising_cur_max_time =
501
bfqd->bfq_raising_rt_max_time;
502
bfq_log_bfqq(bfqd, bfqq,
503
"wrais starting at %llu msec,"
505
bfqq->last_rais_start_finish,
506
jiffies_to_msecs(bfqq->
507
raising_cur_max_time));
508
} else if (old_raising_coeff > 1) {
509
if (idle_for_long_time)
510
bfqq->raising_cur_max_time =
511
bfq_wrais_duration(bfqd);
512
else if (bfqq->raising_cur_max_time ==
513
bfqd->bfq_raising_rt_max_time &&
515
bfqq->raising_coeff = 1;
516
bfq_log_bfqq(bfqd, bfqq,
517
"wrais ending at %llu msec,"
519
bfqq->last_rais_start_finish,
520
jiffies_to_msecs(bfqq->
521
raising_cur_max_time));
524
if (old_raising_coeff != bfqq->raising_coeff)
525
entity->ioprio_changed = 1;
527
bfq_add_bfqq_busy(bfqd, bfqq);
529
if (bfqd->low_latency && old_raising_coeff == 1 &&
531
bfqq->last_rais_start_finish +
532
bfqd->bfq_raising_min_inter_arr_async < jiffies) {
533
bfqq->raising_coeff = bfqd->bfq_raising_coeff;
534
bfqq->raising_cur_max_time = bfq_wrais_duration(bfqd);
536
entity->ioprio_changed = 1;
537
bfq_log_bfqq(bfqd, bfqq,
538
"non-idle wrais starting at %llu msec,"
540
bfqq->last_rais_start_finish,
541
jiffies_to_msecs(bfqq->
542
raising_cur_max_time));
544
bfq_updated_next_req(bfqd, bfqq);
547
if (bfqd->low_latency &&
548
(old_raising_coeff == 1 || bfqq->raising_coeff == 1 ||
550
bfqq->last_rais_start_finish = jiffies;
553
static void bfq_reposition_rq_rb(struct bfq_queue *bfqq, struct request *rq)
555
elv_rb_del(&bfqq->sort_list, rq);
556
bfqq->queued[rq_is_sync(rq)]--;
557
bfqq->bfqd->queued--;
561
static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
564
struct task_struct *tsk = current;
565
struct cfq_io_context *cic;
566
struct bfq_queue *bfqq;
568
cic = bfq_cic_lookup(bfqd, tsk->io_context);
572
bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio));
574
sector_t sector = bio->bi_sector + bio_sectors(bio);
576
return elv_rb_find(&bfqq->sort_list, sector);
582
static void bfq_activate_request(struct request_queue *q, struct request *rq)
584
struct bfq_data *bfqd = q->elevator->elevator_data;
586
bfqd->rq_in_driver++;
587
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
588
bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
589
(long long unsigned)bfqd->last_position);
592
static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
594
struct bfq_data *bfqd = q->elevator->elevator_data;
596
WARN_ON(bfqd->rq_in_driver == 0);
597
bfqd->rq_in_driver--;
600
static void bfq_remove_request(struct request *rq)
602
struct bfq_queue *bfqq = RQ_BFQQ(rq);
603
struct bfq_data *bfqd = bfqq->bfqd;
605
if (bfqq->next_rq == rq) {
606
bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
607
bfq_updated_next_req(bfqd, bfqq);
610
list_del_init(&rq->queuelist);
613
if (rq->cmd_flags & REQ_META) {
614
WARN_ON(bfqq->meta_pending == 0);
615
bfqq->meta_pending--;
619
static int bfq_merge(struct request_queue *q, struct request **req,
622
struct bfq_data *bfqd = q->elevator->elevator_data;
623
struct request *__rq;
625
__rq = bfq_find_rq_fmerge(bfqd, bio);
626
if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
628
return ELEVATOR_FRONT_MERGE;
631
return ELEVATOR_NO_MERGE;
634
static void bfq_merged_request(struct request_queue *q, struct request *req,
637
if (type == ELEVATOR_FRONT_MERGE) {
638
struct bfq_queue *bfqq = RQ_BFQQ(req);
640
bfq_reposition_rq_rb(bfqq, req);
644
static void bfq_merged_requests(struct request_queue *q, struct request *rq,
645
struct request *next)
647
struct bfq_queue *bfqq = RQ_BFQQ(rq);
650
* Reposition in fifo if next is older than rq.
652
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
653
time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
654
list_move(&rq->queuelist, &next->queuelist);
655
rq_set_fifo_time(rq, rq_fifo_time(next));
658
if (bfqq->next_rq == next)
661
bfq_remove_request(next);
664
static int bfq_allow_merge(struct request_queue *q, struct request *rq,
667
struct bfq_data *bfqd = q->elevator->elevator_data;
668
struct cfq_io_context *cic;
669
struct bfq_queue *bfqq;
671
/* Disallow merge of a sync bio into an async request. */
672
if (bfq_bio_sync(bio) && !rq_is_sync(rq))
676
* Lookup the bfqq that this bio will be queued with. Allow
677
* merge only if rq is queued there.
679
cic = bfq_cic_lookup(bfqd, current->io_context);
683
bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio));
684
return bfqq == RQ_BFQQ(rq);
687
static void __bfq_set_active_queue(struct bfq_data *bfqd,
688
struct bfq_queue *bfqq)
691
bfq_mark_bfqq_must_alloc(bfqq);
692
bfq_mark_bfqq_budget_new(bfqq);
693
bfq_clear_bfqq_fifo_expire(bfqq);
695
bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
697
bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu",
698
bfqq->entity.budget);
701
bfqd->active_queue = bfqq;
705
* Get and set a new active queue for service.
707
static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd,
708
struct bfq_queue *bfqq)
711
bfqq = bfq_get_next_queue(bfqd);
713
bfq_get_next_queue_forced(bfqd, bfqq);
715
__bfq_set_active_queue(bfqd, bfqq);
719
static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
722
if (blk_rq_pos(rq) >= bfqd->last_position)
723
return blk_rq_pos(rq) - bfqd->last_position;
725
return bfqd->last_position - blk_rq_pos(rq);
729
* Return true if bfqq has no request pending and rq is close enough to
730
* bfqd->last_position, or if rq is closer to bfqd->last_position than
733
static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
735
return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
738
static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
740
struct rb_root *root = &bfqd->rq_pos_tree;
741
struct rb_node *parent, *node;
742
struct bfq_queue *__bfqq;
743
sector_t sector = bfqd->last_position;
745
if (RB_EMPTY_ROOT(root))
749
* First, if we find a request starting at the end of the last
750
* request, choose it.
752
__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
757
* If the exact sector wasn't found, the parent of the NULL leaf
758
* will contain the closest sector (rq_pos_tree sorted by next_request
761
__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
762
if (bfq_rq_close(bfqd, __bfqq->next_rq))
765
if (blk_rq_pos(__bfqq->next_rq) < sector)
766
node = rb_next(&__bfqq->pos_node);
768
node = rb_prev(&__bfqq->pos_node);
772
__bfqq = rb_entry(node, struct bfq_queue, pos_node);
773
if (bfq_rq_close(bfqd, __bfqq->next_rq))
781
* cur_bfqq - passed in so that we don't decide that the current queue
782
* is closely cooperating with itself.
784
* We are assuming that cur_bfqq has dispatched at least one request,
785
* and that bfqd->last_position reflects a position on the disk associated
786
* with the I/O issued by cur_bfqq.
788
static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
789
struct bfq_queue *cur_bfqq)
791
struct bfq_queue *bfqq;
793
if (bfq_class_idle(cur_bfqq))
795
if (!bfq_bfqq_sync(cur_bfqq))
797
if (BFQQ_SEEKY(cur_bfqq))
800
/* If device has only one backlogged bfq_queue, don't search. */
801
if (bfqd->busy_queues == 1)
805
* We should notice if some of the queues are cooperating, e.g.
806
* working closely on the same area of the disk. In that case,
807
* we can group them together and not waste time idling.
809
bfqq = bfqq_close(bfqd);
810
if (bfqq == NULL || bfqq == cur_bfqq)
814
* Do not merge queues from different bfq_groups.
816
if (bfqq->entity.parent != cur_bfqq->entity.parent)
820
* It only makes sense to merge sync queues.
822
if (!bfq_bfqq_sync(bfqq))
824
if (BFQQ_SEEKY(bfqq))
828
* Do not merge queues of different priority classes.
830
if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
837
* If enough samples have been computed, return the current max budget
838
* stored in bfqd, which is dynamically updated according to the
839
* estimated disk peak rate; otherwise return the default max budget
841
static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
843
if (bfqd->budgets_assigned < 194)
844
return bfq_default_max_budget;
846
return bfqd->bfq_max_budget;
850
* Return min budget, which is a fraction of the current or default
851
* max budget (trying with 1/32)
853
static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
855
if (bfqd->budgets_assigned < 194)
856
return bfq_default_max_budget;
858
return bfqd->bfq_max_budget / 32;
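/*
 * Illustrative note: bfqd->budgets_assigned is a decayed counter,
 * updated as (7 * old + 256) / 8 in __bfq_set_active_queue(), so it
 * converges towards 256; the threshold of 194 used above is thus
 * crossed after roughly a dozen queue activations. Once that happens,
 * a bfq_max_budget of 16384 sectors yields a min budget of
 * 16384 / 32 = 512 sectors.
 */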
862
* Decides whether idling should be done for the given device and
* the given active queue.
865
static inline bool bfq_queue_nonrot_noidle(struct bfq_data *bfqd,
866
struct bfq_queue *active_bfqq)
868
if (active_bfqq == NULL)
871
* If device is SSD it has no seek penalty, disable idling; but
873
* - device does not support queuing, otherwise we still have
874
* a problem with sync vs async workloads;
875
* - the queue is not weight-raised, to preserve guarantees.
877
return (blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag &&
878
active_bfqq->raising_coeff == 1);
881
static void bfq_arm_slice_timer(struct bfq_data *bfqd)
883
struct bfq_queue *bfqq = bfqd->active_queue;
884
struct cfq_io_context *cic;
887
WARN_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
889
if (bfq_queue_nonrot_noidle(bfqd, bfqq))
892
/* Idling is disabled, either manually or by past process history. */
893
if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_idle_window(bfqq))
896
/* Tasks have exited, don't wait. */
897
cic = bfqd->active_cic;
898
if (cic == NULL || atomic_read(&cic->ioc->nr_tasks) == 0)
901
bfq_mark_bfqq_wait_request(bfqq);
904
* We don't want to idle for seeks, but we do want to allow
905
* fair distribution of slice time for a process doing back-to-back
906
* seeks. So allow a little bit of time for it to submit a new rq.
908
* To prevent processes with (partly) seeky workloads from
909
* being too ill-treated, grant them a small fraction of the
910
* assigned budget before reducing the waiting time to
911
* BFQ_MIN_TT. This happened to help reduce latency.
913
sl = bfqd->bfq_slice_idle;
914
if (bfq_sample_valid(bfqq->seek_samples) && BFQQ_SEEKY(bfqq) &&
915
bfqq->entity.service > bfq_max_budget(bfqd) / 8 &&
916
bfqq->raising_coeff == 1)
917
sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
918
else if (bfqq->raising_coeff > 1)
920
bfqd->last_idling_start = ktime_get();
921
mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
922
bfq_log(bfqd, "arm idle: %u/%u ms",
923
jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
927
* Set the maximum time for the active queue to consume its
928
* budget. This prevents seeky processes from lowering the disk
929
* throughput (always guaranteed with a time slice scheme as in CFQ).
931
static void bfq_set_budget_timeout(struct bfq_data *bfqd)
933
struct bfq_queue *bfqq = bfqd->active_queue;
934
unsigned int timeout_coeff;
935
if (bfqq->raising_cur_max_time == bfqd->bfq_raising_rt_max_time)
938
timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
940
bfqd->last_budget_start = ktime_get();
942
bfq_clear_bfqq_budget_new(bfqq);
943
bfqq->budget_timeout = jiffies +
944
bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
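/*
 * Illustrative example (hypothetical coefficient): with
 * bfq_timeout_sync = HZ / 8 (125 ms) and a queue whose weight is
 * currently raised by a factor of 10, timeout_coeff evaluates to 10,
 * so the queue gets up to 1.25 s to consume its budget; a non-raised
 * queue keeps the plain 125 ms timeout.
 */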
946
bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
947
jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
952
* Move request from internal lists to the request queue dispatch list.
954
static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
956
struct bfq_data *bfqd = q->elevator->elevator_data;
957
struct bfq_queue *bfqq = RQ_BFQQ(rq);
959
bfq_remove_request(rq);
961
elv_dispatch_sort(q, rq);
963
if (bfq_bfqq_sync(bfqq))
968
* Return expired entry, or NULL to just start from scratch in rbtree.
970
static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
972
struct request *rq = NULL;
974
if (bfq_bfqq_fifo_expire(bfqq))
977
bfq_mark_bfqq_fifo_expire(bfqq);
979
if (list_empty(&bfqq->fifo))
982
rq = rq_entry_fifo(bfqq->fifo.next);
984
if (time_before(jiffies, rq_fifo_time(rq)))
991
* Must be called with the queue_lock held.
993
static int bfqq_process_refs(struct bfq_queue *bfqq)
995
int process_refs, io_refs;
997
io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
998
process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
999
BUG_ON(process_refs < 0);
1000
return process_refs;
1003
static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1005
int process_refs, new_process_refs;
1006
struct bfq_queue *__bfqq;
1009
* If there are no process references on the new_bfqq, then it is
1010
* unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1011
* may have dropped their last reference (not just their last process
1014
if (!bfqq_process_refs(new_bfqq))
1017
/* Avoid a circular list and skip interim queue merges. */
1018
while ((__bfqq = new_bfqq->new_bfqq)) {
1024
process_refs = bfqq_process_refs(bfqq);
1025
new_process_refs = bfqq_process_refs(new_bfqq);
1027
* If the process for the bfqq has gone away, there is no
1028
* sense in merging the queues.
1030
if (process_refs == 0 || new_process_refs == 0)
1034
* Merge in the direction of the lesser amount of work.
1036
if (new_process_refs >= process_refs) {
1037
bfqq->new_bfqq = new_bfqq;
1038
atomic_add(process_refs, &new_bfqq->ref);
1040
new_bfqq->new_bfqq = bfqq;
1041
atomic_add(new_process_refs, &bfqq->ref);
1043
bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1047
static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
1049
struct bfq_entity *entity = &bfqq->entity;
1050
return entity->budget - entity->service;
1053
static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1055
BUG_ON(bfqq != bfqd->active_queue);
1057
__bfq_bfqd_reset_active(bfqd);
1059
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
1060
bfq_del_bfqq_busy(bfqd, bfqq, 1);
1062
* overloading budget_timeout field to store when
1063
* the queue remains with no backlog, used by
1064
* the weight-raising mechanism
1066
bfqq->budget_timeout = jiffies;
1069
bfq_activate_bfqq(bfqd, bfqq);
1071
* Resort priority tree of potential close cooperators.
1073
bfq_rq_pos_tree_add(bfqd, bfqq);
1077
* If this bfqq is shared between multiple processes, check
1078
* to make sure that those processes are still issuing I/Os
1079
* within the mean seek distance. If not, it may be time to
1080
* break the queues apart again.
1082
if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
1083
bfq_mark_bfqq_split_coop(bfqq);
1087
* __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
1088
* @bfqd: device data.
1089
* @bfqq: queue to update.
1090
* @reason: reason for expiration.
1092
* Handle the feedback on @bfqq budget. See the body for detailed
1095
static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
1096
struct bfq_queue *bfqq,
1097
enum bfqq_expiration reason)
1099
struct request *next_rq;
1100
unsigned long budget, min_budget;
1102
budget = bfqq->max_budget;
1103
min_budget = bfq_min_budget(bfqd);
1105
BUG_ON(bfqq != bfqd->active_queue);
1107
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
1108
bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
1109
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
1110
budget, bfq_min_budget(bfqd));
1111
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
1112
bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->active_queue));
1114
if (bfq_bfqq_sync(bfqq)) {
1117
* Caveat: in all the following cases we trade latency
1120
case BFQ_BFQQ_TOO_IDLE:
1122
* This is the only case where we may reduce
1123
* the budget: if there is no request of the
1124
* process still waiting for completion, then
1125
* we assume (tentatively) that the timer has
1126
* expired because the batch of requests of
1127
* the process could have been served with a
1128
* smaller budget. Hence, betting that
1129
* process will behave in the same way when it
1130
* becomes backlogged again, we reduce its
1131
* next budget. As long as we guess right,
1132
* this budget cut reduces the latency
1133
* experienced by the process.
1135
* However, if there are still outstanding
1136
* requests, then the process may have not yet
1137
* issued its next request just because it is
1138
* still waiting for the completion of some of
1139
* the still outstanding ones. So in this
1140
* subcase we do not reduce its budget, on the
1141
* contrary we increase it to possibly boost
1142
* the throughput, as discussed in the
1143
* comments to the BUDGET_TIMEOUT case.
1145
if (bfqq->dispatched > 0) /* still outstanding reqs */
1146
budget = min(budget * 2, bfqd->bfq_max_budget);
1148
if (budget > 5 * min_budget)
1149
budget -= 4 * min_budget;
1151
budget = min_budget;
1154
case BFQ_BFQQ_BUDGET_TIMEOUT:
1156
* We double the budget here because: 1) it
1157
* gives the chance to boost the throughput if
1158
* this is not a seeky process (which may have
1159
* bumped into this timeout because of, e.g.,
1160
* ZBR), 2) together with charge_full_budget
1161
* it helps give seeky processes higher
1162
* timestamps, and hence be served less
1165
budget = min(budget * 2, bfqd->bfq_max_budget);
1167
case BFQ_BFQQ_BUDGET_EXHAUSTED:
1169
* The process still has backlog, and did not
1170
* let either the budget timeout or the disk
1171
* idling timeout expire. Hence it is not
1172
* seeky, has a short thinktime and may be
1173
* happy with a higher budget too. So
1174
* definitely increase the budget of this good
1175
* candidate to boost the disk throughput.
1177
budget = min(budget * 4, bfqd->bfq_max_budget);
1179
case BFQ_BFQQ_NO_MORE_REQUESTS:
1181
* Leave the budget unchanged.
1186
} else /* async queue */
1187
/* async queues get always the maximum possible budget
1188
* (their ability to dispatch is limited by
1189
* @bfqd->bfq_max_budget_async_rq).
1191
budget = bfqd->bfq_max_budget;
1193
bfqq->max_budget = budget;
1195
if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
1196
bfqq->max_budget > bfqd->bfq_max_budget)
1197
bfqq->max_budget = bfqd->bfq_max_budget;
1200
* Make sure that we have enough budget for the next request.
1201
* Since the finish time of the bfqq must be kept in sync with
1202
* the budget, be sure to call __bfq_bfqq_expire() after the
1205
next_rq = bfqq->next_rq;
1206
if (next_rq != NULL)
1207
bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
1208
bfq_serv_to_charge(next_rq, bfqq));
1210
bfqq->entity.budget = bfqq->max_budget;
1212
bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
1213
next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
1214
bfqq->entity.budget);
1217
static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
1219
unsigned long max_budget;
1222
* The max_budget calculated when autotuning is equal to the
1223
* number of sectors transferred in timeout_sync at the
1224
* estimated peak rate.
1226
max_budget = (unsigned long)(peak_rate * 1000 *
1227
timeout >> BFQ_RATE_SHIFT);
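/*
 * Worked example (illustrative figures): peak_rate is in sectors/usec
 * left-shifted by BFQ_RATE_SHIFT and timeout is in ms, so with
 * peak_rate = 34791 (about 0.53 sectors/usec, i.e. roughly 270 MB/s)
 * and timeout = 125 ms this gives 34791 * 1000 * 125 >> 16, i.e.
 * about 66000 sectors (~32 MiB) of budget.
 */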
1233
* In addition to updating the peak rate, checks whether the process
1234
* is "slow", and returns 1 if so. This slow flag is used, in addition
1235
* to the budget timeout, to reduce the amount of service provided to
1236
* seeky processes, and hence reduce their chances to lower the
1237
* throughput. See the code for more details.
1239
static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1240
int compensate, enum bfqq_expiration reason)
1242
u64 bw, usecs, expected, timeout;
1246
if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
1250
delta = bfqd->last_idling_start;
1252
delta = ktime_get();
1253
delta = ktime_sub(delta, bfqd->last_budget_start);
1254
usecs = ktime_to_us(delta);
1256
/* Don't trust short/unrealistic values. */
1257
if (usecs < 100 || usecs >= LONG_MAX)
1261
* Calculate the bandwidth for the last slice. We use a 64 bit
1262
* value to store the peak rate, in sectors per usec in fixed
1263
* point math. We do so to have enough precision in the estimate
1264
* and to avoid overflows.
1266
bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
1267
do_div(bw, (unsigned long)usecs);
1269
timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
1272
* Use only long (> 20ms) intervals to filter out spikes for
1273
* the peak rate estimation.
1275
if (usecs > 20000) {
1276
if (bw > bfqd->peak_rate ||
1277
(!BFQQ_SEEKY(bfqq) &&
1278
reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
1279
bfq_log(bfqd, "measured bw =%llu", bw);
1281
* To smooth oscillations use a low-pass filter with
1283
* new_rate = (7/8) * old_rate + (1/8) * bw
1286
bfqd->peak_rate *= 7;
1287
do_div(bfqd->peak_rate, 8);
1288
bfqd->peak_rate += bw;
1290
bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
1293
update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
1295
if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
1296
bfqd->peak_rate_samples++;
1298
if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
1299
update && bfqd->bfq_user_max_budget == 0) {
1300
bfqd->bfq_max_budget =
1301
bfq_calc_max_budget(bfqd->peak_rate, timeout);
1302
bfq_log(bfqd, "new max_budget=%lu",
1303
bfqd->bfq_max_budget);
1308
* If the process has been served for too short a time
* interval to let its possible sequential accesses prevail over
* the initial seek time needed to move the disk head to the
* first sector it requested, then give the process a chance
* and for the moment return false.
1314
if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
1318
* A process is considered ``slow'' (i.e., seeky, so that we
1319
* cannot treat it fairly in the service domain, as it would
1320
* slow down the other processes too much) if, when a slice
1321
* ends for whatever reason, it has received service at a
1322
* rate that would not be high enough to complete the budget
1323
* before the budget timeout expiration.
1325
expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
1328
* Caveat: processes doing IO in the slower disk zones will
1329
* tend to be slow(er) even if not seeky. And the estimated
1330
* peak rate will actually be an average over the disk
1331
* surface. Hence, to not be too harsh with unlucky processes,
1332
* we keep a budget/3 margin of safety before declaring a
1335
return expected > (4 * bfqq->entity.budget) / 3;
1339
* bfq_bfqq_expire - expire a queue.
1340
* @bfqd: device owning the queue.
1341
* @bfqq: the queue to expire.
1342
* @compensate: if true, compensate for the time spent idling.
1343
* @reason: the reason causing the expiration.
1346
* If the process associated to the queue is slow (i.e., seeky), or in
1347
* case of budget timeout, or, finally, if it is async, we
1348
* artificially charge it an entire budget (independently of the
1349
* actual service it received). As a consequence, the queue will get
1350
* higher timestamps than the correct ones upon reactivation, and
1351
* hence it will be rescheduled as if it had received more service
1352
* than what it actually received. In the end, this class of processes
1353
* will receive less service in proportion to how slowly they consume
1354
* their budgets (and hence how seriously they tend to lower the
1357
* In contrast, when a queue expires because it has been idling for
1358
* too long or because it exhausted its budget, we do not touch the
1359
* amount of service it has received. Hence when the queue will be
1360
* reactivated and its timestamps updated, the latter will be in sync
1361
* with the actual service received by the queue until expiration.
1363
* Charging a full budget to the first type of queues and the exact
1364
* service to the others has the effect of using the WF2Q+ policy to
1365
* schedule the former on a timeslice basis, without violating the
1366
* service domain guarantees of the latter.
1368
static void bfq_bfqq_expire(struct bfq_data *bfqd,
1369
struct bfq_queue *bfqq,
1371
enum bfqq_expiration reason)
1374
BUG_ON(bfqq != bfqd->active_queue);
1376
/* Update disk peak rate for autotuning and check whether the
1377
* process is slow (see bfq_update_peak_rate).
1379
slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
1382
* As above explained, 'punish' slow (i.e., seeky), timed-out
1383
* and async queues, to favor sequential sync workloads.
1385
* Processes doing IO in the slower disk zones will tend to be
1386
* slow(er) even if not seeky. Hence, since the estimated peak
1387
* rate is actually an average over the disk surface, these
1388
* processes may timeout just for bad luck. To avoid punishing
1389
* them we do not charge a full budget to a process that
1390
* succeeded in consuming at least 2/3 of its budget.
1392
if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
1393
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
1394
bfq_bfqq_charge_full_budget(bfqq);
1396
if (bfqd->low_latency && bfqq->raising_coeff == 1)
1397
bfqq->last_rais_start_finish = jiffies;
1399
if (bfqd->low_latency && bfqd->bfq_raising_max_softrt_rate > 0) {
1400
if (reason != BFQ_BFQQ_BUDGET_TIMEOUT)
1401
bfqq->soft_rt_next_start =
1403
HZ * bfqq->entity.service /
1404
bfqd->bfq_raising_max_softrt_rate;
1406
bfqq->soft_rt_next_start = -1; /* infinity */
1408
bfq_log_bfqq(bfqd, bfqq,
1409
"expire (%d, slow %d, num_disp %d, idle_win %d)", reason, slow,
1410
bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
1412
/* Increase, decrease or leave budget unchanged according to reason */
1413
__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
1414
__bfq_bfqq_expire(bfqd, bfqq);
1418
* Budget timeout is not implemented through a dedicated timer, but
1419
* just checked on request arrivals and completions, as well as on
1420
* idle timer expirations.
1422
static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
1424
if (bfq_bfqq_budget_new(bfqq))
1427
if (time_before(jiffies, bfqq->budget_timeout))
1434
* If we expire a queue that is waiting for the arrival of a new
1435
* request, we may prevent the fictitious timestamp backshifting that
1436
* allows the guarantees of the queue to be preserved (see [1] for
1437
* this tricky aspect). Hence we return true only if this condition
1438
* does not hold, or if the queue is slow enough to deserve only to be
1439
* kicked off for preserving a high throughput.
1441
static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
1443
bfq_log_bfqq(bfqq->bfqd, bfqq,
1444
"may_budget_timeout: wr %d left %d timeout %d",
1445
bfq_bfqq_wait_request(bfqq),
1446
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
1447
bfq_bfqq_budget_timeout(bfqq));
1449
return (!bfq_bfqq_wait_request(bfqq) ||
1450
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
1452
bfq_bfqq_budget_timeout(bfqq);
1456
* Select a queue for service. If we have a current active queue,
1457
* check whether to continue servicing it, or retrieve and set a new one.
1459
static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
1461
struct bfq_queue *bfqq, *new_bfqq = NULL;
1462
struct request *next_rq;
1463
enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
1465
bfqq = bfqd->active_queue;
1469
bfq_log_bfqq(bfqd, bfqq, "select_queue: already active queue");
1472
* If another queue has a request waiting within our mean seek
1473
* distance, let it run. The expire code will check for close
1474
* cooperators and put the close queue at the front of the
1475
* service tree. If possible, merge the expiring queue with the
1478
new_bfqq = bfq_close_cooperator(bfqd, bfqq);
1479
if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
1480
bfq_setup_merge(bfqq, new_bfqq);
1482
if (bfq_may_expire_for_budg_timeout(bfqq))
1485
next_rq = bfqq->next_rq;
1487
* If bfqq has requests queued and it has enough budget left to
1488
* serve them, keep the queue, otherwise expire it.
1490
if (next_rq != NULL) {
1491
if (bfq_serv_to_charge(next_rq, bfqq) >
1492
bfq_bfqq_budget_left(bfqq)) {
1493
reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
1497
* The idle timer may be pending because we may not
1498
* disable disk idling even when a new request arrives
1500
if (timer_pending(&bfqd->idle_slice_timer)) {
1502
* If we get here: 1) at least a new request
1503
* has arrived but we have not disabled the
1504
* timer because the request was too small,
1505
* 2) then the block layer has unplugged the
1506
* device, causing the dispatch to be invoked.
1508
* Since the device is unplugged, now the
1509
* requests are probably large enough to
1510
* provide a reasonable throughput.
1511
* So we disable idling.
1513
bfq_clear_bfqq_wait_request(bfqq);
1514
del_timer(&bfqd->idle_slice_timer);
1516
if (new_bfqq == NULL)
1524
* No requests pending. If there is no cooperator, and the active
1525
* queue still has requests in flight or is idling for a new request,
1528
if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
1529
(bfqq->dispatched != 0 && bfq_bfqq_idle_window(bfqq) &&
1530
!bfq_queue_nonrot_noidle(bfqd, bfqq)))) {
1533
} else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
1535
* Expiring the queue because there is a close cooperator,
1538
bfq_clear_bfqq_wait_request(bfqq);
1539
del_timer(&bfqd->idle_slice_timer);
1542
reason = BFQ_BFQQ_NO_MORE_REQUESTS;
1544
bfq_bfqq_expire(bfqd, bfqq, 0, reason);
1546
bfqq = bfq_set_active_queue(bfqd, new_bfqq);
1547
bfq_log(bfqd, "select_queue: new queue %d returned",
1548
bfqq != NULL ? bfqq->pid : 0);
1553
static void update_raising_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1555
if (bfqq->raising_coeff > 1) { /* queue is being boosted */
1556
struct bfq_entity *entity = &bfqq->entity;
1558
bfq_log_bfqq(bfqd, bfqq,
1559
"raising period dur %u/%u msec, "
1560
"old raising coeff %u, w %d(%d)",
1561
jiffies_to_msecs(jiffies -
1562
bfqq->last_rais_start_finish),
1563
jiffies_to_msecs(bfqq->raising_cur_max_time),
1564
bfqq->raising_coeff,
1565
bfqq->entity.weight, bfqq->entity.orig_weight);
1567
BUG_ON(bfqq != bfqd->active_queue && entity->weight !=
1568
entity->orig_weight * bfqq->raising_coeff);
1569
if (entity->ioprio_changed)
1570
bfq_log_bfqq(bfqd, bfqq,
1571
"WARN: pending prio change");
1573
* If too much time has elapsed from the beginning
1574
* of this weight-raising period and the process is not soft
1575
* real-time, stop it
1577
if (jiffies - bfqq->last_rais_start_finish >
1578
bfqq->raising_cur_max_time) {
1579
int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 &&
1580
bfqq->soft_rt_next_start < jiffies;
1582
bfqq->last_rais_start_finish = jiffies;
1584
bfqq->raising_cur_max_time =
1585
bfqd->bfq_raising_rt_max_time;
1587
bfqq->raising_coeff = 1;
1588
entity->ioprio_changed = 1;
1589
__bfq_entity_update_weight_prio(
1590
bfq_entity_service_tree(entity),
1599
* Dispatch one request from bfqq, moving it to the request queue
1602
static int bfq_dispatch_request(struct bfq_data *bfqd,
1603
struct bfq_queue *bfqq)
1607
unsigned long service_to_charge;
1609
BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
1611
/* Follow expired path, else get first next available. */
1612
rq = bfq_check_fifo(bfqq);
1615
service_to_charge = bfq_serv_to_charge(rq, bfqq);
1617
if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
1619
* This may happen if the next rq is chosen
1620
* in fifo order instead of sector order.
1621
* The budget is properly dimensioned
1622
* to be always sufficient to serve the next request
1623
* only if it is chosen in sector order. The reason is
1624
* that it would be quite inefficient and of little use
1625
* to always make sure that the budget is large enough
1626
* to serve even the possible next rq in fifo order.
1627
* In fact, requests are seldom served in fifo order.
1629
* Expire the queue for budget exhaustion, and
1630
* make sure that the next act_budget is enough
1631
* to serve the next request, even if it comes
1632
* from the fifo expired path.
1636
* Since this dispatch failed, make sure that
1637
* a new one will be performed
1639
if (!bfqd->rq_in_driver)
1640
bfq_schedule_dispatch(bfqd);
1644
/* Finally, insert request into driver dispatch list. */
1645
bfq_bfqq_served(bfqq, service_to_charge);
1646
bfq_dispatch_insert(bfqd->queue, rq);
1648
update_raising_data(bfqd, bfqq);
1650
bfq_log_bfqq(bfqd, bfqq, "dispatched %u sec req (%llu), "
1653
(long long unsigned)blk_rq_pos(rq),
1654
bfq_bfqq_budget_left(bfqq));
1658
if (bfqd->active_cic == NULL) {
1659
atomic_long_inc(&RQ_CIC(rq)->ioc->refcount);
1660
bfqd->active_cic = RQ_CIC(rq);
1663
if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
1664
dispatched >= bfqd->bfq_max_budget_async_rq) ||
1665
bfq_class_idle(bfqq)))
1671
bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
1675
static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
1679
while (bfqq->next_rq != NULL) {
1680
bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
1684
BUG_ON(!list_empty(&bfqq->fifo));
1689
* Drain our current requests. Used for barriers and when switching
1690
* io schedulers on-the-fly.
1692
static int bfq_forced_dispatch(struct bfq_data *bfqd)
1694
struct bfq_queue *bfqq, *n;
1695
struct bfq_service_tree *st;
1698
bfqq = bfqd->active_queue;
1700
__bfq_bfqq_expire(bfqd, bfqq);
1703
* Loop through classes, and be careful to leave the scheduler
1704
* in a consistent state, as feedback mechanisms and vtime
1705
* updates cannot be disabled during the process.
1707
list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
1708
st = bfq_entity_service_tree(&bfqq->entity);
1710
dispatched += __bfq_forced_dispatch_bfqq(bfqq);
1711
bfqq->max_budget = bfq_max_budget(bfqd);
1713
bfq_forget_idle(st);
1716
BUG_ON(bfqd->busy_queues != 0);
1721
static int bfq_dispatch_requests(struct request_queue *q, int force)
1723
struct bfq_data *bfqd = q->elevator->elevator_data;
1724
struct bfq_queue *bfqq;
1727
bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
1728
if (bfqd->busy_queues == 0)
1731
if (unlikely(force))
1732
return bfq_forced_dispatch(bfqd);
1734
if ((bfqq = bfq_select_queue(bfqd)) == NULL)
1737
max_dispatch = bfqd->bfq_quantum;
1738
if (bfq_class_idle(bfqq))
1741
if (!bfq_bfqq_sync(bfqq))
1742
max_dispatch = bfqd->bfq_max_budget_async_rq;
1744
if (bfqq->dispatched >= max_dispatch) {
1745
if (bfqd->busy_queues > 1)
1747
if (bfqq->dispatched >= 4 * max_dispatch)
1751
if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
1754
bfq_clear_bfqq_wait_request(bfqq);
1755
BUG_ON(timer_pending(&bfqd->idle_slice_timer));
1757
if (!bfq_dispatch_request(bfqd, bfqq))
1760
bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d"
1761
"(max_disp %d)", bfqq->pid, max_dispatch);
1767
* Task holds one reference to the queue, dropped when task exits. Each rq
1768
* in-flight on this queue also holds a reference, dropped when rq is freed.
1770
* Queue lock must be held here.
1772
static void bfq_put_queue(struct bfq_queue *bfqq)
1774
struct bfq_data *bfqd = bfqq->bfqd;
1776
BUG_ON(atomic_read(&bfqq->ref) <= 0);
1778
bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
1779
atomic_read(&bfqq->ref));
1780
if (!atomic_dec_and_test(&bfqq->ref))
1783
BUG_ON(rb_first(&bfqq->sort_list) != NULL);
1784
BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
1785
BUG_ON(bfqq->entity.tree != NULL);
1786
BUG_ON(bfq_bfqq_busy(bfqq));
1787
BUG_ON(bfqd->active_queue == bfqq);
1789
bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
1791
kmem_cache_free(bfq_pool, bfqq);
1794
static void bfq_put_cooperator(struct bfq_queue *bfqq)
1796
struct bfq_queue *__bfqq, *next;
1799
* If this queue was scheduled to merge with another queue, be
1800
* sure to drop the reference taken on that queue (and others in
1801
* the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
1803
__bfqq = bfqq->new_bfqq;
1805
if (__bfqq == bfqq) {
1806
WARN(1, "bfqq->new_bfqq loop detected.\n");
1809
next = __bfqq->new_bfqq;
1810
bfq_put_queue(__bfqq);
1815
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1817
if (bfqq == bfqd->active_queue) {
1818
__bfq_bfqq_expire(bfqd, bfqq);
1819
bfq_schedule_dispatch(bfqd);
1822
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
1823
atomic_read(&bfqq->ref));
1825
bfq_put_cooperator(bfqq);
1827
bfq_put_queue(bfqq);
1831
* Update the entity prio values; note that the new values will not
1832
* be used until the next (re)activation.
1834
static void bfq_init_prio_data(struct bfq_queue *bfqq, struct io_context *ioc)
1836
struct task_struct *tsk = current;
1839
if (!bfq_bfqq_prio_changed(bfqq))
1842
ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
1843
switch (ioprio_class) {
1845
printk(KERN_ERR "bfq: bad prio %x\n", ioprio_class);
1846
case IOPRIO_CLASS_NONE:
1848
* No prio set, inherit CPU scheduling settings.
1850
bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
1851
bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
1853
case IOPRIO_CLASS_RT:
1854
bfqq->entity.new_ioprio = task_ioprio(ioc);
1855
bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
1857
case IOPRIO_CLASS_BE:
1858
bfqq->entity.new_ioprio = task_ioprio(ioc);
1859
bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
1861
case IOPRIO_CLASS_IDLE:
1862
bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
1863
bfqq->entity.new_ioprio = 7;
1864
bfq_clear_bfqq_idle_window(bfqq);
1868
bfqq->entity.ioprio_changed = 1;
1871
* Keep track of original prio settings in case we have to temporarily
1872
* elevate the priority of this queue.
1874
bfqq->org_ioprio = bfqq->entity.new_ioprio;
1875
bfq_clear_bfqq_prio_changed(bfqq);
1878
static void bfq_changed_ioprio(struct io_context *ioc,
1879
struct cfq_io_context *cic)
1881
struct bfq_data *bfqd;
1882
struct bfq_queue *bfqq, *new_bfqq;
1883
struct bfq_group *bfqg;
1884
unsigned long uninitialized_var(flags);
1886
bfqd = bfq_get_bfqd_locked(&cic->key, &flags);
1887
if (unlikely(bfqd == NULL))
1890
bfqq = cic->cfqq[BLK_RW_ASYNC];
1892
bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
1894
new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, cic->ioc,
1896
if (new_bfqq != NULL) {
1897
cic->cfqq[BLK_RW_ASYNC] = new_bfqq;
1898
bfq_log_bfqq(bfqd, bfqq,
1899
"changed_ioprio: bfqq %p %d",
1900
bfqq, atomic_read(&bfqq->ref));
1901
bfq_put_queue(bfqq);
1905
bfqq = cic->cfqq[BLK_RW_SYNC];
1907
bfq_mark_bfqq_prio_changed(bfqq);
1909
bfq_put_bfqd_unlock(bfqd, &flags);
1912
static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1913
pid_t pid, int is_sync)
1915
RB_CLEAR_NODE(&bfqq->entity.rb_node);
1916
INIT_LIST_HEAD(&bfqq->fifo);
1918
atomic_set(&bfqq->ref, 0);
1921
bfq_mark_bfqq_prio_changed(bfqq);
1924
if (!bfq_class_idle(bfqq))
1925
bfq_mark_bfqq_idle_window(bfqq);
1926
bfq_mark_bfqq_sync(bfqq);
1929
/* Tentative initial value to trade off between throughput and latency */
1930
bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
1933
bfqq->raising_coeff = 1;
1934
bfqq->last_rais_start_finish = 0;
1935
bfqq->soft_rt_next_start = -1;
1938
static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
1939
struct bfq_group *bfqg,
1941
struct io_context *ioc,
1944
struct bfq_queue *bfqq, *new_bfqq = NULL;
1945
struct cfq_io_context *cic;
1948
cic = bfq_cic_lookup(bfqd, ioc);
1949
/* cic always exists here */
1950
bfqq = cic_to_bfqq(cic, is_sync);
1953
* Always try a new alloc if we fall back to the OOM bfqq
1954
* originally, since it should just be a temporary situation.
1956
if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
1958
if (new_bfqq != NULL) {
1961
} else if (gfp_mask & __GFP_WAIT) {
1962
spin_unlock_irq(bfqd->queue->queue_lock);
1963
new_bfqq = kmem_cache_alloc_node(bfq_pool,
1964
gfp_mask | __GFP_ZERO,
1966
spin_lock_irq(bfqd->queue->queue_lock);
1967
if (new_bfqq != NULL)
1970
bfqq = kmem_cache_alloc_node(bfq_pool,
1971
gfp_mask | __GFP_ZERO,
1976
bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
1977
bfq_log_bfqq(bfqd, bfqq, "allocated");
1979
bfqq = &bfqd->oom_bfqq;
1980
bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
1983
bfq_init_prio_data(bfqq, ioc);
1984
bfq_init_entity(&bfqq->entity, bfqg);
1987
if (new_bfqq != NULL)
1988
kmem_cache_free(bfq_pool, new_bfqq);
1993
static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
1994
struct bfq_group *bfqg,
1995
int ioprio_class, int ioprio)
1997
switch (ioprio_class) {
1998
case IOPRIO_CLASS_RT:
1999
return &bfqg->async_bfqq[0][ioprio];
2000
case IOPRIO_CLASS_BE:
2001
return &bfqg->async_bfqq[1][ioprio];
2002
case IOPRIO_CLASS_IDLE:
2003
return &bfqg->async_idle_bfqq;
2009
static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
2010
struct bfq_group *bfqg, int is_sync,
2011
struct io_context *ioc, gfp_t gfp_mask)
2013
const int ioprio = task_ioprio(ioc);
2014
const int ioprio_class = task_ioprio_class(ioc);
2015
struct bfq_queue **async_bfqq = NULL;
2016
struct bfq_queue *bfqq = NULL;
2019
async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
2025
bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, ioc, gfp_mask);
2028
* Pin the queue now that it's allocated, scheduler exit will prune it.
2030
if (!is_sync && *async_bfqq == NULL) {
2031
atomic_inc(&bfqq->ref);
2032
bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
2033
bfqq, atomic_read(&bfqq->ref));
2037
atomic_inc(&bfqq->ref);
2038
bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
2039
atomic_read(&bfqq->ref));
2043
static void bfq_update_io_thinktime(struct bfq_data *bfqd,
2044
struct cfq_io_context *cic)
2046
unsigned long elapsed = jiffies - cic->ttime.last_end_request;
2047
unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
2049
cic->ttime.ttime_samples = (7*cic->ttime.ttime_samples + 256) / 8;
2050
cic->ttime.ttime_total = (7*cic->ttime.ttime_total + 256*ttime) / 8;
2051
cic->ttime.ttime_mean = (cic->ttime.ttime_total + 128) / cic->ttime.ttime_samples;
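/*
 * Illustrative note: both counters decay by 7/8 at each update, so
 * ttime_samples converges towards 256 and ttime_total towards 256
 * times the exponentially-weighted average think time; ttime_mean is
 * therefore an EWMA of the recent think times. With a constant think
 * time t, ttime_mean settles at approximately t.
 */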
2054
static void bfq_update_io_seektime(struct bfq_data *bfqd,
2055
struct bfq_queue *bfqq,
2061
if (bfqq->last_request_pos < blk_rq_pos(rq))
2062
sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
2064
sdist = bfqq->last_request_pos - blk_rq_pos(rq);
2067
* Don't allow the seek distance to get too large from the
2068
* odd fragment, pagein, etc.
2070
if (bfqq->seek_samples == 0) /* first request, not really a seek */
2072
else if (bfqq->seek_samples <= 60) /* second & third seek */
2073
sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
2075
sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
2077
bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
2078
bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
2079
total = bfqq->seek_total + (bfqq->seek_samples/2);
2080
do_div(total, bfqq->seek_samples);
2081
if (bfq_bfqq_coop(bfqq)) {
2083
* If the mean seektime increases for a (non-seeky) shared
2084
* queue, some cooperator is likely to be idling too much.
2085
* On the contrary, if it decreases, some cooperator has
2086
* probably woken up.
2089
if ((sector_t)total < bfqq->seek_mean)
2090
bfq_mark_bfqq_some_coop_idle(bfqq);
else if ((sector_t)total > bfqq->seek_mean)
bfq_clear_bfqq_some_coop_idle(bfqq);
2094
bfqq->seek_mean = (sector_t)total;
2096
bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
2097
(u64)bfqq->seek_mean);
2101
/*
 * Disable the idle window if the process thinks too long or seeks so much
 * that idling doesn't matter.
 */
static void bfq_update_idle_window(struct bfq_data *bfqd,
				   struct bfq_queue *bfqq,
				   struct cfq_io_context *cic)
{
	int enable_idle;

	/* Don't idle for async or idle io prio class. */
	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
		return;

	enable_idle = bfq_bfqq_idle_window(bfqq);

	if (atomic_read(&cic->ioc->nr_tasks) == 0 ||
	    bfqd->bfq_slice_idle == 0 ||
	    (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
	     bfqq->raising_coeff == 1))
		enable_idle = 0;
	else if (bfq_sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
		    bfqq->raising_coeff == 1)
			enable_idle = 0;
		else
			enable_idle = 1;
	}
	bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
		     enable_idle);

	if (enable_idle)
		bfq_mark_bfqq_idle_window(bfqq);
	else
		bfq_clear_bfqq_idle_window(bfqq);
}

/*
 * Called when a new fs request (rq) is added to bfqq.  Check if there's
 * something we should do about it.
 */
static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			    struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	if (rq->cmd_flags & REQ_META)
		bfqq->meta_pending++;

	bfq_update_io_thinktime(bfqd, cic);
	bfq_update_io_seektime(bfqd, bfqq, rq);
	if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
	    !BFQQ_SEEKY(bfqq))
		bfq_update_idle_window(bfqd, bfqq, cic);

	bfq_log_bfqq(bfqd, bfqq,
		     "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
		     bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
		     (long long unsigned)bfqq->seek_mean);

	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (bfqq == bfqd->active_queue) {
		/*
		 * If there is just this request queued and the request
		 * is small, just exit.
		 * In this way, if the disk is being idled to wait for a new
		 * request from the active queue, we avoid unplugging the
		 * device now.
		 *
		 * By doing so, we spare the disk from being committed
		 * to serving just a small request.  Instead, we wait for
		 * the block layer to decide when to unplug the device:
		 * hopefully, new requests will be merged to this one
		 * quickly, then the device will be unplugged and larger
		 * requests will be dispatched.
		 */
		if (bfqq->queued[rq_is_sync(rq)] == 1 &&
		    blk_rq_sectors(rq) < 32) {
			return;
		}
		if (bfq_bfqq_wait_request(bfqq)) {
			/*
			 * If we are waiting for a request for this queue, let
			 * it rip immediately and flag that we must not expire
			 * this queue just now.
			 */
			bfq_clear_bfqq_wait_request(bfqq);
			del_timer(&bfqd->idle_slice_timer);
			/*
			 * Here we can safely expire the queue, in
			 * case of budget timeout, without wasting
			 * guarantees.
			 */
			if (bfq_bfqq_budget_timeout(bfqq))
				bfq_bfqq_expire(bfqd, bfqq, 0,
						BFQ_BFQQ_BUDGET_TIMEOUT);
			__blk_run_queue(bfqd->queue);
		}
	}
}

static void bfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct bfq_data *bfqd = q->elevator->elevator_data;
	struct bfq_queue *bfqq = RQ_BFQQ(rq);

	assert_spin_locked(bfqd->queue->queue_lock);
	bfq_init_prio_data(bfqq, RQ_CIC(rq)->ioc);

	bfq_add_rq_rb(rq);

	rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &bfqq->fifo);

	bfq_rq_enqueued(bfqd, bfqq, rq);
}

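/*
 * Device command-queueing (e.g. NCQ/TCQ) detection: sample the number of
 * requests in the driver and, after BFQ_HW_QUEUE_SAMPLES valid samples,
 * assume the device queues internally if the observed maximum in-driver
 * count exceeded BFQ_HW_QUEUE_THRESHOLD during the sampling window.  A
 * sample is valid only if enough requests are outstanding for queueing
 * behaviour to be observable at all.
 */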
static void bfq_update_hw_tag(struct bfq_data *bfqd)
{
	bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
				     bfqd->rq_in_driver);

	if (bfqd->hw_tag == 1)
		return;

	/*
	 * This sample is valid if the number of outstanding requests
	 * is large enough to allow a queueing behavior.  Note that the
	 * sum is not exact, as it's not taking into account deactivated
	 * requests.
	 */
	if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
		return;

	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
		return;

	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
	bfqd->max_rq_in_driver = 0;
	bfqd->hw_tag_samples = 0;
}

static void bfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct bfq_queue *bfqq = RQ_BFQQ(rq);
	struct bfq_data *bfqd = bfqq->bfqd;
	const int sync = rq_is_sync(rq);

	bfq_log_bfqq(bfqd, bfqq, "completed %u sects req (%d)",
		     blk_rq_sectors(rq), sync);

	bfq_update_hw_tag(bfqd);

	WARN_ON(!bfqd->rq_in_driver);
	WARN_ON(!bfqq->dispatched);
	bfqd->rq_in_driver--;
	bfqq->dispatched--;

	if (bfq_bfqq_sync(bfqq))
		bfqd->sync_flight--;

	if (sync)
		RQ_CIC(rq)->ttime.last_end_request = jiffies;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (bfqd->active_queue == bfqq) {
		if (bfq_bfqq_budget_new(bfqq))
			bfq_set_budget_timeout(bfqd);

		/*
		 * Idling is disabled also for cooperation issues:
		 * 1) there is a close cooperator for the queue, or
		 * 2) the queue is shared and some cooperator is likely
		 *    to be idle (in this case, by not arming the idle timer,
		 *    we try to slow down the queue, to prevent the zones of
		 *    the disk accessed by the active cooperators from
		 *    becoming too distant from the zone that will be accessed
		 *    by the currently idle cooperators).
		 */
		if (bfq_may_expire_for_budg_timeout(bfqq))
			bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
		else if (sync &&
			 (bfqd->rq_in_driver == 0 ||
			  bfqq->raising_coeff > 1) &&
			 RB_EMPTY_ROOT(&bfqq->sort_list) &&
			 !bfq_close_cooperator(bfqd, bfqq) &&
			 (!bfq_bfqq_coop(bfqq) ||
			  !bfq_bfqq_some_coop_idle(bfqq)))
			bfq_arm_slice_timer(bfqd);
	}

	if (!bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

static inline int __bfq_may_queue(struct bfq_queue *bfqq)
{
	if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
		bfq_clear_bfqq_must_alloc(bfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int bfq_may_queue(struct request_queue *q, int rw)
{
	struct bfq_data *bfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct bfq_queue *bfqq;

	/*
	 * Don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * So just lookup a possibly existing queue, or return 'may queue'
	 * if that fails.
	 */
	cic = bfq_cic_lookup(bfqd, tsk->io_context);
	if (cic == NULL)
		return ELV_MQUEUE_MAY;

	bfqq = cic_to_bfqq(cic, rw_is_sync(rw));
	if (bfqq != NULL) {
		bfq_init_prio_data(bfqq, cic->ioc);

		return __bfq_may_queue(bfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * Queue lock held here.
 */
static void bfq_put_request(struct request *rq)
{
	struct bfq_queue *bfqq = RQ_BFQQ(rq);

	if (bfqq != NULL) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!bfqq->allocated[rw]);
		bfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private[0] = NULL;
		rq->elevator_private[1] = NULL;

		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
			     bfqq, atomic_read(&bfqq->ref));
		bfq_put_queue(bfqq);
	}
}

static struct bfq_queue *
bfq_merge_bfqqs(struct bfq_data *bfqd, struct cfq_io_context *cic,
		struct bfq_queue *bfqq)
{
	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
		     (long unsigned)bfqq->new_bfqq->pid);
	cic_set_bfqq(cic, bfqq->new_bfqq, 1);
	bfq_mark_bfqq_coop(bfqq->new_bfqq);
	bfq_put_queue(bfqq);
	return cic_to_bfqq(cic, 1);
}

/*
 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
 * was the last process referring to said bfqq.
 */
static struct bfq_queue *
bfq_split_bfqq(struct cfq_io_context *cic, struct bfq_queue *bfqq)
{
	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
	if (bfqq_process_refs(bfqq) == 1) {
		bfqq->pid = current->pid;
		bfq_clear_bfqq_some_coop_idle(bfqq);
		bfq_clear_bfqq_coop(bfqq);
		bfq_clear_bfqq_split_coop(bfqq);
		return bfqq;
	}

	cic_set_bfqq(cic, NULL, 1);

	bfq_put_cooperator(bfqq);

	bfq_put_queue(bfqq);
	return NULL;
}

/*
 * Allocate bfq data structures associated with this request.
 */
static int bfq_set_request(struct request_queue *q, struct request *rq,
			   gfp_t gfp_mask)
{
	struct bfq_data *bfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct bfq_queue *bfqq;
	struct bfq_group *bfqg;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = bfq_get_io_context(bfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (cic == NULL)
		goto queue_fail;

	bfqg = bfq_cic_update_cgroup(cic);

new_queue:
	bfqq = cic_to_bfqq(cic, is_sync);
	if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
		bfqq = bfq_get_queue(bfqd, bfqg, is_sync, cic->ioc, gfp_mask);
		cic_set_bfqq(cic, bfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
			bfqq = bfq_split_bfqq(cic, bfqq);
			if (!bfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another closely cooperating queue.  The merging of queues
		 * happens here as it must be done in process context.
		 * The reference on new_bfqq was taken in merge_bfqqs.
		 */
		if (bfqq->new_bfqq != NULL)
			bfqq = bfq_merge_bfqqs(bfqd, cic, bfqq);
	}

	bfqq->allocated[rw]++;
	atomic_inc(&bfqq->ref);
	bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
		     atomic_read(&bfqq->ref));

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private[0] = cic;
	rq->elevator_private[1] = bfqq;

	return 0;

queue_fail:
	if (cic != NULL)
		put_io_context(cic->ioc);

	bfq_schedule_dispatch(bfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 1;
}

static void bfq_kick_queue(struct work_struct *work)
{
	struct bfq_data *bfqd =
		container_of(work, struct bfq_data, unplug_work);
	struct request_queue *q = bfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Handler of the expiration of the timer running if the active_queue
 * is idling inside its time slice.
 */
static void bfq_idle_slice_timer(unsigned long data)
{
	struct bfq_data *bfqd = (struct bfq_data *)data;
	struct bfq_queue *bfqq;
	unsigned long flags;
	enum bfqq_expiration reason;

	spin_lock_irqsave(bfqd->queue->queue_lock, flags);

	bfqq = bfqd->active_queue;
	/*
	 * Theoretical race here: active_queue can be NULL or different
	 * from the queue that was idling if the timer handler spins on
	 * the queue_lock and a new request arrives for the current
	 * queue and there is a full dispatch cycle that changes the
	 * active_queue.  This can hardly happen, but in the worst case
	 * we just expire a queue too early.
	 */
	if (bfqq != NULL) {
		bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
		if (bfq_bfqq_budget_timeout(bfqq))
			/*
			 * Also here the queue can be safely expired
			 * for budget timeout without wasting guarantees.
			 */
			reason = BFQ_BFQQ_BUDGET_TIMEOUT;
		else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
			/*
			 * The queue may not be empty upon timer expiration,
			 * because we may not disable the timer when the first
			 * request of the active queue arrives during
			 * disk idling.
			 */
			reason = BFQ_BFQQ_TOO_IDLE;
		else
			goto schedule_dispatch;

		bfq_bfqq_expire(bfqd, bfqq, 1, reason);
	}

schedule_dispatch:
	bfq_schedule_dispatch(bfqd);

	spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
}

static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
{
	del_timer_sync(&bfqd->idle_slice_timer);
	cancel_work_sync(&bfqd->unplug_work);
}

static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
					struct bfq_queue **bfqq_ptr)
{
	struct bfq_group *root_group = bfqd->root_group;
	struct bfq_queue *bfqq = *bfqq_ptr;

	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
	if (bfqq != NULL) {
		bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
			     bfqq, atomic_read(&bfqq->ref));
		bfq_put_queue(bfqq);
		*bfqq_ptr = NULL;
	}
}

/*
 * Release all the bfqg references to its async queues.  If we are
 * deallocating the group these queues may still contain requests, so
 * we reparent them to the root cgroup (i.e., the only one that will
 * exist for sure until all the requests on a device are gone).
 */
static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	int i, j;

	for (i = 0; i < 2; i++)
		for (j = 0; j < IOPRIO_BE_NR; j++)
			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);

	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
}

static void bfq_exit_queue(struct elevator_queue *e)
{
	struct bfq_data *bfqd = e->elevator_data;
	struct request_queue *q = bfqd->queue;
	struct bfq_queue *bfqq, *n;
	struct cfq_io_context *cic;

	bfq_shutdown_timer_wq(bfqd);

	spin_lock_irq(q->queue_lock);

	while (!list_empty(&bfqd->cic_list)) {
		cic = list_entry(bfqd->cic_list.next, struct cfq_io_context,
				 queue_list);
		__bfq_exit_single_io_context(bfqd, cic);
	}

	BUG_ON(bfqd->active_queue != NULL);
	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
		bfq_deactivate_bfqq(bfqd, bfqq, 0);

	bfq_disconnect_groups(bfqd);
	spin_unlock_irq(q->queue_lock);

	bfq_shutdown_timer_wq(bfqd);

	spin_lock(&cic_index_lock);
	ida_remove(&cic_index_ida, bfqd->cic_index);
	spin_unlock(&cic_index_lock);

	/* Wait for cic->key accessors to exit their grace periods. */
	synchronize_rcu();

	BUG_ON(timer_pending(&bfqd->idle_slice_timer));

	bfq_free_root_group(bfqd);
	kfree(bfqd);
}

static int bfq_alloc_cic_index(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
			return -ENOMEM;

		spin_lock(&cic_index_lock);
		error = ida_get_new(&cic_index_ida, &index);
		spin_unlock(&cic_index_lock);
		if (error && error != -EAGAIN)
			return error;
	} while (error);

	return index;
}

static void *bfq_init_queue(struct request_queue *q)
{
	struct bfq_group *bfqg;
	struct bfq_data *bfqd;
	int i;

	i = bfq_alloc_cic_index();
	if (i < 0)
		return NULL;

	bfqd = kmalloc_node(sizeof(*bfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (bfqd == NULL)
		return NULL;

	bfqd->cic_index = i;

	/*
	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.
	 */
	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
	atomic_inc(&bfqd->oom_bfqq.ref);

	INIT_LIST_HEAD(&bfqd->cic_list);

	bfqd->queue = q;

	bfqg = bfq_alloc_root_group(bfqd, q->node);
	if (bfqg == NULL) {
		kfree(bfqd);
		return NULL;
	}

	bfqd->root_group = bfqg;

	init_timer(&bfqd->idle_slice_timer);
	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
	bfqd->idle_slice_timer.data = (unsigned long)bfqd;

	bfqd->rq_pos_tree = RB_ROOT;

	INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);

	INIT_LIST_HEAD(&bfqd->active_list);
	INIT_LIST_HEAD(&bfqd->idle_list);

	bfqd->hw_tag = -1;

	bfqd->bfq_max_budget = bfq_default_max_budget;

	bfqd->bfq_quantum = bfq_quantum;
	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
	bfqd->bfq_back_max = bfq_back_max;
	bfqd->bfq_back_penalty = bfq_back_penalty;
	bfqd->bfq_slice_idle = bfq_slice_idle;
	bfqd->bfq_class_idle_last_service = 0;
	bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
	bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
	bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;

	bfqd->low_latency = true;

	bfqd->bfq_raising_coeff = 20;
	bfqd->bfq_raising_rt_max_time = msecs_to_jiffies(300);
	bfqd->bfq_raising_max_time = 0;
	bfqd->bfq_raising_min_idle_time = msecs_to_jiffies(2000);
	bfqd->bfq_raising_min_inter_arr_async = msecs_to_jiffies(500);
	bfqd->bfq_raising_max_softrt_rate = 7000;

	/* Initially estimate the device's peak rate as the reference rate. */
	if (blk_queue_nonrot(bfqd->queue)) {
		bfqd->RT_prod = R_nonrot * T_nonrot;
		bfqd->peak_rate = R_nonrot;
	} else {
		bfqd->RT_prod = R_rot * T_rot;
		bfqd->peak_rate = R_rot;
	}

	return bfqd;
}

static void bfq_slab_kill(void)
{
	if (bfq_pool != NULL)
		kmem_cache_destroy(bfq_pool);
	if (bfq_ioc_pool != NULL)
		kmem_cache_destroy(bfq_ioc_pool);
}

static int __init bfq_slab_setup(void)
{
	bfq_pool = KMEM_CACHE(bfq_queue, 0);
	if (bfq_pool == NULL)
		goto fail;

	bfq_ioc_pool = kmem_cache_create("bfq_io_context",
					 sizeof(struct cfq_io_context),
					 __alignof__(struct cfq_io_context),
					 0, NULL);
	if (bfq_ioc_pool == NULL)
		goto fail;

	return 0;
fail:
	bfq_slab_kill();
	return -ENOMEM;
}

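/*
 * sysfs helpers.  The tunables below are exposed by the elevator core,
 * typically under /sys/block/<dev>/queue/iosched/; attributes declared
 * with a non-zero __CONV are shown and stored in milliseconds and
 * converted to/from jiffies internally.  For example (assuming bfq is
 * the active scheduler for sda):
 *
 *	echo 8 > /sys/block/sda/queue/iosched/slice_idle
 *
 * sets the idling period to 8 ms.
 */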
static ssize_t bfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t bfq_var_store(unsigned long *var, const char *page, size_t count)
{
	unsigned long new_val;
	int ret = strict_strtoul(page, 10, &new_val);

	if (ret == 0)
		*var = new_val;

	return count;
}

static ssize_t bfq_raising_max_time_show(struct elevator_queue *e, char *page)
{
	struct bfq_data *bfqd = e->elevator_data;

	return sprintf(page, "%d\n", bfqd->bfq_raising_max_time > 0 ?
		       bfqd->bfq_raising_max_time :
		       bfq_wrais_duration(bfqd));
}

static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
{
	struct bfq_queue *bfqq;
	struct bfq_data *bfqd = e->elevator_data;
	ssize_t num_char = 0;

	num_char += sprintf(page + num_char, "Active:\n");
	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
		num_char += sprintf(page + num_char,
				    "pid%d: weight %hu, dur %d/%u\n",
				    bfqq->pid,
				    bfqq->entity.weight,
				    jiffies_to_msecs(jiffies -
					bfqq->last_rais_start_finish),
				    jiffies_to_msecs(bfqq->raising_cur_max_time));
	}
	num_char += sprintf(page + num_char, "Idle:\n");
	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
		num_char += sprintf(page + num_char,
				    "pid%d: weight %hu, dur %d/%u\n",
				    bfqq->pid,
				    bfqq->entity.weight,
				    jiffies_to_msecs(jiffies -
					bfqq->last_rais_start_finish),
				    jiffies_to_msecs(bfqq->raising_cur_max_time));
	}

	return num_char;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return bfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
SHOW_FUNCTION(bfq_max_budget_async_rq_show, bfqd->bfq_max_budget_async_rq, 0);
SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
SHOW_FUNCTION(bfq_raising_coeff_show, bfqd->bfq_raising_coeff, 0);
SHOW_FUNCTION(bfq_raising_rt_max_time_show, bfqd->bfq_raising_rt_max_time, 1);
SHOW_FUNCTION(bfq_raising_min_idle_time_show, bfqd->bfq_raising_min_idle_time,
	      1);
SHOW_FUNCTION(bfq_raising_min_inter_arr_async_show,
	      bfqd->bfq_raising_min_inter_arr_async, 1);
SHOW_FUNCTION(bfq_raising_max_softrt_rate_show,
	      bfqd->bfq_raising_max_softrt_rate, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t								\
__FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	unsigned long __data;						\
	int ret = bfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
	       INT_MAX, 1);
STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
	       INT_MAX, 1);
STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
	       INT_MAX, 0);
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
	       1, INT_MAX, 0);
STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
	       INT_MAX, 1);
STORE_FUNCTION(bfq_raising_coeff_store, &bfqd->bfq_raising_coeff, 1,
	       INT_MAX, 0);
STORE_FUNCTION(bfq_raising_max_time_store, &bfqd->bfq_raising_max_time, 0,
	       INT_MAX, 1);
STORE_FUNCTION(bfq_raising_rt_max_time_store, &bfqd->bfq_raising_rt_max_time, 0,
	       INT_MAX, 1);
STORE_FUNCTION(bfq_raising_min_idle_time_store,
	       &bfqd->bfq_raising_min_idle_time, 0, INT_MAX, 1);
STORE_FUNCTION(bfq_raising_min_inter_arr_async_store,
	       &bfqd->bfq_raising_min_inter_arr_async, 0, INT_MAX, 1);
STORE_FUNCTION(bfq_raising_max_softrt_rate_store,
	       &bfqd->bfq_raising_max_softrt_rate, 0, INT_MAX, 0);
#undef STORE_FUNCTION

/* Do nothing for the moment. */
static ssize_t bfq_weights_store(struct elevator_queue *e,
				 const char *page, size_t count)
{
	return count;
}

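/*
 * The default max budget is replaced by an estimate proportional to the
 * device's measured peak rate once enough samples have been collected:
 * roughly, max_budget ~= peak_rate * timeout_sync, i.e. the amount of
 * service the device can deliver within one sync budget timeout.
 */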
static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
{
	u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);

	if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
		return bfq_calc_max_budget(bfqd->peak_rate, timeout);
	else
		return bfq_default_max_budget;
}

static ssize_t bfq_max_budget_store(struct elevator_queue *e,
				    const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret = bfq_var_store(&__data, (page), count);

	if (__data == 0)
		bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
	else {
		if (__data > INT_MAX)
			__data = INT_MAX;
		bfqd->bfq_max_budget = __data;
	}

	bfqd->bfq_user_max_budget = __data;

	return ret;
}

static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
				      const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret = bfq_var_store(&__data, (page), count);

	if (__data < 1)
		__data = 1;
	else if (__data > INT_MAX)
		__data = INT_MAX;

	bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
	if (bfqd->bfq_user_max_budget == 0)
		bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);

	return ret;
}

static ssize_t bfq_low_latency_store(struct elevator_queue *e,
				     const char *page, size_t count)
{
	struct bfq_data *bfqd = e->elevator_data;
	unsigned long __data;
	int ret = bfq_var_store(&__data, (page), count);

	if (__data > 1)
		__data = 1;
	bfqd->low_latency = __data;

	return ret;
}

#define BFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)

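/*
 * Each BFQ_ATTR(name) entry below becomes a read/write sysfs attribute
 * wired to the bfq_<name>_show() / bfq_<name>_store() pair defined above.
 */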
static struct elv_fs_entry bfq_attrs[] = {
	BFQ_ATTR(quantum),
	BFQ_ATTR(fifo_expire_sync),
	BFQ_ATTR(fifo_expire_async),
	BFQ_ATTR(back_seek_max),
	BFQ_ATTR(back_seek_penalty),
	BFQ_ATTR(slice_idle),
	BFQ_ATTR(max_budget),
	BFQ_ATTR(max_budget_async_rq),
	BFQ_ATTR(timeout_sync),
	BFQ_ATTR(timeout_async),
	BFQ_ATTR(low_latency),
	BFQ_ATTR(raising_coeff),
	BFQ_ATTR(raising_max_time),
	BFQ_ATTR(raising_rt_max_time),
	BFQ_ATTR(raising_min_idle_time),
	BFQ_ATTR(raising_min_inter_arr_async),
	BFQ_ATTR(raising_max_softrt_rate),
	BFQ_ATTR(weights),
	__ATTR_NULL
};

static struct elevator_type iosched_bfq = {
	.ops = {
		.elevator_merge_fn = bfq_merge,
		.elevator_merged_fn = bfq_merged_request,
		.elevator_merge_req_fn = bfq_merged_requests,
		.elevator_allow_merge_fn = bfq_allow_merge,
		.elevator_dispatch_fn = bfq_dispatch_requests,
		.elevator_add_req_fn = bfq_insert_request,
		.elevator_activate_req_fn = bfq_activate_request,
		.elevator_deactivate_req_fn = bfq_deactivate_request,
		.elevator_completed_req_fn = bfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = bfq_set_request,
		.elevator_put_req_fn = bfq_put_request,
		.elevator_may_queue_fn = bfq_may_queue,
		.elevator_init_fn = bfq_init_queue,
		.elevator_exit_fn = bfq_exit_queue,
		.trim = bfq_free_io_context,
	},
	.elevator_attrs = bfq_attrs,
	.elevator_name = "bfq",
	.elevator_owner = THIS_MODULE,
};

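/*
 * bfq_slice_idle is HZ / 125 jiffies, i.e. 8 ms with HZ = 1000; with
 * HZ < 125 the integer division yields 0, so bfq_init() below bumps it
 * (and a zero async timeout) to a minimum of one jiffy.
 */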
static int __init bfq_init(void)
{
	/*
	 * Can be 0 on HZ < 1000 setups.
	 */
	if (bfq_slice_idle == 0)
		bfq_slice_idle = 1;

	if (bfq_timeout_async == 0)
		bfq_timeout_async = 1;

	if (bfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_bfq);

	return 0;
}

static void __exit bfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);

	elv_unregister(&iosched_bfq);
	bfq_ioc_gone = &all_gone;
	/* bfq_ioc_gone's update must be visible before reading bfq_ioc_count */
	smp_wmb();

	if (elv_ioc_count_read(bfq_ioc_count) != 0)
		wait_for_completion(&all_gone);
	ida_destroy(&cic_index_ida);
	bfq_slab_kill();
}

module_init(bfq_init);
module_exit(bfq_exit);

MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Budget Fair Queueing IO scheduler");