/*
 * BFQ: CGROUPS support.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
 */

#ifdef CONFIG_CGROUP_BFQIO
static struct bfqio_cgroup bfqio_root_cgroup = {
	.weight = BFQ_DEFAULT_GRP_WEIGHT,
	.ioprio = BFQ_DEFAULT_GRP_IOPRIO,
	.ioprio_class = BFQ_DEFAULT_GRP_CLASS,
};

static inline void bfq_init_entity(struct bfq_entity *entity,
				   struct bfq_group *bfqg)
{
	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	entity->ioprio = entity->new_ioprio;
	entity->ioprio_class = entity->new_ioprio_class;
	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
}

static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id),
			    struct bfqio_cgroup, css);
}

/*
 * Search the bfq_group associated to @bfqd in the hash table (for now
 * only a list) of @bgrp.  Must be called under rcu_read_lock().
 */
static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
					    struct bfq_data *bfqd)
{
	struct bfq_group *bfqg;
	struct hlist_node *n;
	void *key;

	hlist_for_each_entry_rcu(bfqg, n, &bgrp->group_data, group_node) {
		key = rcu_dereference(bfqg->bfqd);
		if (key == bfqd)
			return bfqg;
	}

	return NULL;
}

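/*
 * Illustrative call pattern for the lookup above (a sketch for clarity,
 * not code used verbatim elsewhere in this file): callers must hold an
 * RCU read-side critical section around the lookup, e.g.:
 *
 *	rcu_read_lock();
 *	bfqg = bfqio_lookup_group(bgrp, bfqd);
 *	...use bfqg under the same read-side section or the queue lock...
 *	rcu_read_unlock();
 */
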
static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
					 struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqg->entity;

	entity->weight = entity->new_weight = bgrp->weight;
	entity->orig_weight = entity->new_weight;
	entity->ioprio = entity->new_ioprio = bgrp->ioprio;
	entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
	entity->ioprio_changed = 1;
	entity->my_sched_data = &bfqg->sched_data;
}

static inline void bfq_group_set_parent(struct bfq_group *bfqg,
					struct bfq_group *parent)
{
	struct bfq_entity *entity;

	BUG_ON(parent == NULL);

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

/**
 * bfq_group_chain_alloc - allocate a chain of groups.
 * @bfqd: queue descriptor.
 * @cgroup: the leaf cgroup this chain starts from.
 *
 * Allocate a chain of groups starting from the one belonging to
 * @cgroup up to the root cgroup.  Stop if a cgroup on the chain
 * to the root already has an allocated group on @bfqd.
 */
static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
					       struct cgroup *cgroup)
{
	struct bfqio_cgroup *bgrp;
	struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;

	for (; cgroup != NULL; cgroup = cgroup->parent) {
		bgrp = cgroup_to_bfqio(cgroup);

		bfqg = bfqio_lookup_group(bgrp, bfqd);
		if (bfqg != NULL) {
			/*
			 * All the cgroups in the path from there to the
			 * root must have a bfq_group for bfqd, so we don't
			 * need any more allocations.
			 */
			break;
		}

		bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
		if (bfqg == NULL)
			goto cleanup;

		bfq_group_init_entity(bgrp, bfqg);
		bfqg->my_entity = &bfqg->entity;

		if (leaf == NULL) {
			leaf = bfqg;
			prev = leaf;
		} else {
			bfq_group_set_parent(prev, bfqg);
			/*
			 * Build a list of allocated nodes using the bfqd
			 * field, which is still unused and will be
			 * initialized only after the node is connected.
			 */
			prev->bfqd = bfqg;
			prev = bfqg;
		}
	}

	return leaf;

cleanup:
	while (leaf != NULL) {
		prev = leaf;
		leaf = leaf->bfqd;
		kfree(prev);
	}

	return NULL;
}

/**
 * bfq_group_chain_link - link an allocated group chain to a cgroup hierarchy.
 * @bfqd: the queue descriptor.
 * @cgroup: the leaf cgroup to start from.
 * @leaf: the leaf group (to be associated to @cgroup).
 *
 * Try to link a chain of groups to a cgroup hierarchy, connecting the
 * nodes bottom-up, so we can be sure that when we find a cgroup in the
 * hierarchy that already has a group associated to @bfqd all the nodes
 * in the path to the root cgroup have one too.
 *
 * On locking: the queue lock protects the hierarchy (there is a hierarchy
 * per device) while the bfqio_cgroup lock protects the list of groups
 * belonging to the same cgroup.
 */
static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup,
				 struct bfq_group *leaf)
{
	struct bfqio_cgroup *bgrp;
	struct bfq_group *bfqg, *next, *prev = NULL;
	unsigned long flags;

	assert_spin_locked(bfqd->queue->queue_lock);

	for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) {
		bgrp = cgroup_to_bfqio(cgroup);
		next = leaf->bfqd;

		bfqg = bfqio_lookup_group(bgrp, bfqd);
		BUG_ON(bfqg != NULL);

		spin_lock_irqsave(&bgrp->lock, flags);

		rcu_assign_pointer(leaf->bfqd, bfqd);
		hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
		hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);

		spin_unlock_irqrestore(&bgrp->lock, flags);

		prev = leaf;
		leaf = next;
	}

	BUG_ON(cgroup == NULL && leaf != NULL);
	if (cgroup != NULL && prev != NULL) {
		bgrp = cgroup_to_bfqio(cgroup);
		bfqg = bfqio_lookup_group(bgrp, bfqd);
		bfq_group_set_parent(prev, bfqg);
	}
}

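/*
 * Sketch of the intended two-phase usage of the two helpers above (this
 * is what bfq_find_alloc_group() below does; shown here only for
 * clarity):
 *
 *	bfqg = bfq_group_chain_alloc(bfqd, cgroup);
 *	if (bfqg != NULL)
 *		bfq_group_chain_link(bfqd, cgroup, bfqg);
 *
 * Keeping allocation and linking separate guarantees that lookups never
 * observe a partially-built chain.
 */
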
/**
 * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
 * @bfqd: queue descriptor.
 * @cgroup: cgroup being searched for.
 *
 * Return a group associated to @bfqd in @cgroup, allocating one if
 * necessary.  When a group is returned all the cgroups in the path
 * to the root have a group associated to @bfqd.
 *
 * If the allocation fails, return the root group: this breaks guarantees
 * but is a safe fallback.  If this loss becomes a problem it can be
 * mitigated using the equivalent weight (given by the product of the
 * weights of the groups in the path from @group to the root) in the
 * root scheduler.
 *
 * We allocate all the missing nodes in the path from the leaf cgroup
 * to the root and we connect the nodes only after all the allocations
 * have been successful.
 */
static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
					      struct cgroup *cgroup)
{
	struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
	struct bfq_group *bfqg;

	bfqg = bfqio_lookup_group(bgrp, bfqd);
	if (bfqg != NULL)
		return bfqg;

	bfqg = bfq_group_chain_alloc(bfqd, cgroup);
	if (bfqg != NULL)
		bfq_group_chain_link(bfqd, cgroup, bfqg);
	else
		bfqg = bfqd->root_group;

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @entity: @bfqq's entity.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the queue lock; the cgroup owning @bfqg must
 * not disappear (by now this just means that we are called under
 * rcu_read_lock()).
 */
static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
			  struct bfq_entity *entity, struct bfq_group *bfqg)
{
	int busy, resume;

	busy = bfq_bfqq_busy(bfqq);
	resume = !RB_EMPTY_ROOT(&bfqq->sort_list);

	BUG_ON(resume && !entity->on_st);
	BUG_ON(busy && !resume && entity->on_st && bfqq != bfqd->active_queue);

	if (busy) {
		BUG_ON(atomic_read(&bfqq->ref) < 2);

		if (!resume)
			bfq_del_bfqq_busy(bfqd, bfqq, 0);
		else
			bfq_deactivate_bfqq(bfqd, bfqq, 0);
	} else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);

	/*
	 * Here we use a reference to bfqg.  We don't need a refcounter
	 * as the cgroup reference will not be dropped, so that its
	 * destroy() callback will not be invoked.
	 */
	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;

	if (busy && resume)
		bfq_activate_bfqq(bfqd, bfqq);
}

/**
 * __bfq_cic_change_cgroup - move @cic to @cgroup.
 * @bfqd: the queue descriptor.
 * @cic: the cic to move.
 * @cgroup: the cgroup to move to.
 *
 * Move @cic to @cgroup, assuming that bfqd->queue is locked; the caller
 * has to make sure that the reference to @cgroup is valid across the call.
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_cic_change_cgroup(struct bfq_data *bfqd,
						 struct cfq_io_context *cic,
						 struct cgroup *cgroup)
{
	struct bfq_queue *async_bfqq = cic_to_bfqq(cic, 0);
	struct bfq_queue *sync_bfqq = cic_to_bfqq(cic, 1);
	struct bfq_entity *entity;
	struct bfq_group *bfqg;

	bfqg = bfq_find_alloc_group(bfqd, cgroup);
	if (async_bfqq != NULL) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			cic_set_bfqq(cic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "cic_change_group: %p %d",
				     async_bfqq, atomic_read(&async_bfqq->ref));
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq != NULL) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
	}

	return bfqg;
}

/**
 * bfq_cic_change_cgroup - move @cic to @cgroup.
 * @cic: the cic being migrated.
 * @cgroup: the destination cgroup.
 *
 * When the task owning @cic is moved to @cgroup, @cic is immediately
 * moved into its new parent group.
 */
static void bfq_cic_change_cgroup(struct cfq_io_context *cic,
				  struct cgroup *cgroup)
{
	struct bfq_data *bfqd;
	unsigned long uninitialized_var(flags);

	bfqd = bfq_get_bfqd_locked(&cic->key, &flags);
	if (bfqd != NULL &&
	    !strncmp(bfqd->queue->elevator->elevator_type->elevator_name,
		     "bfq", ELV_NAME_MAX)) {
		__bfq_cic_change_cgroup(bfqd, cic, cgroup);
		bfq_put_bfqd_unlock(bfqd, &flags);
	}
}

/**
 * bfq_cic_update_cgroup - update the cgroup of @cic.
 * @cic: the @cic to update.
 *
 * Make sure that @cic is enqueued in the cgroup of the current task.
 * We need this in addition to moving cics during the cgroup attach
 * phase because the task owning @cic could be at its first disk
 * access or we may end up in the root cgroup as the result of a
 * memory allocation failure and here we try to move to the right
 * group.
 *
 * Must be called under the queue lock.  It is safe to use the returned
 * value even after the rcu_read_unlock() as the migration/destruction
 * paths act under the queue lock too.  IOW it is impossible to race with
 * group migration/destruction and end up with an invalid group as:
 *   a) here cgroup has not yet been destroyed, nor its destroy callback
 *      has started execution, as current holds a reference to it,
 *   b) if it is destroyed after rcu_read_unlock() [after current is
 *      migrated to a different cgroup] its attach() callback will have
 *      taken care of removing all the references to the old cgroup data.
 */
static struct bfq_group *bfq_cic_update_cgroup(struct cfq_io_context *cic)
{
	struct bfq_data *bfqd = cic->key;
	struct bfq_group *bfqg;
	struct cgroup *cgroup;

	BUG_ON(bfqd == NULL);

	rcu_read_lock();
	cgroup = task_cgroup(current, bfqio_subsys_id);
	bfqg = __bfq_cic_change_cgroup(bfqd, cic, cgroup);
	rcu_read_unlock();

	return bfqg;
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity != NULL; entity = st->first_idle)
		__bfq_deactivate_entity(entity, 0);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
					    struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	BUG_ON(bfqq == NULL);
	bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 *
 * Needs queue_lock to be taken and reference to be valid over the call.
 */
static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
						struct bfq_group *bfqg,
						struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.active_entity != NULL)
		bfq_reparent_leaf_entity(bfqd, bfqg->sched_data.active_entity);
}

/**
 * bfq_destroy_group - destroy @bfqg.
 * @bgrp: the bfqio_cgroup containing @bfqg.
 * @bfqg: the group being destroyed.
 *
 * Destroy @bfqg, making sure that it is not referenced from its parent.
 */
static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
{
	struct bfq_data *bfqd;
	struct bfq_service_tree *st;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long uninitialized_var(flags);
	int i;

	hlist_del(&bfqg->group_node);

	/*
	 * Empty all service_trees belonging to this group before deactivating
	 * the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited tasks because they never migrated to a different
		 * cgroup from the one being destroyed now.  No one else
		 * can access them so it's safe to act without any lock.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate).  We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * under service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
		if (bfqd != NULL) {
			bfq_reparent_active_entities(bfqd, bfqg, st);
			bfq_put_bfqd_unlock(bfqd, &flags);
		}
		BUG_ON(!RB_EMPTY_ROOT(&st->active));
		BUG_ON(!RB_EMPTY_ROOT(&st->idle));
	}
	BUG_ON(bfqg->sched_data.next_active != NULL);
	BUG_ON(bfqg->sched_data.active_entity != NULL);

	/*
	 * We may race with device destruction, take extra care when
	 * dereferencing bfqg->bfqd.
	 */
	bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
	if (bfqd != NULL) {
		hlist_del(&bfqg->bfqd_node);
		__bfq_deactivate_entity(entity, 0);
		bfq_put_async_queues(bfqd, bfqg);
		bfq_put_bfqd_unlock(bfqd, &flags);
	}
	BUG_ON(entity->tree != NULL);

	/*
	 * No need to defer the kfree() to the end of the RCU grace
	 * period: we are called from the destroy() callback of our
	 * cgroup, so we can be sure that no one is a) still using
	 * this cgroup or b) doing lookups in it.
	 */
	kfree(bfqg);
}

/**
 * bfq_disconnect_groups - disconnect @bfqd from all its groups.
 * @bfqd: the device descriptor being exited.
 *
 * When the device exits we just make sure that no lookup can return
 * the now unused group structures.  They will be deallocated on cgroup
 * destruction.
 */
static void bfq_disconnect_groups(struct bfq_data *bfqd)
{
	struct hlist_node *pos, *n;
	struct bfq_group *bfqg;

	bfq_log(bfqd, "disconnect_groups beginning");
	hlist_for_each_entry_safe(bfqg, pos, n, &bfqd->group_list, bfqd_node) {
		hlist_del(&bfqg->bfqd_node);

		__bfq_deactivate_entity(bfqg->my_entity, 0);

		/*
		 * Don't remove from the group hash, just set an
		 * invalid key.  No lookups can race with the
		 * assignment as bfqd is being destroyed; this
		 * implies also that new elements cannot be added
		 * to the list.
		 */
		rcu_assign_pointer(bfqg->bfqd, NULL);

		bfq_log(bfqd, "disconnect_groups: put async for group %p",
			bfqg);
		bfq_put_async_queues(bfqd, bfqg);
	}
}

static inline void bfq_free_root_group(struct bfq_data *bfqd)
{
	struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
	struct bfq_group *bfqg = bfqd->root_group;

	bfq_put_async_queues(bfqd, bfqg);

	spin_lock_irq(&bgrp->lock);
	hlist_del_rcu(&bfqg->group_node);
	spin_unlock_irq(&bgrp->lock);

	/*
	 * No need to synchronize_rcu() here: since the device is gone
	 * there cannot be any read-side access to its root_group.
	 */
	kfree(bfqg);
}

static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	struct bfqio_cgroup *bgrp;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (bfqg == NULL)
		return NULL;

	bfqg->entity.parent = NULL;
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	bgrp = &bfqio_root_cgroup;
	spin_lock_irq(&bgrp->lock);
	rcu_assign_pointer(bfqg->bfqd, bfqd);
	hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
	spin_unlock_irq(&bgrp->lock);

	return bfqg;
}

#define SHOW_FUNCTION(__VAR)						\
static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup,		\
				       struct cftype *cftype)		\
{									\
	struct bfqio_cgroup *bgrp;					\
	u64 ret;							\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -EIO;						\
									\
	bgrp = cgroup_to_bfqio(cgroup);					\
	spin_lock_irq(&bgrp->lock);					\
	ret = bgrp->__VAR;						\
	spin_unlock_irq(&bgrp->lock);					\
									\
	cgroup_unlock();						\
									\
	return ret;							\
}

SHOW_FUNCTION(weight);
SHOW_FUNCTION(ioprio);
SHOW_FUNCTION(ioprio_class);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__VAR, __MIN, __MAX)				\
static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup,		\
					struct cftype *cftype,		\
					u64 val)			\
{									\
	struct bfqio_cgroup *bgrp;					\
	struct bfq_group *bfqg;						\
	struct hlist_node *n;						\
									\
	if (val < (__MIN) || val > (__MAX))				\
		return -EINVAL;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -EIO;						\
									\
	bgrp = cgroup_to_bfqio(cgroup);					\
									\
	spin_lock_irq(&bgrp->lock);					\
	bgrp->__VAR = (unsigned short)val;				\
	hlist_for_each_entry(bfqg, n, &bgrp->group_data, group_node) {	\
		bfqg->entity.new_##__VAR = (unsigned short)val;		\
		smp_wmb();						\
		bfqg->entity.ioprio_changed = 1;			\
	}								\
	spin_unlock_irq(&bgrp->lock);					\
									\
	cgroup_unlock();						\
									\
	return 0;							\
}

STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
#undef STORE_FUNCTION

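/*
 * For reference, SHOW_FUNCTION(weight) above generates a reader
 * equivalent to the following sketch (expansion shown only for clarity,
 * modulo whitespace; the elided lines are the locking shown in the
 * macro):
 *
 *	static u64 bfqio_cgroup_weight_read(struct cgroup *cgroup,
 *					    struct cftype *cftype)
 *	{
 *		struct bfqio_cgroup *bgrp;
 *		u64 ret;
 *		...
 *		ret = bgrp->weight;
 *		...
 *		return ret;
 *	}
 */
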
652
static struct cftype bfqio_files[] = {
655
.read_u64 = bfqio_cgroup_weight_read,
656
.write_u64 = bfqio_cgroup_weight_write,
660
.read_u64 = bfqio_cgroup_ioprio_read,
661
.write_u64 = bfqio_cgroup_ioprio_write,
664
.name = "ioprio_class",
665
.read_u64 = bfqio_cgroup_ioprio_class_read,
666
.write_u64 = bfqio_cgroup_ioprio_class_write,
static int bfqio_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, bfqio_files,
				ARRAY_SIZE(bfqio_files));
}

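/*
 * Example of the resulting userspace interface (paths and values below
 * are just an illustration): once the "bfqio" controller is mounted,
 * each cgroup directory exposes the three files defined above, e.g.:
 *
 *	# mount -t cgroup -o bfqio none /cgroup
 *	# mkdir /cgroup/grp
 *	# echo 500 > /cgroup/grp/bfqio.weight
 *	# echo 2 > /cgroup/grp/bfqio.ioprio_class
 */
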
static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys *subsys,
						struct cgroup *cgroup)
{
	struct bfqio_cgroup *bgrp;

	if (cgroup->parent != NULL) {
		bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
		if (bgrp == NULL)
			return ERR_PTR(-ENOMEM);
	} else
		bgrp = &bfqio_root_cgroup;

	spin_lock_init(&bgrp->lock);
	INIT_HLIST_HEAD(&bgrp->group_data);
	bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
	bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;

	return &bgrp->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic/bfqq data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc; the drawback of this
 * behavior is that a group containing a task that forked using CLONE_IO
 * will not be destroyed until the tasks sharing the ioc die.
 */
static int bfqio_can_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			    struct task_struct *tsk)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
		/*
		 * ioc == NULL means that the task is either too young or
		 * exiting: if it has still no ioc the ioc can't be shared,
		 * if the task is exiting the attach will fail anyway, no
		 * matter what we return here.
		 */
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

static void bfqio_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			 struct cgroup *prev, struct task_struct *tsk)
{
	struct io_context *ioc;
	struct cfq_io_context *cic;
	struct hlist_node *n;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc != NULL) {
		BUG_ON(atomic_long_read(&ioc->refcount) == 0);
		atomic_long_inc(&ioc->refcount);
	}
	task_unlock(tsk);

	if (ioc == NULL)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(cic, n, &ioc->bfq_cic_list, cic_list)
		bfq_cic_change_cgroup(cic, cgroup);
	rcu_read_unlock();

	put_io_context(ioc);
}

static void bfqio_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup);
	struct hlist_node *n, *tmp;
	struct bfq_group *bfqg;

	/*
	 * Since we are destroying the cgroup, there are no more tasks
	 * referencing it, and all the RCU grace periods that may have
	 * referenced it are ended (as the destruction of the parent
	 * cgroup is RCU-safe); bgrp->group_data will not be accessed by
	 * anything else and we don't need any synchronization.
	 */
	hlist_for_each_entry_safe(bfqg, n, tmp, &bgrp->group_data, group_node)
		bfq_destroy_group(bgrp, bfqg);

	BUG_ON(!hlist_empty(&bgrp->group_data));

	kfree(bgrp);
}

struct cgroup_subsys bfqio_subsys = {
	.name = "bfqio",
	.create = bfqio_create,
	.can_attach = bfqio_can_attach,
	.attach = bfqio_attach,
	.destroy = bfqio_destroy,
	.populate = bfqio_populate,
	.subsys_id = bfqio_subsys_id,
};
#else /* CONFIG_CGROUP_BFQIO */
static inline void bfq_init_entity(struct bfq_entity *entity,
				   struct bfq_group *bfqg)
{
	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	entity->ioprio = entity->new_ioprio;
	entity->ioprio_class = entity->new_ioprio_class;
	entity->sched_data = &bfqg->sched_data;
}

static inline struct bfq_group *
bfq_cic_update_cgroup(struct cfq_io_context *cic)
{
	struct bfq_data *bfqd = cic->key;
	return bfqd->root_group;
}

static inline void bfq_bfqq_move(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq,
				 struct bfq_entity *entity,
				 struct bfq_group *bfqg)
{
}

static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
{
	bfq_put_async_queues(bfqd, bfqd->root_group);
}

static inline void bfq_free_root_group(struct bfq_data *bfqd)
{
	kfree(bfqd->root_group);
}

static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (bfqg == NULL)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_CGROUP_BFQIO */