~ubuntu-branches/debian/wheezy/linux-2.6/wheezy


Viewing changes to block/cfq-iosched.c

  • Committer: Bazaar Package Importer
  • Author(s): Ben Hutchings, Aurelien Jarno
  • Date: 2011-06-07 12:14:05 UTC
  • mfrom: (43.1.9 sid)
  • Revision ID: james.westby@ubuntu.com-20110607121405-i3h1rd7nrnd2b73h
Tags: 2.6.39-2
[ Ben Hutchings ]
* [x86] Enable BACKLIGHT_APPLE, replacing BACKLIGHT_MBP_NVIDIA
  (Closes: #627492)
* cgroups: Disable memory resource controller by default. Allow it
  to be enabled using the kernel parameter 'cgroup_enable=memory'
  (see the boot-parameter example after this changelog).
* rt2800usb: Enable support for more USB devices including
  Linksys WUSB600N (Closes: #596626) (this change was accidentally
  omitted from 2.6.39-1)
* [x86] Remove Celeron from list of processors supporting PAE. Most
  'Celeron M' models do not.
* Update debconf template translations:
  - Swedish (Martin Bagge) (Closes: #628932)
  - French (David Prévot) (Closes: #628191)
* aufs: Update for 2.6.39 (Closes: #627837)
* Add stable 2.6.39.1, including:
  - ext4: don't set PageUptodate in ext4_end_bio()
  - pata_cmd64x: fix boot crash on parisc (Closes: #622997, #622745)
  - ext3: Fix fs corruption when make_indexed_dir() fails
  - netfilter: nf_ct_sip: validate Content-Length in TCP SIP messages
  - sctp: fix race between sctp_bind_addr_free() and
    sctp_bind_addr_conflict()
  - sctp: fix memory leak of the ASCONF queue when free asoc
  - md/bitmap: fix saving of events_cleared and other state
  - cdc_acm: Fix oops when Droids MuIn LCD is connected
  - cx88: Fix conversion from BKL to fine-grained locks (Closes: #619827)
  - keys: Set cred->user_ns in key_replace_session_keyring (CVE-2011-2184)
  - tmpfs: fix race between truncate and writepage
  - nfs41: Correct offset for LAYOUTCOMMIT
  - xen/mmu: fix a race window causing leave_mm BUG()
  - ext4: fix possible use-after-free in ext4_remove_li_request()
  For the complete list of changes, see:
   http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.39.1
* Bump ABI to 2
* netfilter: Enable IP_SET, IP_SET_BITMAP_IP, IP_SET_BITMAP_IPMAC,
  IP_SET_BITMAP_PORT, IP_SET_HASH_IP, IP_SET_HASH_IPPORT,
  IP_SET_HASH_IPPORTIP, IP_SET_HASH_IPPORTNET, IP_SET_HASH_NET,
  IP_SET_HASH_NETPORT, IP_SET_LIST_SET, NETFILTER_XT_SET as modules
  (Closes: #629401)

[ Aurelien Jarno ]
* [mipsel/loongson-2f] Disable SCSI_LPFC to work around a GCC ICE.
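
With the memory resource controller now disabled by default, it stays compiled
in but inactive until 'cgroup_enable=memory' is passed on the kernel command
line. A minimal sketch of how that is typically done, assuming a Debian system
booting through GRUB 2 (the /etc/default/grub path, the GRUB_CMDLINE_LINUX
variable and the update-grub step are the usual Debian convention, not
something specified by this changelog):

    # /etc/default/grub: append the parameter to the kernel command line
    GRUB_CMDLINE_LINUX="cgroup_enable=memory"

    # regenerate /boot/grub/grub.cfg, then reboot for it to take effect
    update-grub

Without the parameter, memory accounting and limits via the cgroup memory
controller remain unavailable on this kernel.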

--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,9 +54,9 @@
 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
 
 #define RQ_CIC(rq)              \
-        ((struct cfq_io_context *) (rq)->elevator_private)
-#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private2)
-#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private3)
+        ((struct cfq_io_context *) (rq)->elevator_private[0])
+#define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elevator_private[1])
+#define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elevator_private[2])
 
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
@@ -146,7 +146,6 @@
         struct cfq_rb_root *service_tree;
         struct cfq_queue *new_cfqq;
         struct cfq_group *cfqg;
-        struct cfq_group *orig_cfqg;
         /* Number of sectors dispatched from queue in single dispatch round */
         unsigned long nr_sectors;
 };
@@ -179,6 +178,8 @@
         /* group service_tree key */
         u64 vdisktime;
         unsigned int weight;
+        unsigned int new_weight;
+        bool needs_update;
 
         /* number of cfqq currently on this group */
         int nr_cfqq;
@@ -238,6 +239,7 @@
         struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
         unsigned int busy_queues;
+        unsigned int busy_sync_queues;
 
         int rq_in_driver;
         int rq_in_flight[2];
@@ -285,7 +287,6 @@
         unsigned int cfq_slice_idle;
         unsigned int cfq_group_idle;
         unsigned int cfq_latency;
-        unsigned int cfq_group_isolation;
 
         unsigned int cic_index;
         struct list_head cic_list;
@@ -501,13 +502,6 @@
         }
 }
 
-static int cfq_queue_empty(struct request_queue *q)
-{
-        struct cfq_data *cfqd = q->elevator->elevator_data;
-
-        return !cfqd->rq_queued;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -558,15 +552,13 @@
 
 static void update_min_vdisktime(struct cfq_rb_root *st)
 {
-        u64 vdisktime = st->min_vdisktime;
         struct cfq_group *cfqg;
 
         if (st->left) {
                 cfqg = rb_entry_cfqg(st->left);
-                vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
+                st->min_vdisktime = max_vdisktime(st->min_vdisktime,
+                                                  cfqg->vdisktime);
         }
-
-        st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
 }
 
 /*
@@ -863,7 +855,27 @@
 }
 
 static void
-cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
+cfq_update_group_weight(struct cfq_group *cfqg)
+{
+        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+        if (cfqg->needs_update) {
+                cfqg->weight = cfqg->new_weight;
+                cfqg->needs_update = false;
+        }
+}
+
+static void
+cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
+
+        cfq_update_group_weight(cfqg);
+        __cfq_group_service_tree_add(st, cfqg);
+        st->total_weight += cfqg->weight;
+}
+
+static void
+cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
         struct cfq_rb_root *st = &cfqd->grp_service_tree;
         struct cfq_group *__cfqg;
@@ -876,7 +888,7 @@
         /*
          * Currently put the group at the end. Later implement something
          * so that groups get lesser vtime based on their weights, so that
-         * if group does not loose all if it was not continously backlogged.
+         * if group does not loose all if it was not continuously backlogged.
          */
         n = rb_last(&st->rb);
         if (n) {
@@ -884,13 +896,19 @@
                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
         } else
                 cfqg->vdisktime = st->min_vdisktime;
-
-        __cfq_group_service_tree_add(st, cfqg);
-        st->total_weight += cfqg->weight;
-}
-
-static void
-cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
+        cfq_group_service_tree_add(st, cfqg);
+}
+
+static void
+cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
+{
+        st->total_weight -= cfqg->weight;
+        if (!RB_EMPTY_NODE(&cfqg->rb_node))
+                cfq_rb_erase(&cfqg->rb_node, st);
+}
+
+static void
+cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
         struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
@@ -902,14 +920,13 @@
                 return;
 
         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-        st->total_weight -= cfqg->weight;
-        if (!RB_EMPTY_NODE(&cfqg->rb_node))
-                cfq_rb_erase(&cfqg->rb_node, st);
+        cfq_group_service_tree_del(st, cfqg);
         cfqg->saved_workload_slice = 0;
         cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
-static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
+static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
+                                                unsigned int *unaccounted_time)
 {
         unsigned int slice_used;
 
@@ -928,8 +945,13 @@
                                         1);
         } else {
                 slice_used = jiffies - cfqq->slice_start;
-                if (slice_used > cfqq->allocated_slice)
+                if (slice_used > cfqq->allocated_slice) {
+                        *unaccounted_time = slice_used - cfqq->allocated_slice;
                         slice_used = cfqq->allocated_slice;
+                }
+                if (time_after(cfqq->slice_start, cfqq->dispatch_start))
+                        *unaccounted_time += cfqq->slice_start -
+                                        cfqq->dispatch_start;
         }
 
         return slice_used;
@@ -939,12 +961,12 @@
                                 struct cfq_queue *cfqq)
 {
         struct cfq_rb_root *st = &cfqd->grp_service_tree;
-        unsigned int used_sl, charge;
+        unsigned int used_sl, charge, unaccounted_sl = 0;
         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                         - cfqg->service_tree_idle.count;
 
         BUG_ON(nr_sync < 0);
-        used_sl = charge = cfq_cfqq_slice_usage(cfqq);
+        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
 
         if (iops_mode(cfqd))
                 charge = cfqq->slice_dispatch;
@@ -952,9 +974,10 @@
                 charge = cfqq->allocated_slice;
 
         /* Can't update vdisktime while group is on service tree */
-        cfq_rb_erase(&cfqg->rb_node, st);
+        cfq_group_service_tree_del(st, cfqg);
         cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
-        __cfq_group_service_tree_add(st, cfqg);
+        /* If a new weight was requested, update now, off tree */
+        cfq_group_service_tree_add(st, cfqg);
 
         /* This group is being expired. Save the context */
         if (time_after(cfqd->workload_expires, jiffies)) {
@@ -970,7 +993,8 @@
         cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
                         " sect=%u", used_sl, cfqq->slice_dispatch, charge,
                         iops_mode(cfqd), cfqq->nr_sectors);
-        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
+                                          unaccounted_sl);
         cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
@@ -985,13 +1009,14 @@
 void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
                                         unsigned int weight)
 {
-        cfqg_of_blkg(blkg)->weight = weight;
+        struct cfq_group *cfqg = cfqg_of_blkg(blkg);
+        cfqg->new_weight = weight;
+        cfqg->needs_update = true;
 }
 
-static struct cfq_group *
-cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
+static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
+                struct blkio_cgroup *blkcg, int create)
 {
-        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
         struct cfq_group *cfqg = NULL;
         void *key = cfqd;
         int i, j;
@@ -1053,12 +1078,12 @@
  */
 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 {
-        struct cgroup *cgroup;
+        struct blkio_cgroup *blkcg;
         struct cfq_group *cfqg = NULL;
 
         rcu_read_lock();
-        cgroup = task_cgroup(current, blkio_subsys_id);
-        cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
+        blkcg = task_blkio_cgroup(current);
+        cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
         if (!cfqg && create)
                 cfqg = &cfqd->root_group;
         rcu_read_unlock();
@@ -1187,32 +1212,6 @@
         int new_cfqq = 1;
         int group_changed = 0;
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-        if (!cfqd->cfq_group_isolation
-            && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
-            && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
-                /* Move this cfq to root group */
-                cfq_log_cfqq(cfqd, cfqq, "moving to root group");
-                if (!RB_EMPTY_NODE(&cfqq->rb_node))
-                        cfq_group_service_tree_del(cfqd, cfqq->cfqg);
-                cfqq->orig_cfqg = cfqq->cfqg;
-                cfqq->cfqg = &cfqd->root_group;
-                cfqd->root_group.ref++;
-                group_changed = 1;
-        } else if (!cfqd->cfq_group_isolation
-                   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
-                /* cfqq is sequential now needs to go to its original group */
-                BUG_ON(cfqq->cfqg != &cfqd->root_group);
-                if (!RB_EMPTY_NODE(&cfqq->rb_node))
-                        cfq_group_service_tree_del(cfqd, cfqq->cfqg);
-                cfq_put_cfqg(cfqq->cfqg);
-                cfqq->cfqg = cfqq->orig_cfqg;
-                cfqq->orig_cfqg = NULL;
-                group_changed = 1;
-                cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
-        }
-#endif
-
         service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
                                                 cfqq_type(cfqq));
         if (cfq_class_idle(cfqq)) {
@@ -1284,7 +1283,7 @@
         service_tree->count++;
         if ((add_front || !new_cfqq) && !group_changed)
                 return;
-        cfq_group_service_tree_add(cfqd, cfqq->cfqg);
+        cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
 }
 
 static struct cfq_queue *
@@ -1372,6 +1371,8 @@
         BUG_ON(cfq_cfqq_on_rr(cfqq));
         cfq_mark_cfqq_on_rr(cfqq);
         cfqd->busy_queues++;
+        if (cfq_cfqq_sync(cfqq))
+                cfqd->busy_sync_queues++;
 
         cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -1395,9 +1396,11 @@
                 cfqq->p_root = NULL;
         }
 
-        cfq_group_service_tree_del(cfqd, cfqq->cfqg);
+        cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
         BUG_ON(!cfqd->busy_queues);
         cfqd->busy_queues--;
+        if (cfq_cfqq_sync(cfqq))
+                cfqd->busy_sync_queues--;
 }
 
 /*
@@ -2405,6 +2408,7 @@
          * Does this cfqq already have too much IO in flight?
          */
         if (cfqq->dispatched >= max_dispatch) {
+                bool promote_sync = false;
                 /*
                  * idle queue must always only have a single IO in flight
                  */
@@ -2412,15 +2416,26 @@
                         return false;
 
                 /*
+                 * If there is only one sync queue
+                 * we can ignore async queue here and give the sync
+                 * queue no dispatch limit. The reason is a sync queue can
+                 * preempt async queue, limiting the sync queue doesn't make
+                 * sense. This is useful for aiostress test.
+                 */
+                if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
+                        promote_sync = true;
+
+                /*
                  * We have other queues, don't allow more IO from this one
                  */
-                if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
+                if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
+                                !promote_sync)
                         return false;
 
                 /*
                  * Sole queue user, no limit
                  */
-                if (cfqd->busy_queues == 1)
+                if (cfqd->busy_queues == 1 || promote_sync)
                         max_dispatch = -1;
                 else
                         /*
@@ -2542,7 +2557,7 @@
 static void cfq_put_queue(struct cfq_queue *cfqq)
 {
         struct cfq_data *cfqd = cfqq->cfqd;
-        struct cfq_group *cfqg, *orig_cfqg;
+        struct cfq_group *cfqg;
 
         BUG_ON(cfqq->ref <= 0);
 
@@ -2554,7 +2569,6 @@
         BUG_ON(rb_first(&cfqq->sort_list));
         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
         cfqg = cfqq->cfqg;
-        orig_cfqg = cfqq->orig_cfqg;
 
         if (unlikely(cfqd->active_queue == cfqq)) {
                 __cfq_slice_expired(cfqd, cfqq, 0);
@@ -2564,33 +2578,23 @@
         BUG_ON(cfq_cfqq_on_rr(cfqq));
         kmem_cache_free(cfq_pool, cfqq);
         cfq_put_cfqg(cfqg);
-        if (orig_cfqg)
-                cfq_put_cfqg(orig_cfqg);
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
-                    void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+                  void (*func)(struct io_context *, struct cfq_io_context *))
 {
         struct cfq_io_context *cic;
         struct hlist_node *n;
 
+        rcu_read_lock();
+
         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
                 func(ioc, cic);
-}
 
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-                  void (*func)(struct io_context *, struct cfq_io_context *))
-{
-        rcu_read_lock();
-        __call_for_each_cic(ioc, func);
         rcu_read_unlock();
 }
 
@@ -2651,7 +2655,7 @@
          * should be ok to iterate over the known list, we will see all cic's
          * since no new ones are added.
          */
-        __call_for_each_cic(ioc, cic_free_func);
+        call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -3355,7 +3359,7 @@
                             cfqd->busy_queues > 1) {
                                 cfq_del_timer(cfqd, cfqq);
                                 cfq_clear_cfqq_wait_request(cfqq);
-                                __blk_run_queue(cfqd->queue, false);
+                                __blk_run_queue(cfqd->queue);
                         } else {
                                 cfq_blkiocg_update_idle_time_stats(
                                                 &cfqq->cfqg->blkg);
@@ -3370,7 +3374,7 @@
                  * this new queue is RT and the current one is BE
                  */
                 cfq_preempt_queue(cfqd, cfqq);
-                __blk_run_queue(cfqd->queue, false);
+                __blk_run_queue(cfqd->queue);
         }
 }
 
@@ -3613,12 +3617,12 @@
 
                 put_io_context(RQ_CIC(rq)->ioc);
 
-                rq->elevator_private = NULL;
-                rq->elevator_private2 = NULL;
+                rq->elevator_private[0] = NULL;
+                rq->elevator_private[1] = NULL;
 
                 /* Put down rq reference on cfqg */
                 cfq_put_cfqg(RQ_CFQG(rq));
-                rq->elevator_private3 = NULL;
+                rq->elevator_private[2] = NULL;
 
                 cfq_put_queue(cfqq);
         }
@@ -3705,13 +3709,12 @@
         }
 
         cfqq->allocated[rw]++;
+
         cfqq->ref++;
-        rq->elevator_private = cic;
-        rq->elevator_private2 = cfqq;
-        rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
-
+        rq->elevator_private[0] = cic;
+        rq->elevator_private[1] = cfqq;
+        rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
         spin_unlock_irqrestore(q->queue_lock, flags);
-
         return 0;
 
 queue_fail:
@@ -3731,7 +3734,7 @@
         struct request_queue *q = cfqd->queue;
 
         spin_lock_irq(q->queue_lock);
-        __blk_run_queue(cfqd->queue, false);
+        __blk_run_queue(cfqd->queue);
         spin_unlock_irq(q->queue_lock);
 }
 
@@ -3953,7 +3956,6 @@
         cfqd->cfq_slice_idle = cfq_slice_idle;
         cfqd->cfq_group_idle = cfq_group_idle;
         cfqd->cfq_latency = 1;
-        cfqd->cfq_group_isolation = 0;
         cfqd->hw_tag = -1;
         /*
          * we optimistically start assuming sync ops weren't delayed in last
@@ -4029,7 +4031,6 @@
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
-SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
@@ -4063,7 +4064,6 @@
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                 UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
-STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -4081,7 +4081,6 @@
         CFQ_ATTR(slice_idle),
         CFQ_ATTR(group_idle),
         CFQ_ATTR(low_latency),
-        CFQ_ATTR(group_isolation),
         __ATTR_NULL
 };
 
@@ -4096,7 +4095,6 @@
                 .elevator_add_req_fn =          cfq_insert_request,
                 .elevator_activate_req_fn =     cfq_activate_request,
                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
-                .elevator_queue_empty_fn =      cfq_queue_empty,
                 .elevator_completed_req_fn =    cfq_completed_request,
                 .elevator_former_req_fn =       elv_rb_former_request,
                 .elevator_latter_req_fn =       elv_rb_latter_request,