~ubuntu-branches/ubuntu/trusty/linux-linaro-omap/trusty


Viewing changes to block/blk-core.c

  • Committer: Package Import Robot
  • Author(s): John Rigby
  • Date: 2011-09-26 10:44:23 UTC
  • Revision ID: package-import@ubuntu.com-20110926104423-57i0gl3v99b3lkfg
  • Tags: 3.0.0-1007.9
[ John Rigby ]

Enable crypto modules and remove crypto-modules from exclude-module files (LP: #826021)

@@ -345,6 +345,7 @@
 {
         kobject_put(&q->kobj);
 }
+EXPORT_SYMBOL(blk_put_queue);
 
 /*
  * Note: If a driver supplied the queue lock, it should not zap that lock
@@ -566,11 +567,10 @@
 
         return 1;
 }
+EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
-        BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
         if (rq->cmd_flags & REQ_ELVPRIV)
                 elv_put_request(q, rq);
         mempool_free(rq, q->rq.rq_pool);
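
The two EXPORT_SYMBOL additions above make the queue reference helpers available to modules. Below is a minimal, hypothetical module-side sketch of the intended pairing; hold_queue_briefly() is illustrative only, and it assumes the 3.0-era convention (visible in the context above, which ends with "return 1;") that blk_get_queue() returns non-zero when the queue is already marked dead and no reference was taken.

#include <linux/blkdev.h>

/* Hypothetical helper: pin a request queue while inspecting it, then
 * drop the reference.  blk_get_queue()/blk_put_queue() are the symbols
 * exported by the hunk above. */
static int hold_queue_briefly(struct request_queue *q)
{
        if (blk_get_queue(q))           /* non-zero: queue is dead, no ref taken */
                return -ENXIO;

        /* ... safe to dereference q here: the kobject reference is held ... */

        blk_put_queue(q);               /* drops the reference taken above */
        return 0;
}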
@@ -839,6 +839,9 @@
 {
         struct request *rq;
 
+        if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+                return NULL;
+
         BUG_ON(rw != READ && rw != WRITE);
 
         spin_lock_irq(q->queue_lock);
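
Judging by the BUG_ON(rw != READ && rw != WRITE) context, this appears to be the top of blk_get_request(); with the added QUEUE_FLAG_DEAD test, even sleeping (__GFP_WAIT) callers can now see NULL once blk_cleanup_queue() has marked the queue dead. A minimal, hypothetical caller-side sketch of handling that outcome (issue_one_request() is illustrative, not part of this change):

#include <linux/blkdev.h>

/* Hypothetical caller: allocate a request on q and bail out cleanly if
 * the queue has already been marked dead. */
static int issue_one_request(struct request_queue *q)
{
        struct request *rq;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)                        /* dead queue or allocation failure */
                return -ENODEV;

        /* ... fill in and dispatch rq here ... */

        blk_put_request(rq);
        return 0;
}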
@@ -1110,14 +1113,6 @@
 {
         const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 
-        /*
-         * Debug stuff, kill later
-         */
-        if (!rq_mergeable(req)) {
-                blk_dump_rq_flags(req, "back");
-                return false;
-        }
-
         if (!ll_back_merge_fn(q, req, bio))
                 return false;
 
@@ -1132,6 +1127,7 @@
         req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
         drive_stat_acct(req, 0);
+        elv_bio_merged(q, req, bio);
         return true;
 }
 
@@ -1139,15 +1135,6 @@
                                     struct request *req, struct bio *bio)
 {
         const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
-        sector_t sector;
-
-        /*
-         * Debug stuff, kill later
-         */
-        if (!rq_mergeable(req)) {
-                blk_dump_rq_flags(req, "front");
-                return false;
-        }
 
         if (!ll_front_merge_fn(q, req, bio))
                 return false;
@@ -1157,8 +1144,6 @@
         if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                 blk_rq_set_mixed_merge(req);
 
-        sector = bio->bi_sector;
-
         bio->bi_next = req->bio;
         req->bio = bio;
 
@@ -1173,6 +1158,7 @@
         req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
         drive_stat_acct(req, 0);
+        elv_bio_merged(q, req, bio);
         return true;
 }
 
@@ -1258,14 +1244,12 @@
 
         el_ret = elv_merge(q, &req, bio);
         if (el_ret == ELEVATOR_BACK_MERGE) {
-                BUG_ON(req->cmd_flags & REQ_ON_PLUG);
                 if (bio_attempt_back_merge(q, req, bio)) {
                         if (!attempt_back_merge(q, req))
                                 elv_merged_request(q, req, el_ret);
                         goto out_unlock;
                 }
         } else if (el_ret == ELEVATOR_FRONT_MERGE) {
-                BUG_ON(req->cmd_flags & REQ_ON_PLUG);
                 if (bio_attempt_front_merge(q, req, bio)) {
                         if (!attempt_front_merge(q, req))
                                 elv_merged_request(q, req, el_ret);
@@ -1320,10 +1304,6 @@
                         if (__rq->q != q)
                                 plug->should_sort = 1;
                 }
-                /*
-                 * Debug flag, kill later
-                 */
-                req->cmd_flags |= REQ_ON_PLUG;
                 list_add_tail(&req->queuelist, &plug->list);
                 drive_stat_acct(req, 1);
         } else {
@@ -1550,7 +1530,8 @@
                         goto end_io;
                 }
 
-                blk_throtl_bio(q, &bio);
+                if (blk_throtl_bio(q, &bio))
+                        goto end_io;
 
                 /*
                  * If bio = NULL, bio has been throttled and will be submitted
@@ -2748,7 +2729,6 @@
         while (!list_empty(&list)) {
                 rq = list_entry_rq(list.next);
                 list_del_init(&rq->queuelist);
-                BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                 BUG_ON(!rq->q);
                 if (rq->q != q) {
                         /*
@@ -2760,8 +2740,6 @@
                         depth = 0;
                         spin_lock(q->queue_lock);
                 }
-                rq->cmd_flags &= ~REQ_ON_PLUG;
-
                 /*
                  * rq is already accounted, so use raw insert
                  */