~ubuntu-branches/ubuntu/quantal/linux-linaro-mx51/quantal


Viewing changes to drivers/mmc/card/queue.c

  • Committer: Package Import Robot
  • Author(s): John Rigby
  • Date: 2011-09-26 10:44:23 UTC
  • Revision ID: package-import@ubuntu.com-20110926104423-3o58a3c1bj7x00rs
  • Tags: 3.0.0-1007.9

[ John Rigby ]
  * Enable crypto modules and remove crypto-modules from
    exclude-module files (LP: #826021)

(Diff of drivers/mmc/card/queue.c, reconstructed from the unified view. The first
column is the line number in the old revision, the second the line number in the
new revision; "-" marks lines present only in the old revision, "+" lines present
only in the new one, and "..." marks lines the view did not show.)

 old   new
  52    52          down(&mq->thread_sem);
  53    53          do {
  54    54                  struct request *req = NULL;
  55         -              struct mmc_queue_req *tmp;
  56    55
  57    56                  spin_lock_irq(q->queue_lock);
  58    57                  set_current_state(TASK_INTERRUPTIBLE);
  59    58                  req = blk_fetch_request(q);
  60         -              mq->mqrq_cur->req = req;
        59   +              mq->req = req;
  61    60                  spin_unlock_irq(q->queue_lock);
  62    61
  63         -              if (req || mq->mqrq_prev->req) {
  64         -                      set_current_state(TASK_RUNNING);
  65         -                      mq->issue_fn(mq, req);
  66         -              } else {
        62   +              if (!req) {
  67    63                          if (kthread_should_stop()) {
  68    64                                  set_current_state(TASK_RUNNING);
  69    65                                  break;
  ...
  71    67                          up(&mq->thread_sem);
  72    68                          schedule();
  73    69                          down(&mq->thread_sem);
        70   +                      continue;
  74    71                  }
        72   +              set_current_state(TASK_RUNNING);
  75    73
  76         -              /* Current request becomes previous request and vice versa. */
  77         -              mq->mqrq_prev->brq.mrq.data = NULL;
  78         -              mq->mqrq_prev->req = NULL;
  79         -              tmp = mq->mqrq_prev;
  80         -              mq->mqrq_prev = mq->mqrq_cur;
  81         -              mq->mqrq_cur = tmp;
        74   +              mq->issue_fn(mq, req);
  82    75          } while (1);
  83    76          up(&mq->thread_sem);
  84    77
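Note on the hunk above: the "-" variant of this loop keeps two struct mmc_queue_req
slots, mq->mqrq_cur and mq->mqrq_prev, and rotates them at the bottom of every pass,
so the request just handed to issue_fn() becomes the "previous" one while the retired
slot is reused for the next blk_fetch_request(). A minimal, self-contained userspace
sketch of that rotation follows; struct slot and the strings are stand-ins for
illustration only, not kernel code.

/* swap_sketch.c -- illustrative only; "slot" stands in for struct mmc_queue_req */
#include <stdio.h>

struct slot {
        const char *req;        /* the request currently owned by this slot */
};

int main(void)
{
        struct slot a = { 0 }, b = { 0 };
        struct slot *cur = &a, *prev = &b, *tmp;
        const char *incoming[] = { "req0", "req1", "req2", NULL };

        for (int i = 0; incoming[i]; i++) {
                cur->req = incoming[i];                 /* like blk_fetch_request() */
                printf("issue %s, previous slot holds %s\n",
                       cur->req, prev->req ? prev->req : "nothing");
                prev->req = NULL;                       /* retire the old request */
                tmp = prev;                             /* current becomes previous ... */
                prev = cur;                             /* ... and vice versa */
                cur = tmp;
        }
        return 0;
}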
 
  ...
 104    97                  return;
 105    98          }
 106    99
 107         -      if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
       100   +      if (!mq->req)
 108   101                  wake_up_process(mq->thread);
 109   102    }
 110   103
 111         -  struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
 112         -  {
 113         -          struct scatterlist *sg;
 114         -
 115         -          sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
 116         -          if (!sg)
 117         -                  *err = -ENOMEM;
 118         -          else {
 119         -                  *err = 0;
 120         -                  sg_init_table(sg, sg_len);
 121         -          }
 122         -
 123         -          return sg;
 124         -  }
 125         -
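The mmc_alloc_sg() helper on the "-" side of this hunk wraps kmalloc() plus
sg_init_table() and reports success or failure through an int out-parameter, so every
caller can test *err once and jump to a shared cleanup label. A hedged, standalone
sketch of that allocate-and-report pattern (alloc_table() and struct entry are invented
stand-ins, not kernel APIs):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct entry { void *addr; unsigned int len; };  /* stand-in for struct scatterlist */

static struct entry *alloc_table(int nents, int *err)
{
        struct entry *tbl = malloc(sizeof(*tbl) * nents);

        if (!tbl) {
                *err = -ENOMEM;                 /* report failure via out-parameter */
        } else {
                *err = 0;
                memset(tbl, 0, sizeof(*tbl) * nents);   /* like sg_init_table() */
        }
        return tbl;                             /* may be NULL; caller checks *err */
}

int main(void)
{
        int err;
        struct entry *sg = alloc_table(16, &err);

        if (err)
                return 1;       /* callers in the diff do "goto cleanup_queue" here */
        free(sg);
        return 0;
}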
 
 
 126   104    /**
 127   105     * mmc_init_queue - initialise a queue structure.
 128   106     * @mq: mmc queue
 129   107     * @card: mmc card to attach this queue
 130   108     * @lock: queue lock
       109   +  * @subname: partition subname
 131   110     *
 132   111     * Initialise a MMC card request queue.
 133   112     */
 134         -  int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
       113   +  int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
       114   +                     spinlock_t *lock, const char *subname)
 135   115    {
 136   116            struct mmc_host *host = card->host;
 137   117            u64 limit = BLK_BOUNCE_HIGH;
 138   118            int ret;
 139         -          struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
 140         -          struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 141   119
 142   120            if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 143   121                    limit = *mmc_dev(host)->dma_mask;
  ...
 147   125            if (!mq->queue)
 148   126                    return -ENOMEM;
 149   127
 150         -          memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
 151         -          memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
 152         -          mq->mqrq_cur = mqrq_cur;
 153         -          mq->mqrq_prev = mqrq_prev;
 154   128            mq->queue->queuedata = mq;
       129   +          mq->req = NULL;
 155   130
 156   131            blk_queue_prep_rq(mq->queue, mmc_prep_request);
 157   132            queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
  ...
 160   135                    mq->queue->limits.max_discard_sectors = UINT_MAX;
 161   136                    if (card->erased_byte == 0)
 162   137                            mq->queue->limits.discard_zeroes_data = 1;
 163         -                  if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
 164         -                          mq->queue->limits.discard_granularity =
 165         -                                                          card->erase_size << 9;
 166         -                          mq->queue->limits.discard_alignment =
 167         -                                                          card->erase_size << 9;
 168         -                  }
       138   +                  mq->queue->limits.discard_granularity = card->pref_erase << 9;
 169   139                    if (mmc_can_secure_erase_trim(card))
 170   140                            queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
 171   141                                                    mq->queue);
  ...
 185   155                            bouncesz = host->max_blk_count * 512;
 186   156
 187   157                    if (bouncesz > 512) {
 188         -                          mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
 189         -                          if (!mqrq_cur->bounce_buf) {
 190         -                                  printk(KERN_WARNING "%s: unable to "
 191         -                                          "allocate bounce cur buffer\n",
 192         -                                          mmc_card_name(card));
 193         -                          }
 194         -                          mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
 195         -                          if (!mqrq_prev->bounce_buf) {
 196         -                                  printk(KERN_WARNING "%s: unable to "
 197         -                                          "allocate bounce prev buffer\n",
 198         -                                          mmc_card_name(card));
 199         -                                  kfree(mqrq_cur->bounce_buf);
 200         -                                  mqrq_cur->bounce_buf = NULL;
       158   +                          mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
       159   +                          if (!mq->bounce_buf) {
       160   +                                  printk(KERN_WARNING "%s: unable to "
       161   +                                          "allocate bounce buffer\n",
       162   +                                          mmc_card_name(card));
 201   163                            }
 202   164                    }
 203   165
 204         -                  if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
       166   +                  if (mq->bounce_buf) {
 205   167                            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 206   168                            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 207   169                            blk_queue_max_segments(mq->queue, bouncesz / 512);
 208   170                            blk_queue_max_segment_size(mq->queue, bouncesz);
 209   171
 210         -                          mqrq_cur->sg = mmc_alloc_sg(1, &ret);
 211         -                          if (ret)
 212         -                                  goto cleanup_queue;
 213         -
 214         -                          mqrq_cur->bounce_sg =
 215         -                                  mmc_alloc_sg(bouncesz / 512, &ret);
 216         -                          if (ret)
 217         -                                  goto cleanup_queue;
 218         -
 219         -                          mqrq_prev->sg = mmc_alloc_sg(1, &ret);
 220         -                          if (ret)
 221         -                                  goto cleanup_queue;
 222         -
 223         -                          mqrq_prev->bounce_sg =
 224         -                                  mmc_alloc_sg(bouncesz / 512, &ret);
 225         -                          if (ret)
 226         -                                  goto cleanup_queue;
       172   +                          mq->sg = kmalloc(sizeof(struct scatterlist),
       173   +                                  GFP_KERNEL);
       174   +                          if (!mq->sg) {
       175   +                                  ret = -ENOMEM;
       176   +                                  goto cleanup_queue;
       177   +                          }
       178   +                          sg_init_table(mq->sg, 1);
       179   +
       180   +                          mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
       181   +                                  bouncesz / 512, GFP_KERNEL);
       182   +                          if (!mq->bounce_sg) {
       183   +                                  ret = -ENOMEM;
       184   +                                  goto cleanup_queue;
       185   +                          }
       186   +                          sg_init_table(mq->bounce_sg, bouncesz / 512);
 227   187                    }
 228   188            }
 229   189    #endif
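All of the bounce-path queue limits set in this block are derived from the single
value bouncesz: the block layer is told the hardware can take bouncesz/512 sectors
per request, at most bouncesz/512 segments, and no segment larger than the buffer
itself. A quick standalone check of that arithmetic; the 65536-byte figure is only
an example value, not taken from this diff:

#include <stdio.h>

int main(void)
{
        unsigned int bouncesz = 65536;  /* example bounce buffer size only */

        printf("max_hw_sectors   = %u\n", bouncesz / 512);      /* 512-byte sectors: 128 */
        printf("max_segments     = %u\n", bouncesz / 512);      /* 128 */
        printf("max_segment_size = %u\n", bouncesz);            /* 65536 bytes */
        return 0;
}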
 230   190
 231         -          if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
       191   +          if (!mq->bounce_buf) {
 232   192                    blk_queue_bounce_limit(mq->queue, limit);
 233   193                    blk_queue_max_hw_sectors(mq->queue,
 234   194                            min(host->max_blk_count, host->max_req_size / 512));
 235   195                    blk_queue_max_segments(mq->queue, host->max_segs);
 236   196                    blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 237   197
 238         -                  mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
 239         -                  if (ret)
 240         -                          goto cleanup_queue;
 241         -
 242         -
 243         -                  mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
 244         -                  if (ret)
 245         -                          goto cleanup_queue;
       198   +                  mq->sg = kmalloc(sizeof(struct scatterlist) *
       199   +                          host->max_segs, GFP_KERNEL);
       200   +                  if (!mq->sg) {
       201   +                          ret = -ENOMEM;
       202   +                          goto cleanup_queue;
       203   +                  }
       204   +                  sg_init_table(mq->sg, host->max_segs);
 246   205            }
 247   206
 248   207            sema_init(&mq->thread_sem, 1);
 249   208
 250         -          mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
 251         -                  host->index);
       209   +          mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
       210   +                  host->index, subname ? subname : "");
 252   211
 253   212            if (IS_ERR(mq->thread)) {
 254   213                    ret = PTR_ERR(mq->thread);
  ...
 257   216
 258   217            return 0;
 259   218     free_bounce_sg:
 260         -          kfree(mqrq_cur->bounce_sg);
 261         -          mqrq_cur->bounce_sg = NULL;
 262         -          kfree(mqrq_prev->bounce_sg);
 263         -          mqrq_prev->bounce_sg = NULL;
 264         -
       219   +          if (mq->bounce_sg)
       220   +                  kfree(mq->bounce_sg);
       221   +          mq->bounce_sg = NULL;
 265   222     cleanup_queue:
 266         -          kfree(mqrq_cur->sg);
 267         -          mqrq_cur->sg = NULL;
 268         -          kfree(mqrq_cur->bounce_buf);
 269         -          mqrq_cur->bounce_buf = NULL;
 270         -
 271         -          kfree(mqrq_prev->sg);
 272         -          mqrq_prev->sg = NULL;
 273         -          kfree(mqrq_prev->bounce_buf);
 274         -          mqrq_prev->bounce_buf = NULL;
 275         -
       223   +          if (mq->sg)
       224   +                  kfree(mq->sg);
       225   +          mq->sg = NULL;
       226   +          if (mq->bounce_buf)
       227   +                  kfree(mq->bounce_buf);
       228   +          mq->bounce_buf = NULL;
 276   229            blk_cleanup_queue(mq->queue);
 277   230            return ret;
 278   231    }
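Both versions of the error path above use the usual kernel staged-cleanup idiom: each
allocation that can fail jumps to a label that releases only what was already set up,
with later labels falling through toward blk_cleanup_queue(). A minimal userspace
sketch of that control flow (setup(), buf_a and buf_b are illustrative names, not the
driver's):

#include <stdlib.h>

static int setup(void **a, void **b)
{
        void *buf_a, *buf_b;

        buf_a = malloc(64);
        if (!buf_a)
                goto fail;              /* nothing allocated yet */

        buf_b = malloc(64);
        if (!buf_b)
                goto free_a;            /* undo only what already succeeded */

        *a = buf_a;
        *b = buf_b;
        return 0;

free_a:
        free(buf_a);
fail:
        return -1;
}

int main(void)
{
        void *a, *b;

        if (setup(&a, &b))
                return 1;
        free(a);
        free(b);
        return 0;
}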
  ...
 281   234    {
 282   235            struct request_queue *q = mq->queue;
 283   236            unsigned long flags;
 284         -          struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
 285         -          struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 286   237
 287   238            /* Make sure the queue isn't suspended, as that will deadlock */
 288   239            mmc_queue_resume(mq);
  ...
 296   247            blk_start_queue(q);
 297   248            spin_unlock_irqrestore(q->queue_lock, flags);
 298   249
 299         -          kfree(mqrq_cur->bounce_sg);
 300         -          mqrq_cur->bounce_sg = NULL;
 301         -
 302         -          kfree(mqrq_cur->sg);
 303         -          mqrq_cur->sg = NULL;
 304         -
 305         -          kfree(mqrq_cur->bounce_buf);
 306         -          mqrq_cur->bounce_buf = NULL;
 307         -
 308         -          kfree(mqrq_prev->bounce_sg);
 309         -          mqrq_prev->bounce_sg = NULL;
 310         -
 311         -          kfree(mqrq_prev->sg);
 312         -          mqrq_prev->sg = NULL;
 313         -
 314         -          kfree(mqrq_prev->bounce_buf);
 315         -          mqrq_prev->bounce_buf = NULL;
       250   +          if (mq->bounce_sg)
       251   +                  kfree(mq->bounce_sg);
       252   +          mq->bounce_sg = NULL;
       253   +
       254   +          kfree(mq->sg);
       255   +          mq->sg = NULL;
       256   +
       257   +          if (mq->bounce_buf)
       258   +                  kfree(mq->bounce_buf);
       259   +          mq->bounce_buf = NULL;
 316   260
 317   261            mq->card = NULL;
 318   262    }
  ...
 365   309    /*
 366   310     * Prepare the sg list(s) to be handed of to the host driver
 367   311     */
 368         -  unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
       312   +  unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 369   313    {
 370   314            unsigned int sg_len;
 371   315            size_t buflen;
 372   316            struct scatterlist *sg;
 373   317            int i;
 374   318
 375         -          if (!mqrq->bounce_buf)
 376         -                  return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
 377         -
 378         -          BUG_ON(!mqrq->bounce_sg);
 379         -
 380         -          sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 381         -
 382         -          mqrq->bounce_sg_len = sg_len;
       319   +          if (!mq->bounce_buf)
       320   +                  return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
       321   +
       322   +          BUG_ON(!mq->bounce_sg);
       323   +
       324   +          sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
       325   +
       326   +          mq->bounce_sg_len = sg_len;
 383   327
 384   328            buflen = 0;
 385         -          for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
       329   +          for_each_sg(mq->bounce_sg, sg, sg_len, i)
 386   330                    buflen += sg->length;
 387   331
 388         -          sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
       332   +          sg_init_one(mq->sg, mq->bounce_buf, buflen);
 389   333
 390   334            return 1;
 391   335    }
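Both variants of mmc_queue_map_sg() treat the bounce case the same way: the request is
first mapped into the bounce scatterlist, the segment lengths are summed, and the whole
transfer is then republished to the host driver as a single segment backed by the bounce
buffer (sg_init_one() followed by return 1). A standalone sketch of that collapse step,
with a plain array standing in for the scatterlist:

#include <stdio.h>

struct seg { unsigned int length; };    /* stand-in for struct scatterlist */

int main(void)
{
        struct seg bounce_sg[] = { { 4096 }, { 8192 }, { 512 } };      /* example request */
        unsigned int sg_len = 3;
        size_t buflen = 0;

        /* mirror of: for_each_sg(...) buflen += sg->length; */
        for (unsigned int i = 0; i < sg_len; i++)
                buflen += bounce_sg[i].length;

        /* mirror of: sg_init_one(mq->sg, mq->bounce_buf, buflen); return 1; */
        printf("host driver sees 1 segment of %zu bytes\n", buflen);   /* 12800 */
        return 0;
}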
  ...
 394   338     * If writing, bounce the data to the buffer before the request
 395   339     * is sent to the host driver
 396   340     */
 397         -  void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
       341   +  void mmc_queue_bounce_pre(struct mmc_queue *mq)
 398   342    {
 399         -          if (!mqrq->bounce_buf)
 400         -                  return;
 401         -
 402         -          if (rq_data_dir(mqrq->req) != WRITE)
 403         -                  return;
 404         -
 405         -          sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
 406         -                  mqrq->bounce_buf, mqrq->sg[0].length);
       343   +          if (!mq->bounce_buf)
       344   +                  return;
       345   +
       346   +          if (rq_data_dir(mq->req) != WRITE)
       347   +                  return;
       348   +
       349   +          sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
       350   +                  mq->bounce_buf, mq->sg[0].length);
 407   351    }
 408   352
 409   353    /*
 410   354     * If reading, bounce the data from the buffer after the request
 411   355     * has been handled by the host driver
 412   356     */
 413         -  void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
       357   +  void mmc_queue_bounce_post(struct mmc_queue *mq)
 414   358    {
 415         -          if (!mqrq->bounce_buf)
 416         -                  return;
 417         -
 418         -          if (rq_data_dir(mqrq->req) != READ)
 419         -                  return;
 420         -
 421         -          sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
 422         -                  mqrq->bounce_buf, mqrq->sg[0].length);
       359   +          if (!mq->bounce_buf)
       360   +                  return;
       361   +
       362   +          if (rq_data_dir(mq->req) != READ)
       363   +                  return;
       364   +
       365   +          sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
       366   +                  mq->bounce_buf, mq->sg[0].length);
 423   367    }
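Taken together, mmc_queue_bounce_pre() and mmc_queue_bounce_post() perform exactly one
extra copy per bounced transfer, in the direction the data actually moves: a WRITE is
gathered into the bounce buffer before the request goes to the host driver, and a READ
is scattered back out only after the driver has completed it. A compact, runnable
illustration of that rule (the enum and helpers below are stand-ins, not the driver's
functions):

#include <stdio.h>
#include <string.h>

enum dir { READ_DIR, WRITE_DIR };       /* stand-ins for rq_data_dir() values */

/* copy into the bounce buffer only before a write... */
static void bounce_pre(enum dir d, char *bounce, const char *payload, size_t n)
{
        if (d == WRITE_DIR)
                memcpy(bounce, payload, n);     /* like sg_copy_to_buffer() */
}

/* ...and out of it only after a read has completed */
static void bounce_post(enum dir d, const char *bounce, char *payload, size_t n)
{
        if (d == READ_DIR)
                memcpy(payload, bounce, n);     /* like sg_copy_from_buffer() */
}

int main(void)
{
        char bounce[8] = "", data[8] = "write!";

        bounce_pre(WRITE_DIR, bounce, data, sizeof(data));
        printf("bounce buffer now holds: %s\n", bounce);

        char result[8] = "";
        strcpy(bounce, "read!");
        bounce_post(READ_DIR, bounce, result, sizeof(result));
        printf("caller buffer now holds: %s\n", result);
        return 0;
}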