 	down(&mq->thread_sem);
 	do {
 		struct request *req = NULL;
+		struct mmc_queue_req *tmp;

 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		req = blk_fetch_request(q);
+		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);

-		if (!req) {
+		if (req || mq->mqrq_prev->req) {
+			set_current_state(TASK_RUNNING);
+			mq->issue_fn(mq, req);
+		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
 				break;
 			}

-	if (!mq->req)
+	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
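
The hunks above are the heart of the change: the thread parks the fetched request in mq->mqrq_cur and keeps running as long as either the current or the previous slot holds work, which is what lets one request's data transfer overlap the preparation of the next; the tmp pointer declared above is evidently what the loop tail uses to swap mqrq_cur and mqrq_prev (the swap itself is not shown here). A minimal userspace sketch of the same two-slot rotation (slot, cur, prev and the loop are illustrative, not driver code):

	#include <stdio.h>

	struct slot { const char *req; };

	int main(void)
	{
		struct slot slots[2] = { { "request A" }, { NULL } };
		struct slot *cur = &slots[0], *prev = &slots[1];

		for (int i = 0; i < 3; i++) {
			/* issue cur while prev's transfer is still completing */
			printf("issue: %-10s previous: %s\n",
			       cur->req ? cur->req : "none",
			       prev->req ? prev->req : "none");

			/* current becomes previous and vice versa */
			prev->req = NULL;
			struct slot *tmp = prev;
			prev = cur;
			cur = tmp;
			cur->req = "request B";
		}
		return 0;
	}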

+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+	struct scatterlist *sg;
+
+	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+	if (!sg)
+		*err = -ENOMEM;
+	else {
+		*err = 0;
+		sg_init_table(sg, sg_len);
+	}
+
+	return sg;
+}
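
mmc_alloc_sg() folds the kmalloc-plus-sg_init_table sequence that now occurs once per slot into a single helper, reporting failure through the *err out-parameter so each call site collapses to one line plus a check. A runnable userspace analogue of that error-pointer pattern (alloc_ints is illustrative, not from the driver):

	#include <stdio.h>
	#include <stdlib.h>

	/* Same shape as mmc_alloc_sg: return the buffer, report status via *err. */
	static int *alloc_ints(int n, int *err)
	{
		int *p = malloc(sizeof(*p) * n);

		*err = p ? 0 : -12;	/* -ENOMEM is -12 on Linux */
		return p;
	}

	int main(void)
	{
		int err;
		int *v = alloc_ints(16, &err);

		if (err) {
			fprintf(stderr, "allocation failed: %d\n", err);
			return 1;
		}
		printf("allocated ok\n");
		free(v);
		return 0;
	}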

 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
  * @card: mmc card to attach this queue
  * @lock: queue lock
+ * @subname: partition subname
  *
  * Initialise an MMC card request queue.
  */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+		   spinlock_t *lock, const char *subname)
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
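
The two new locals point into an mq->mqrq[2] array, so each slot carries its own copy of the state that used to live once on struct mmc_queue. Judging only from the fields touched in this patch, the container looks roughly like the sketch below (member order, and any members not referenced in these hunks, are assumptions):

	struct mmc_queue_req {
		struct request		*req;		/* block request this slot is serving */
		struct scatterlist	*sg;		/* sg list handed to the host driver */
		char			*bounce_buf;	/* contiguous bounce buffer, if used */
		struct scatterlist	*bounce_sg;	/* sg list over the request's own pages */
		unsigned int		bounce_sg_len;	/* entries mapped into bounce_sg */
	};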

 		mq->queue->limits.max_discard_sectors = UINT_MAX;
 		if (card->erased_byte == 0)
 			mq->queue->limits.discard_zeroes_data = 1;
-		mq->queue->limits.discard_granularity = card->pref_erase << 9;
+		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
+			mq->queue->limits.discard_granularity =
+							card->erase_size << 9;
+			mq->queue->limits.discard_alignment =
+							card->erase_size << 9;
+		}
 		if (mmc_can_secure_erase_trim(card))
 			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
 						mq->queue);
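
card->erase_size here is in 512-byte sectors, so the << 9 shift converts it to the byte units the block layer expects for discard granularity and alignment; an erase group of 1024 sectors, for instance, advertises 524288 bytes (512 KiB). A runnable check of the arithmetic (the 1024 is an example value):

	#include <stdio.h>

	int main(void)
	{
		unsigned int erase_size = 1024;			/* example, in 512-byte sectors */
		unsigned int granularity = erase_size << 9;	/* sectors * 512 = bytes */

		printf("%u sectors -> %u bytes (%u KiB)\n",
		       erase_size, granularity, granularity / 1024);
		return 0;
	}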

 		bouncesz = host->max_blk_count * 512;

 		if (bouncesz > 512) {
-			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-			if (!mq->bounce_buf) {
-				printk(KERN_WARNING "%s: unable to "
-					"allocate bounce buffer\n",
-					mmc_card_name(card));
-			}
+			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_cur->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce cur buffer\n",
+					mmc_card_name(card));
+			}
+			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_prev->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce prev buffer\n",
+					mmc_card_name(card));
+				kfree(mqrq_cur->bounce_buf);
+				mqrq_cur->bounce_buf = NULL;
+			}
 		}

-		if (mq->bounce_buf) {
+		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);

-			mq->sg = kmalloc(sizeof(struct scatterlist),
-				GFP_KERNEL);
-			if (!mq->sg) {
-				ret = -ENOMEM;
-				goto cleanup_queue;
-			}
-			sg_init_table(mq->sg, 1);
-
-			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
-				bouncesz / 512, GFP_KERNEL);
-			if (!mq->bounce_sg) {
-				ret = -ENOMEM;
-				goto cleanup_queue;
-			}
-			sg_init_table(mq->bounce_sg, bouncesz / 512);
+			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_cur->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
+				goto cleanup_queue;
 		}
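
When the bounce path is taken, every queue limit is derived from bouncesz: a request may not exceed bouncesz / 512 sectors, and the bounce sg list needs at most bouncesz / 512 entries because no block-layer segment is smaller than one 512-byte sector. The host-facing list, by contrast, is allocated with mmc_alloc_sg(1, ...) since the controller only ever sees the single contiguous bounce buffer. A runnable illustration of the sizing (the 64 KiB figure is an example, not the driver's constant):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bouncesz = 64 * 1024;	/* example bounce buffer size */

		printf("max sectors per request: %u\n", bouncesz / 512);
		printf("max bounce sg entries:   %u\n", bouncesz / 512);
		return 0;
	}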

-	if (!mq->bounce_buf) {
+	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

-		mq->sg = kmalloc(sizeof(struct scatterlist) *
-			host->max_segs, GFP_KERNEL);
-		if (!mq->sg) {
-			ret = -ENOMEM;
-			goto cleanup_queue;
-		}
-		sg_init_table(mq->sg, host->max_segs);
+		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
+			goto cleanup_queue;
+
+		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
+			goto cleanup_queue;
 	}

 	sema_init(&mq->thread_sem, 1);

-	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
-		host->index);
+	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+		host->index, subname ? subname : "");

 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
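
With the subname appended, each partition gets a distinguishable queue thread: mmcqd/0 for a card's main area, mmcqd/0boot0 for a boot partition, and so on. A runnable sketch of the same format logic (the index and subname values are examples):

	#include <stdio.h>

	int main(void)
	{
		int host_index = 0;
		const char *subname = "boot0";	/* NULL for the main area */

		printf("mmcqd/%d%s\n", host_index, subname ? subname : "");
		return 0;
	}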

 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);

-	kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
-
-	kfree(mq->sg);
-	mq->sg = NULL;
-
-	kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
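
The cleanup path frees both slots symmetrically and NULLs every pointer so a repeated cleanup is harmless. The repetition could be factored into a per-slot helper; a hedged sketch (the helper name is hypothetical, not part of the patch):

	/* Hypothetical helper, not in the patch: release one slot's buffers.
	 * kfree(NULL) is a no-op, so this is safe on a partially set-up slot. */
	static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
	{
		kfree(mqrq->bounce_sg);
		mqrq->bounce_sg = NULL;

		kfree(mqrq->sg);
		mqrq->sg = NULL;

		kfree(mqrq->bounce_buf);
		mqrq->bounce_buf = NULL;
	}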

 /*
  * Prepare the sg list(s) to be handed off to the host driver
  */
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
 	int i;

-	if (!mq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

-	BUG_ON(!mq->bounce_sg);
+	BUG_ON(!mqrq->bounce_sg);

-	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

-	mq->bounce_sg_len = sg_len;
+	mqrq->bounce_sg_len = sg_len;

 	buflen = 0;
-	for_each_sg(mq->bounce_sg, sg, sg_len, i)
+	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
 		buflen += sg->length;

-	sg_init_one(mq->sg, mq->bounce_buf, buflen);
+	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
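
In the bounce case the host driver never sees the request's scattered pages: blk_rq_map_sg() maps them into mqrq->bounce_sg only so their total length can be summed, and sg_init_one() then presents a single segment of buflen bytes over the contiguous bounce buffer. A runnable model of that collapse (the segment sizes are examples):

	#include <stdio.h>

	int main(void)
	{
		unsigned int seg_len[] = { 4096, 4096, 512 };	/* scattered pieces */
		unsigned int buflen = 0;

		for (int i = 0; i < 3; i++)
			buflen += seg_len[i];

		/* the host is handed one segment: [bounce_buf, bounce_buf + buflen) */
		printf("collapsed 3 segments into one of %u bytes\n", buflen);
		return 0;
	}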

 /*
  * If writing, bounce the data to the buffer before the request
  * is sent to the host driver
  */
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;

-	if (rq_data_dir(mq->req) != WRITE)
+	if (rq_data_dir(mqrq->req) != WRITE)
 		return;

-	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }

 /*
  * If reading, bounce the data from the buffer after the request
  * has been handled by the host driver
  */
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;

-	if (rq_data_dir(mq->req) != READ)
+	if (rq_data_dir(mqrq->req) != READ)
 		return;

-	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }
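
The two hooks bracket the actual transfer: _pre copies the scattered pages into the bounce buffer before a WRITE reaches the host, _post copies them back out after a READ completes, and each returns early for the opposite direction. The call order in the issuing path would look roughly like this (a sketch assuming the issuer waits with mmc_wait_for_req(); the surrounding card and mrq variables are assumed context, not a verbatim hunk):

	mmc_queue_bounce_pre(mqrq);		/* WRITE: sg pages -> bounce_buf */
	mmc_wait_for_req(card->host, &mrq);	/* controller transfers bounce_buf */
	mmc_queue_bounce_post(mqrq);		/* READ: bounce_buf -> sg pages */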