/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"
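
/*
 * Transfers shorter than this many bytes are handed back to PIO -
 * presumably too short to be worth the DMA setup overhead.
 */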
#define TMIO_MMC_MIN_DMA_LEN 8

static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}

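/*
 * Prepare and submit a dmaengine descriptor for a card-to-host transfer.
 * Unsuitable scatterlists fall back to PIO; a failed descriptor setup
 * releases both channels and disables DMA on this host for good.
 */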
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

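	/*
	 * Check whether the engine can handle this scatterlist: every
	 * element's offset must be aligned, and every element's length
	 * must be a multiple of the platform's alignment constraint.
	 */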
	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* Only a single sg element is allowed to be unaligned - use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
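	/*
	 * dma_map_sg() may coalesce entries - program the engine with the
	 * mapped count it returns, not with host->sg_len.
	 */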
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

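/*
 * The host-to-card path mirrors tmio_mmc_start_dma_rx() above, with one
 * extra step: when the bounce buffer is used, the payload has to be
 * copied into it before the descriptor is submitted.
 */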
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* Only a single sg element is allowed to be unaligned - copy it via our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

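/*
 * Entry point from the request path: pick the channel matching the
 * transfer direction. With no channel allocated (or after a DMA failure
 * released them) the transfer simply proceeds via PIO.
 */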
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

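/*
 * Softirq context: kick the queued descriptor once the command phase is
 * over - presumably scheduled via host->dma_issue from the command
 * completion IRQ - and re-arm the DATAEND interrupt to catch the end of
 * the transfer.
 */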
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

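/*
 * Softirq context: the data transfer is over, so unmap the scatterlist
 * and let the core complete the data stage. host->lock serializes this
 * against the interrupt handler.
 */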
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

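/*
 * dmaengine filter callback: accept whatever channel the core offers
 * and hand it the platform-supplied slave data via chan->private, for
 * the engine driver to interpret.
 */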
/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);

	chan->private = arg;
	return true;
}

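/*
 * Allocate the Tx and Rx channels and the bounce page. This is an
 * all-or-nothing affair: if either channel or the bounce buffer cannot
 * be obtained, everything is torn down again and the host stays on PIO.
 */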
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma)
		return;

	if (!host->chan_tx && !host->chan_rx) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

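		/*
		 * One bounce page, from ZONE_DMA so that it is addressable
		 * even by engines with DMA addressing restrictions.
		 */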
		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

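/*
 * Undo tmio_mmc_request_dma(): clear each channel pointer before
 * releasing the channel, then free the bounce page.
 */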
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}