 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */

#define NR_DESCS_PER_CHANNEL 32

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
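
/*
 * Note: sh_dc->base and shdev->chan_reg are u32 __iomem pointers, which is
 * why the byte offsets of the registers (SAR, DAR, TCR, CHCR, DMAOR, ...)
 * are divided by sizeof(u32) before being added to the base in the
 * accessors above and below.
 */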
static u16 dmaor_read(struct sh_dmae_device *shdev)
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);

	return __raw_readw(addr);

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);

	__raw_writew(data, addr);

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
	unsigned short dmaor;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
static int sh_dmae_rst(struct sh_dmae_device *shdev)
	unsigned short dmaor;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)

	return pdata->ts_shift[cnt];

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)

	if (i == pdata->ts_shift_num)

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
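
/*
 * The transfer-size (TS) field of CHCR is split into a low and a high part;
 * the platform data supplies the masks and shifts for both halves, and
 * pdata->ts_shift[] maps the combined TS value to log2 of the transfer size
 * in bytes. calc_xmit_shift() decodes CHCR, log2size_to_chcr() encodes it.
 */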
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
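
/*
 * hw->tcr is kept in bytes, while the TCR register counts transfer units of
 * 2^xmit_shift bytes each, hence the shift when programming the register.
 */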
static void dmae_start(struct sh_dmae_chan *sh_chan)
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);

static void dmae_halt(struct sh_dmae_chan *sh_chan)
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);

static void dmae_init(struct sh_dmae_chan *sh_chan)
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))

	/* In the case of a missing DMARS resource, use the first memory window */
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
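
/*
 * Each 16-bit DMARS register appears to hold the MID/RID values of two
 * channels: chan_pdata->dmars_bit selects this channel's byte lane, and the
 * (0xff00 >> shift) mask preserves the other half while the new value is
 * written into our own.
 */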
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	struct sh_dmae_slave *param = tx->chan->private;
	dma_async_tx_callback callback = tx->callback;

	spin_lock_irq(&sh_chan->desc_lock);

	if (list_empty(&sh_chan->ld_queue))

	cookie = sh_chan->common.cookie;

	sh_chan->common.cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))

		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	sh_chan->pm_state = DMAE_PM_BUSY;

	pm_runtime_get(sh_chan->dev);

	spin_unlock_irq(&sh_chan->desc_lock);

	pm_runtime_barrier(sh_chan->dev);

	spin_lock_irq(&sh_chan->desc_lock);

	/* Have we been reset while waiting? */
	if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
		dev_dbg(sh_chan->dev, "Bring up channel %d\n",

		const struct sh_dmae_slave_config *cfg =

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);

		if (sh_chan->pm_state == DMAE_PM_PENDING)
			sh_chan_xfer_ld_queue(sh_chan);
		sh_chan->pm_state = DMAE_PM_ESTABLISHED;

	spin_unlock_irq(&sh_chan->desc_lock);
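
/*
 * Runtime PM handshake: sh_dmae_tx_submit() calls pm_runtime_get() when a
 * descriptor chain is queued and marks the channel DMAE_PM_BUSY, while
 * __ld_cleanup() below calls pm_runtime_put() once ld_queue runs empty
 * ("Bring down channel"), so the controller only stays powered while
 * transfers are pending.
 */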
/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;

	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	const struct sh_dmae_slave_config *cfg;

	cfg = sh_dmae_find_slave(sh_chan, param);

	if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {

	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);

		dma_async_tx_descriptor_init(&desc->async_tx,
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;

	if (!sh_chan->descs_allocated) {

	return sh_chan->descs_allocated;

	clear_bit(param->slave_id, sh_dmae_slave_used);

	chan->private = NULL;
 * sh_dmae_free_chan_resources - Free all resources of the channel.
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);

	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;

		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;

	spin_lock_irq(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_irq(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 *
 * Returns the prepared descriptor or NULL on error
 * Locks: called with desc_lock held
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
		dev_err(sh_chan->dev, "No free link descriptor available\n");

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.tcr = copy_size;

		new->async_tx.cookie = -EBUSY;

		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
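
/*
 * A transfer longer than SH_DMA_TCR_MAX + 1 bytes is split across several
 * link descriptors: copy_size caps each chunk, and, per the kerneldoc above,
 * the source and destination addresses are advanced between chunks depending
 * on the transfer direction.
 */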
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
 * the source buffer.
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	unsigned long irq_flags;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);

	 * The first descriptor is what the user deals with in all API calls; its
	 *	cookie is at first set to -EBUSY, and at tx-submit to a positive number;
	 * if more than one chunk is needed, further chunks have cookie = -EINVAL;
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC;
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain.
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
			i, sg, len, (unsigned long long)sg_addr);

		if (direction == DMA_FROM_DEVICE)
			new = sh_dmae_add_desc(sh_chan, flags,
					&sg_addr, addr, &len, &first,
			new = sh_dmae_add_desc(sh_chan, flags,
					addr, &sg_addr, &len, &first,

		new->chunks = chunks--;
		list_add_tail(&new->node, &tx_list);

	new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return &first->async_tx;

	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;

	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
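
/*
 * Illustrative only (not part of this driver): a minimal sketch of how a
 * generic dmaengine client might reach the prep/submit/issue callbacks
 * implemented here, assuming a kernel of the same vintage as this code;
 * dst, src and len are placeholder DMA addresses/length supplied by the
 * caller.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * which ends up in sh_dmae_prep_memcpy(), sh_dmae_tx_submit() and
 * sh_dmae_memcpy_issue_pending() respectively.
 */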
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);

	slave_addr = param->config->addr;

	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)

	spin_lock_irqsave(&sh_chan->desc_lock, flags);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	sh_dmae_chan_ld_cleanup(sh_chan, true);
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			head_acked = async_tx_test_ack(tx);

			switch (desc->mark) {
				desc->mark = DESC_WAITING;

				async_tx_ack(&desc->async_tx);

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &sh_chan->ld_free);

			if (list_empty(&sh_chan->ld_queue)) {
				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
				pm_runtime_put(sh_chan->dev);

	if (all && !callback)
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		sh_chan->completed_cookie = sh_chan->common.cookie;

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
	while (__ld_cleanup(sh_chan, all))
/* Called under spin_lock_irq(&sh_chan->desc_lock) */
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
	struct sh_desc *desc;

	if (dmae_is_busy(sh_chan))

	/* Find the first descriptor that has not been transferred yet */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	spin_lock_irq(&sh_chan->desc_lock);
	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
		sh_chan_xfer_ld_queue(sh_chan);

	sh_chan->pm_state = DMAE_PM_PENDING;
	spin_unlock_irq(&sh_chan->desc_lock);
static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					 struct dma_tx_state *txstate)
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	/* First read completed cookie to avoid a skew */
	last_complete = sh_chan->completed_cookie;

	last_used = chan->cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_irqsave(&sh_chan->desc_lock, flags);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	 * If we don't find cookie on the queue, it has been aborted and we have
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;

		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
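
/*
 * Status reporting: completed vs. issued cookies are compared with
 * dma_async_is_complete(); if that does not report DMA_SUCCESS, the queue is
 * scanned and a cookie that is still sitting on ld_queue is reported as
 * DMA_IN_PROGRESS.
 */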
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		tasklet_schedule(&sh_chan->tasklet);

	spin_unlock(&sh_chan->desc_lock);

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
	unsigned int handled = 0;

	/* Halt the DMA controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		list_splice_init(&sh_chan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
			pm_runtime_put(sh_chan->dev);

		sh_chan->pm_state = DMAE_PM_ESTABLISHED;

		spin_unlock(&sh_chan->desc_lock);

		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
				tx->callback(tx->callback_param);

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);
static irqreturn_t sh_dmae_err(int irq, void *data)
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))

static void dmae_do_tasklet(unsigned long data)
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock_irq(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
			desc->mark = DESC_COMPLETED;

	sh_chan_xfer_ld_queue(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, false);
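
/*
 * Completion detection in the tasklet works by address: the channel's
 * current SAR/DAR are compared against each submitted descriptor's start
 * address plus length, the descriptor whose end address matches is marked
 * DESC_COMPLETED, the next queued descriptor is started, and
 * sh_dmae_chan_ld_cleanup() then handles callbacks and descriptor recycling.
 */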
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)

	return sh_dmae_reset(shdev);

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;

	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		 * Only stop if one of the controllers has NMIF asserted;
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call = sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");

	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);

	shdev->chan[id] = new_sh_chan;

	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);

			shdev->chan[i] = NULL;

	shdev->common.chancnt = 0;
static int __init sh_dmae_probe(struct platform_device *pdev)
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
		dev_err(&pdev->dev, "Not enough memory\n");

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)

	shdev->dmars = ioremap(dmars->start, resource_size(dmars));

	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;

	shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;

	shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);
	/* Reset DMA controller - only needed as a test */
	err = sh_dmae_rst(shdev);

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory fields for DMA_SLAVE */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	chanirq_res = errirq_res;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
			"DMA failed requesting irq #%d, error %d\n",

	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;

		for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
			if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {

			if ((errirq_res->flags & IORESOURCE_BITS) ==
			    IORESOURCE_IRQ_SHAREABLE)
				chan_flag[irq_cnt] = IRQF_SHARED;

				chan_flag[irq_cnt] = IRQF_DISABLED;

				"Found IRQ %d for channel %d\n",
			chan_irq[irq_cnt++] = i;

		if (irq_cnt >= SH_DMAC_MAX_CHANNELS)

		chanirq_res = platform_get_resource(pdev,
						    IORESOURCE_IRQ, ++irqres);
	} while (irq_cnt < pdata->channel_num && chanirq_res);

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
			goto chan_probe_err;

	dev_notice(&pdev->dev, "Attempting to register %d DMA "
		   "channels when a maximum of %d are supported.\n",
		   pdata->channel_num, SH_DMAC_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	dma_async_device_register(&shdev->common);
1346
sh_dmae_chan_remove(shdev);
1348
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1349
free_irq(errirq, shdev);
1353
spin_lock_irq(&sh_dmae_lock);
1354
list_del_rcu(&shdev->node);
1355
spin_unlock_irq(&sh_dmae_lock);
1357
pm_runtime_put(&pdev->dev);
1358
pm_runtime_disable(&pdev->dev);
1361
iounmap(shdev->dmars);
1363
platform_set_drvdata(pdev, NULL);
1365
iounmap(shdev->chan_reg);
1371
release_mem_region(dmars->start, resource_size(dmars));
1373
release_mem_region(chan->start, resource_size(chan));
1378
static int __exit sh_dmae_remove(struct platform_device *pdev)
1380
struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1381
struct resource *res;
1382
int errirq = platform_get_irq(pdev, 0);
1384
dma_async_device_unregister(&shdev->common);
1387
free_irq(errirq, shdev);
1389
spin_lock_irq(&sh_dmae_lock);
1390
list_del_rcu(&shdev->node);
1391
spin_unlock_irq(&sh_dmae_lock);
1393
/* channel data remove */
1394
sh_dmae_chan_remove(shdev);
1396
pm_runtime_disable(&pdev->dev);
1399
iounmap(shdev->dmars);
1400
iounmap(shdev->chan_reg);
1402
platform_set_drvdata(pdev, NULL);
1407
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1409
release_mem_region(res->start, resource_size(res));
1410
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1412
release_mem_region(res->start, resource_size(res));
static void sh_dmae_shutdown(struct platform_device *pdev)
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);

static int sh_dmae_runtime_suspend(struct device *dev)

static int sh_dmae_runtime_resume(struct device *dev)
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);

static int sh_dmae_suspend(struct device *dev)
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan->descs_allocated)
			sh_chan->pm_error = pm_runtime_put_sync(dev);

static int sh_dmae_resume(struct device *dev)
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_dmae_slave *param = sh_chan->common.private;

		if (!sh_chan->descs_allocated)

		if (!sh_chan->pm_error)
			pm_runtime_get_sync(dev);

			const struct sh_dmae_slave_config *cfg = param->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL

const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",

static int __init sh_dmae_init(void)
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");