 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
/* Front-end to the DMA handling. This handles the allocation/freeing
 * of DMA channels, and provides a unified interface to the machines
 * @file plat-mxc/dma_mx2.c
 * @brief This file contains functions for DMA API
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/proc_fs.h>
#include <mach/hardware.h>
#include <asm/delay.h>
#include <asm/atomic.h>
/* commented temporarily for mx27 compilation
#include <mach/apmc.h>
struct apmc_user *dma_apmc_user;
struct pm_dev *dma_pm;
#define DMA_PMST_RESUME 0
#define DMA_PMST_STANDBY 1
#define DMA_PMST_SUSPEND 2
static unsigned int dma_pm_status = DMA_PMST_RESUME;
 * This variable is used to control the clock of DMA.
 * It counts the number of active channels
static atomic_t g_dma_actived = ATOMIC_INIT(0);
 * This variable points to a proc file which contains the information
static struct proc_dir_entry *g_proc_dir;
static mxc_dma_channel_t g_dma_channels[MAX_DMA_CHANNELS];
static mx2_dma_priv_t g_dma_privates[MXC_DMA_CHANNELS];
static mx2_dma_bd_t g_dma_bd_table[MXC_DMA_CHANNELS][MAX_BD_SIZE];
static DEFINE_SPINLOCK(dma_list_lock);
static struct clk *dma_clk;
/*!@brief flush buffer descriptor ring */
#define flush_dma_bd(private) \
atomic_set(&(private->bd_used), 0); \
private->bd_rd = private->bd_wr;\
/*!@brief get next buffer descriptor */
#define next_dma_bd(private) \
int bd_next = (private->bd_rd+1)%MAX_BD_SIZE; \
(bd_next == private->bd_wr) ? NULL: private->bd_ring+bd_next;\
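/*
 * Illustrative sketch only (guarded by #if 0, not compiled): bd_rd and bd_wr
 * index a fixed-size ring of MAX_BD_SIZE descriptors, both advancing modulo
 * MAX_BD_SIZE, while bd_used counts the occupied entries. The hypothetical
 * helpers below only demonstrate the index arithmetic the macros above rely
 * on; ring_rd/ring_wr/ring_push/ring_pop do not exist in this driver.
 */
#if 0
static int ring_rd, ring_wr;	/* hypothetical read/write indexes */

static int ring_push(void)
{
	int next = (ring_wr + 1) % MAX_BD_SIZE;

	if (next == ring_rd)
		return -EBUSY;	/* ring full */
	ring_wr = next;
	return 0;
}

static int ring_pop(void)
{
	if (ring_rd == ring_wr)
		return -EAGAIN;	/* ring empty */
	ring_rd = (ring_rd + 1) % MAX_BD_SIZE;
	return 0;
}
#endif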
static inline int consume_dma_bd(mxc_dma_channel_t * dma, int error);
*@brief allocate a dma channel.
*@param idx Requested channel number.
 * @li MXC_INVLAID_CHANNEL System allocates a free channel which is not statically allocated.
 * @li Others User requests a specific channel
*@return @li MXC_INVLAID_CHANNEL Failure
static inline int get_dma_channel(int idx)
mxc_dma_channel_t *p;
if ((idx >= MAX_DMA_CHANNELS) && (idx != MXC_DMA_DYNAMIC_CHANNEL)) {
if (idx != MXC_DMA_DYNAMIC_CHANNEL) {
p = g_dma_channels + idx;
BUG_ON(p->dynamic != 0);
if (xchg(&p->lock, 1) != 0) {
for (i = 0; (i < MAX_DMA_CHANNELS); i++, p++) {
if (p->dynamic && (xchg(&p->lock, 1) == 0)) {
*@brief release a dma channel.
*@param idx channel number
static inline void put_dma_channel(int idx)
mxc_dma_channel_t *p;
if ((idx < MAX_DMA_CHANNELS) && (idx >= 0)) {
p = g_dma_channels + idx;
(void)xchg(&p->lock, 0);
*@brief Get dma list for /proc/dma
static int mxc_get_dma_list(char *buf)
mxc_dma_channel_t *dma;
for (i = 0, dma = g_dma_channels; i < MAX_DMA_CHANNELS; i++, dma++) {
p += sprintf(p, "dma channel %2d: %s\n", i,
dma->dev_name ? dma->dev_name : "unknown");
p += sprintf(p, "dma channel %2d: unused\n", i);
/*!@brief save the mask of dma interrupts*/
#define save_dma_interrupt(flags) \
flags = __raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DIMR)
/*!@brief restore the mask of dma interrupts*/
#define restore_dma_interrupt(flags) \
__raw_writel(flags, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DIMR)
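/*
 * Illustrative only (guarded by #if 0, not compiled): how the two macros
 * above are paired by callers such as fill_dma_bd() further down, which
 * saves the global interrupt mask, masks one channel around a critical
 * section and then restores the saved mask. The function name and the
 * channel argument below are hypothetical.
 */
#if 0
static void example_masked_section(int channel)
{
	unsigned long mask;

	save_dma_interrupt(mask);
	mask_dma_interrupt(channel);	/* defined just below */
	/* ... manipulate the buffer descriptor ring here ... */
	restore_dma_interrupt(mask);
}
#endif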
/*!@brief disable interrupt of dma channel */
static inline void mask_dma_interrupt(int channel)
save_dma_interrupt(reg);
reg |= 1 << channel; /* mask interrupt */
restore_dma_interrupt(reg);
/*!@brief enable interrupt of dma channel */
static inline void unmask_dma_interrupt(int channel)
save_dma_interrupt(reg);
reg &= ~(1 << channel); /* unmask interrupt */
restore_dma_interrupt(reg);
/*!@brief get interrupt event of dma channel */
static inline unsigned long __get_dma_interrupt(int channel)
if (__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DISR) & (1 << channel))
if (__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DBTOSR) &
mode |= DMA_BURST_TIMEOUT;
if (__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DSESR) & (1 << channel))
mode |= DMA_TRANSFER_ERROR;
if (__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DBOSR) & (1 << channel))
mode |= DMA_BUFFER_OVERFLOW;
if (__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DRTOSR) &
mode |= DMA_REQUEST_TIMEOUT;
*@brief clear all events of the dma interrupt and return the valid events.
static inline unsigned long __clear_dma_interrupt(int channel)
mode = __get_dma_interrupt(channel);
__raw_writel(1 << channel, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DISR);
__raw_writel(1 << channel, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DBTOSR);
__raw_writel(1 << channel, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DRTOSR);
__raw_writel(1 << channel, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DSESR);
__raw_writel(1 << channel, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DBOSR);
/*!@brief This function enables dma clocks without lock */
static inline void __enable_dma_clk(void)
reg = __raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DCR);
__raw_writel(reg, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DCR);
/*!@brief This function disables dma clocks without lock */
static inline void __disable_dma_clk(void)
reg = __raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DCR);
__raw_writel(reg, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DCR);
clk_disable(dma_clk);
/*!@brief This function enables dma clocks with lock */
static inline void enable_dma_clk(void)
spin_lock_irqsave(&dma_list_lock, flags);
if (atomic_read(&g_dma_actived) == 0) {
spin_unlock_irqrestore(&dma_list_lock, flags);
/*!@brief This function disables dma clocks with lock */
static inline void disable_dma_clk(void)
spin_lock_irqsave(&dma_list_lock, flags);
if (atomic_read(&g_dma_actived) == 0) {
spin_unlock_irqrestore(&dma_list_lock, flags);
/*!@brief select a buffer to transfer and
 * setup dma channel for current transfer
static void setup_dmac(mxc_dma_channel_t * dma)
mx2_dma_priv_t *priv = (mx2_dma_priv_t *) dma->private;
dma_regs_t *dma_base = (dma_regs_t *) (priv->dma_base);
unsigned long ctrl_val;
if (dma->active == 0) {
"dma channel %d is not enabled, but received this channel's interrupt\n",
if (atomic_read(&(priv->bd_used)) <= 0) {
printk(KERN_ERR "dma channel %d is empty\n", dma->channel);
atomic_dec(&g_dma_actived);
 * PEND: Waiting to be set into the DMAC.
 * s1: not transferring:
 * set the first (one BUSY). If there is more than one transfer, set the second and enable repeat (two BUSY).
 * s2: transferring & just one transfer
 * one BUSY. Set the transfer and set the repeat bit (two BUSY).
 * s3: transferring & repeat already set
p = priv->bd_ring + priv->bd_rd;
q = next_dma_bd(priv);
if (!(p->state & DMA_BD_ST_BUSY)) {
/* NOTICE: this is the first buffer, or the dma channel does not support chain-buffers, so CEN must be cleared and set again */
__raw_readl(&(dma_base->Ctl)) &
(~(DMA_CTL_ACRPT | DMA_CTL_RPT | DMA_CTL_CEN));
__raw_writel(ctrl_val, &(dma_base->Ctl));
if (p->mode != dma->mode) {
dma->mode = p->mode; /* bi-directional channel: do mode change */
if (dma->mode == MXC_DMA_MODE_READ) {
DMA_CTL_SET_SMOD(ctrl_val,
priv->dma_info->sourceType);
DMA_CTL_SET_SSIZ(ctrl_val,
priv->dma_info->sourcePort);
DMA_CTL_SET_DMOD(ctrl_val,
priv->dma_info->destType);
DMA_CTL_SET_DSIZ(ctrl_val,
priv->dma_info->destPort);
DMA_CTL_SET_SMOD(ctrl_val,
priv->dma_info->destType);
DMA_CTL_SET_SSIZ(ctrl_val,
priv->dma_info->destPort);
DMA_CTL_SET_DMOD(ctrl_val,
priv->dma_info->sourceType);
DMA_CTL_SET_DSIZ(ctrl_val,
priv->dma_info->sourcePort);
__raw_writel(p->src_addr, &(dma_base->SourceAddr));
__raw_writel(p->dst_addr, &(dma_base->DestAddr));
__raw_writel(p->count, &(dma_base->Count));
p->state |= DMA_BD_ST_BUSY;
p->state &= ~(DMA_BD_ST_PEND);
ctrl_val |= DMA_CTL_CEN;
__raw_writel(ctrl_val, &(dma_base->Ctl));
if (q && priv->dma_chaining) { /* do chain-buffer */
__raw_writel(q->src_addr, &(dma_base->SourceAddr));
__raw_writel(q->dst_addr, &(dma_base->DestAddr));
__raw_writel(q->count, &(dma_base->Count));
q->state |= DMA_BD_ST_BUSY;
q->state &= ~(DMA_BD_ST_PEND);
ctrl_val |= DMA_CTL_ACRPT | DMA_CTL_RPT | DMA_CTL_CEN;
__raw_writel(ctrl_val, &(dma_base->Ctl));
} else { /* only a dma channel that supports chain-buffers can get here */
BUG_ON(!priv->dma_chaining);
if (q) { /* p is transferring, so q must be set into the dma controller */
/* WARNING: [1] dangerous area begins.
 * If p completes while the MCU is running in this area, the dma channel is corrupted.
__raw_writel(q->src_addr, &(dma_base->SourceAddr));
__raw_writel(q->dst_addr, &(dma_base->DestAddr));
__raw_writel(q->count, &(dma_base->Count));
/* WARNING: [2] dangerous area ends */
__raw_readl(&(dma_base->Ctl)) | (DMA_CTL_ACRPT |
__raw_writel(ctrl_val, &(dma_base->Ctl));
/* WARNING: this is a workaround and it is dangerous:
 * the check is not safe.
if (!__get_dma_interrupt(dma->channel)) {
q->state |= DMA_BD_ST_BUSY;
q->state &= ~(DMA_BD_ST_PEND);
/* Waiting for re-enable in the ISR */
"Warning: the previous transfer has completed. The chain buffer may have stopped.");
} else { /* Last buffer is transferring: just clear the RPT bit */
__raw_readl(&(dma_base->Ctl)) &
(~(DMA_CTL_ACRPT | DMA_CTL_RPT));
__raw_writel(ctrl_val, &(dma_base->Ctl));
 * @brief interrupt handler of dma channel
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
mxc_dma_channel_t *dma = (mxc_dma_channel_t *) dev_id;
mx2_dma_priv_t *priv = (mx2_dma_priv_t *) (dma ? dma->private : NULL);
dma_regs_t *dma_base;
int state, error = MXC_DMA_DONE;
BUG_ON(priv == NULL);
dma_base = (dma_regs_t *) priv->dma_base;
state = __clear_dma_interrupt(dma->channel);
priv->trans_bytes += dma_base->transferd;
if (state != DMA_DONE) {
if (state & DMA_REQUEST_TIMEOUT) {
error = MXC_DMA_REQUEST_TIMEOUT;
error = MXC_DMA_TRANSFER_ERROR;
if (consume_dma_bd(dma, error)) {
dma->cb_fn(dma->cb_args, error, priv->trans_bytes);
priv->trans_bytes = 0;
*@brief Set DMA channel parameters
*@param dma Requested channel
*@param dma_info Channel configuration
*@return @li 0 Success
static int setup_dma_channel(mxc_dma_channel_t * dma, mx2_dma_info_t * dma_info)
mx2_dma_priv_t *priv = (mx2_dma_priv_t *) (dma ? dma->private : NULL);
dma_regs_t *dma_base;
if (!dma_info || !priv) {
if (dma_info->sourceType > 3) {
if (dma_info->destType > 3) {
if (dma_info->destPort > 3) {
if (dma_info->sourcePort > 3) {
if (dma_info->M2D_Valid) {
/* add for second dma */
if (dma_info->W < dma_info->X) {
priv->dma_chaining = dma_info->dma_chaining;
priv->ren = dma_info->ren;
if (dma_info->sourceType != DMA_TYPE_FIFO
&& dma_info->destType != DMA_TYPE_FIFO) {
"Warning: request enable only takes effect when the source or destination port is a FIFO!\n");
if (dma_info->M2D_Valid) {
if (dma_info->msel) {
__raw_writel(dma_info->W,
IO_ADDRESS(DMA_BASE_ADDR) + DMA_WSRB);
__raw_writel(dma_info->X,
IO_ADDRESS(DMA_BASE_ADDR) + DMA_XSRB);
__raw_writel(dma_info->Y,
IO_ADDRESS(DMA_BASE_ADDR) + DMA_YSRB);
__raw_writel(dma_info->W,
IO_ADDRESS(DMA_BASE_ADDR) + DMA_WSRA);
__raw_writel(dma_info->X,
IO_ADDRESS(DMA_BASE_ADDR) + DMA_XSRA);
__raw_writel(dma_info->Y,
IO_ADDRESS(DMA_BASE_ADDR) + DMA_YSRA);
dma_base = (dma_regs_t *) (priv->dma_base);
__raw_writel(dma_info->burstLength, &(dma_base->BurstLength));
__raw_writel(dma_info->request, &(dma_base->RequestSource));
reg = dma_info->busuntils & 0x1FFFF;
if (dma_info->rto_en) {
__raw_writel(reg, &(dma_base->BusUtilt));
__raw_writel(dma_info->busuntils, &(dma_base->BusUtilt));
reg = __raw_readl(&(dma_base->Ctl)) & (~(DMA_CTL_ACRPT | DMA_CTL_RPT));
reg &= ~DMA_CTL_MDIR;
if ((dma_info->M2D_Valid) && (dma_info->msel)) {
reg &= ~DMA_CTL_MSEL;
if (dma_info->mode) {
DMA_CTL_SET_SMOD(reg, dma_info->destType);
DMA_CTL_SET_SSIZ(reg, dma_info->destPort);
DMA_CTL_SET_DMOD(reg, dma_info->sourceType);
DMA_CTL_SET_DSIZ(reg, dma_info->sourcePort);
DMA_CTL_SET_SMOD(reg, dma_info->sourceType);
DMA_CTL_SET_SSIZ(reg, dma_info->sourcePort);
DMA_CTL_SET_DMOD(reg, dma_info->destType);
DMA_CTL_SET_DSIZ(reg, dma_info->destPort);
__raw_writel(reg, &(dma_base->Ctl));
__clear_dma_interrupt(dma->channel);
unmask_dma_interrupt(dma->channel);
/*!@brief set up the interrupt and set up the dma channel from the dma parameters */
static inline int __init_dma_channel(mxc_dma_channel_t * chan,
mx2_dma_info_t * dma_info)
mx2_dma_priv_t *dma_private = (mx2_dma_priv_t *) chan->private;
dma_regs_t *dma_base;
mask_dma_interrupt(chan->channel);
request_irq(dma_private->dma_irq, dma_irq_handler,
IRQF_DISABLED | IRQF_SHARED, chan->dev_name,
"%s: unable to request IRQ %d for DMA channel\n",
chan->dev_name, dma_private->dma_irq);
dma_base = (dma_regs_t *) (dma_private->dma_base);
__raw_writel(0, &(dma_base->Ctl));
if ((ret = setup_dma_channel(chan, dma_info))) {
free_irq(dma_private->dma_irq, (void *)chan);
/*!@brief initialize buffer descriptor ring. */
static inline void init_dma_bd(mx2_dma_priv_t * private)
private->bd_rd = private->bd_wr = 0;
atomic_set(&(private->bd_used), 0);
for (i = 0, pbd = private->bd_ring; i < MAX_BD_SIZE; i++, pbd++) {
/*!@brief add dma buffers into the buffer descriptor ring */
static inline int fill_dma_bd(mxc_dma_channel_t * dma,
mxc_dma_requestbuf_t * buf, int num,
unsigned long flags, mask;
mx2_dma_priv_t *priv = dma->private;
if ((atomic_read(&(priv->bd_used)) + num) > MAX_BD_SIZE) {
for (i = 0; i < num; i++) {
p = priv->bd_ring + wr;
p->count = buf[i].num_of_bytes;
p->src_addr = buf[i].src_addr;
p->dst_addr = buf[i].dst_addr;
p->state = DMA_BD_ST_LAST | DMA_BD_ST_PEND;
p->state = DMA_BD_ST_PEND;
priv->bd_wr = (wr + 1) % MAX_BD_SIZE;
atomic_inc(&(priv->bd_used));
if (atomic_read(&(priv->bd_used)) != 2)
/* Disable the interrupt of this channel */
local_irq_save(flags);
save_dma_interrupt(mask);
mask_dma_interrupt(dma->channel);
local_irq_restore(flags);
 * If the channel is transferring and supports chain-buffers,
 * repeat must be enabled when the new buffer is the 2nd buffer
if (priv->dma_chaining && dma->active) {
q = priv->bd_ring + priv->bd_rd;
if (q && (q->state & DMA_BD_ST_BUSY)) {
if (atomic_read(&(priv->bd_used)) == 2) {
restore_dma_interrupt(mask);
/*!@brief add an sg-list into the buffer descriptor ring */
static inline int fill_dma_bd_by_sg(mxc_dma_channel_t * dma,
struct scatterlist *sg, int num,
int real_bytes, mxc_dma_mode_t mode)
int i, wr, total_bytes = real_bytes;
unsigned long flags, mask;
mx2_dma_priv_t *priv = dma->private;
if ((atomic_read(&(priv->bd_used)) + num) > MAX_BD_SIZE) {
for (i = 0; i < num && ((real_bytes <= 0) || (total_bytes > 0)); i++) {
p = priv->bd_ring + wr;
if (real_bytes > 0) {
if (sg[i].length >= total_bytes) {
p->count = total_bytes;
p->count = sg[i].length;
total_bytes -= p->count;
p->count = sg[i].length;
if (mode == MXC_DMA_MODE_READ) {
p->src_addr = priv->dma_info->per_address;
p->dst_addr = sg[i].dma_address;
p->dst_addr = priv->dma_info->per_address;
p->src_addr = sg[i].dma_address;
if ((i == num - 1) || ((real_bytes > 0) && (total_bytes == 0))) {
p->state = DMA_BD_ST_LAST | DMA_BD_ST_PEND;
p->state = DMA_BD_ST_PEND;
priv->bd_wr = (wr + 1) % MAX_BD_SIZE;
atomic_inc(&(priv->bd_used));
if (atomic_read(&(priv->bd_used)) != 2)
/* Disable the interrupt of this channel */
local_irq_save(flags);
save_dma_interrupt(mask);
mask_dma_interrupt(dma->channel);
local_irq_restore(flags);
 * If the channel is transferring and supports chain-buffers,
 * repeat must be enabled when the new buffer is the 2nd buffer
if (priv->dma_chaining && dma->active) {
q = next_dma_bd(priv);
if (q && (q->state & DMA_BD_ST_BUSY)) {
if ((atomic_read(&(priv->bd_used))) == 2) {
restore_dma_interrupt(mask);
/*!@brief select the next buffer descriptor to transfer.
 * return 1: the callback function needs to be called. 0: no callback needed.
 * it is only called from the ISR
static inline int consume_dma_bd(mxc_dma_channel_t * dma, int error)
mx2_dma_priv_t *priv = dma->private;
"request dma channel %d which has not completed initialization!\n",
if (error != MXC_DMA_DONE) {
for (p = priv->bd_ring + priv->bd_rd;
atomic_read(&(priv->bd_used)) > 0;) {
priv->bd_rd = (priv->bd_rd + 1) % MAX_BD_SIZE;
atomic_dec(&(priv->bd_used));
if (p->state & DMA_BD_ST_LAST) {
p = priv->bd_ring + priv->bd_rd;
priv->bd_rd = (priv->bd_rd + 1) % MAX_BD_SIZE;
atomic_dec(&(priv->bd_used));
notify = (p->state & DMA_BD_ST_LAST) == DMA_BD_ST_LAST;
if (atomic_read(&(priv->bd_used)) <= 0) {
atomic_dec(&g_dma_actived);
 * This function is generally called by the driver at open time.
 * The DMA driver would do any initialization steps that are required
 * to get the channel ready for data transfer.
 * @param channel_id a pre-defined id. The peripheral driver would specify
 * the id associated with its peripheral. This would be
 * used by the DMA driver to identify the peripheral
 * requesting DMA and do the necessary setup on the
 * channel associated with the particular peripheral.
 * The DMA driver could use static or dynamic DMA channel
 * @param dev_name module name or device name
 * @return returns a negative number on error if the request for a DMA channel did not
 * succeed, returns the channel number to be used on success.
int mxc_dma_request_ext(mxc_dma_device_t channel_id, char *dev_name,
struct dma_channel_info *info)
mxc_dma_channel_t *dma;
mx2_dma_priv_t *dma_private = NULL;
mx2_dma_info_t *dma_info = mxc_dma_get_info(channel_id);
if (dma_info == NULL) {
if ((index = get_dma_channel(dma_info->dma_chan)) < 0) {
dma = g_dma_channels + index;
dma_private = (mx2_dma_priv_t *) dma->private;
if (dma_private == NULL) {
"request dma channel %d which has not completed initialization!\n",
dma_private->dma_info = NULL;
dma->dev_name = dev_name;
dma->mode = dma_info->mode ? MXC_DMA_MODE_WRITE : MXC_DMA_MODE_READ;
init_dma_bd(dma_private);
if (!(ret = __init_dma_channel(dma, dma_info))) {
dma_private->dma_info = dma_info;
put_dma_channel(index);
 * This function is generally called by the driver at close time. The DMA
 * driver would do any cleanup associated with this channel.
 * @param channel_num the channel number returned at request time. This
 * would be used by the DMA driver to identify the calling
 * driver and do the necessary cleanup on the channel
 * associated with the particular peripheral
 * @return returns a negative number on error or 0 on success
int mxc_dma_free(int channel_num)
mxc_dma_channel_t *dma;
mx2_dma_priv_t *dma_private;
if ((channel_num >= MAX_DMA_CHANNELS) || (channel_num < 0)) {
dma = g_dma_channels + channel_num;
dma_private = (mx2_dma_priv_t *) dma->private;
if (dma_private == NULL) {
"Free dma %d which has not completed initialization\n",
if (dma->active) { /* Channel is busy */
mxc_dma_disable(channel_num);
dma_private = (mx2_dma_priv_t *) dma->private;
mask_dma_interrupt(channel_num);
free_irq(dma_private->dma_irq, (void *)dma);
put_dma_channel(channel_num);
 * This function would just configure the buffers specified by the user into
 * the dma channel. The caller must call mxc_dma_enable to start this transfer.
 * @param channel_num the channel number returned at request time. This
 * would be used by the DMA driver to identify the calling
 * driver and do the necessary cleanup on the channel
 * associated with the particular peripheral
 * @param dma_buf an array of physical addresses to the user defined
 * buffers. The caller must guarantee the dma_buf is
 * available until the transfer is completed.
 * @param num_buf number of buffers in the array
 * @param mode specifies whether this is a READ or WRITE operation
 * @return This function returns a negative number on error if the buffer could not be
 * added with DMA for transfer. On Success, it returns 0
int mxc_dma_config(int channel_num, mxc_dma_requestbuf_t * dma_buf, int num_buf,
mxc_dma_channel_t *dma;
mx2_dma_priv_t *dma_private;
if ((dma_buf == NULL) || (num_buf < 1)) {
if ((channel_num >= MAX_DMA_CHANNELS) || (channel_num < 0)) {
dma = g_dma_channels + channel_num;
dma_private = (mx2_dma_priv_t *) dma->private;
if (dma_private == NULL) {
"config dma %d which has not completed initialization\n",
if (dma->lock == 0) {
/* TODO: dma chaining cannot be supported on a bi-directional channel */
if (dma_private->dma_chaining && (dma->mode != mode)) {
/* TODO: fill the dma buffers into the driver.
 * If the driver does not have enough room to store them, it will return -EBUSY
if (fill_dma_bd(dma, dma_buf, num_buf, mode)) {
 * This function would just configure the scatterlist specified by the
 * user into the dma channel. This is a slight variation of mxc_dma_config(),
 * it is provided for the convenience of drivers that have a scatterlist
 * passed into them. It is the calling driver's responsibility to have the
 * correct physical address filled in the "dma_address" field of the
 * @param channel_num the channel number returned at request time. This
 * would be used by the DMA driver to identify the calling
 * driver and do the necessary cleanup on the channel
 * associated with the particular peripheral
 * @param sg a scatterlist of buffers. The caller must guarantee
 * the dma_buf is available until the transfer is
 * @param num_buf number of buffers in the array
 * @param num_of_bytes total number of bytes to transfer. If set to 0, this
 * would imply to use the length field of the scatterlist
 * for each DMA transfer. Else it would calculate the size
 * for each DMA transfer.
 * @param mode specifies whether this is a READ or WRITE operation
 * @return This function returns a negative number on error if the buffer could not
 * be added with DMA for transfer. On Success, it returns 0
int mxc_dma_sg_config(int channel_num, struct scatterlist *sg,
int num_buf, int num_of_bytes, mxc_dma_mode_t mode)
mxc_dma_channel_t *dma;
mx2_dma_priv_t *dma_private;
if ((sg == NULL) || (num_buf < 1) || (num_of_bytes < 0)) {
if ((channel_num >= MAX_DMA_CHANNELS) || (channel_num < 0)) {
dma = g_dma_channels + channel_num;
dma_private = (mx2_dma_priv_t *) dma->private;
if (dma_private == NULL) {
"config_sg dma %d which has not completed initialization\n",
if (dma->lock == 0) {
/* TODO: dma chaining cannot be supported on a bi-directional channel */
if (dma_private->dma_chaining && (dma->mode != mode)) {
/* TODO: fill the dma buffers into the driver.
 * If the driver does not have enough room to store them, it will return -EBUSY
if (fill_dma_bd_by_sg(dma, sg, num_buf, num_of_bytes, mode)) {
 * This function is provided if the driver would like to set/change its
 * @param channel_num the channel number returned at request time. This
 * would be used by the DMA driver to identify the calling
 * driver and do the necessary cleanup on the channel
 * associated with the particular peripheral
 * @param callback a callback function to provide notification on transfer
 * completion, user could specify NULL if he does not wish
 * @param arg an argument that gets passed in to the callback
 * function, used by the user to do any driver specific
 * @return this function returns an error if the callback could not be set
int mxc_dma_callback_set(int channel_num, mxc_dma_callback_t callback,
mxc_dma_channel_t *dma;
if ((channel_num >= MAX_DMA_CHANNELS) || (channel_num < 0)) {
dma = g_dma_channels + channel_num;
dma->cb_fn = callback;
 * This stops the DMA channel and any ongoing transfers. Subsequent use of
 * mxc_dma_enable() will restart the channel and restart the transfer.
 * @param channel_num the channel number returned at request time. This
 * would be used by the DMA driver to identify the calling
 * driver and do the necessary cleanup on the channel
 * associated with the particular peripheral
 * @return returns a negative number on error or 0 on success
int mxc_dma_disable(int channel_num)
mxc_dma_channel_t *dma;
mx2_dma_priv_t *priv;
unsigned long ctrl_val;
if ((channel_num >= MAX_DMA_CHANNELS) || (channel_num < 0)) {
dma = g_dma_channels + channel_num;
if (dma->lock == 0) {
priv = (mx2_dma_priv_t *) dma->private;
printk(KERN_ERR "disable an incompletely initialized dma channel %d\n",
__clear_dma_interrupt(channel_num);
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_CCR(channel_num));
ctrl_val &= ~DMA_CTL_CEN; /* clear CEN bit */
__raw_writel(ctrl_val,
IO_ADDRESS(DMA_BASE_ADDR) + DMA_CCR(channel_num));
atomic_dec(&g_dma_actived);
/* TODO: Clear all request buffers */
 * This starts the DMA transfer. Or it restarts DMA on a stopped channel
 * previously stopped with mxc_dma_disable().
 * @param channel_num the channel number returned at request time. This
 * would be used by the DMA driver to identify the calling
 * driver and do the necessary cleanup on the channel
 * associated with the particular peripheral
 * @return returns a negative number on error or 0 on success
int mxc_dma_enable(int channel_num)
mxc_dma_channel_t *dma;
mx2_dma_priv_t *priv;
if ((channel_num >= MAX_DMA_CHANNELS) || (channel_num < 0)) {
dma = g_dma_channels + channel_num;
if (dma->lock == 0) {
priv = (mx2_dma_priv_t *) dma->private;
printk(KERN_ERR "enable an incompletely initialized dma channel %d\n",
priv->trans_bytes = 0;
atomic_inc(&g_dma_actived);
__clear_dma_interrupt(channel_num);
*@brief Dump DMA registers
*@param channel Requested channel number.
void mxc_dump_dma_register(int channel)
mxc_dma_channel_t *dma = &g_dma_channels[channel];
mx2_dma_priv_t *priv = (mx2_dma_priv_t *) dma->private;
dma_regs_t *dma_base;
printk(KERN_INFO "======== Dump dma channel %d\n", channel);
if ((unsigned)channel >= MXC_DMA_CHANNELS) {
printk(KERN_INFO "Channel number is invalid\n");
printk(KERN_INFO "Channel is not allocated\n");
printk(KERN_INFO "g_dma_actived = %d\n", atomic_read(&g_dma_actived));
dma_base = (dma_regs_t *) (priv->dma_base);
printk(KERN_INFO "DMA COMMON REGISTER\n");
printk(KERN_INFO "DMA CONTROL DMA_DCR: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DCR));
printk(KERN_INFO "DMA Interrupt status DMA_DISR: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DISR));
printk(KERN_INFO "DMA Interrupt Mask DMA_DIMR: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DIMR));
printk(KERN_INFO "DMA Burst Time Out DMA_DBTOSR: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DBTOSR));
printk(KERN_INFO "DMA request Time Out DMA_DRTOSR: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DRTOSR));
printk(KERN_INFO "DMA Transfer Error DMA_DSESR: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DSESR));
printk(KERN_INFO "DMA DMA_Overflow DMA_DBOSR: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DBOSR));
printk(KERN_INFO "DMA Burst Time OutCtl DMA_BurstTOCtl: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_DBTOCR));
printk(KERN_INFO "DMA 2D X size: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_XSRA));
printk(KERN_INFO "DMA 2D Y size: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_YSRA));
printk(KERN_INFO "DMA 2D Z size: %08x\n",
__raw_readl(IO_ADDRESS(DMA_BASE_ADDR) + DMA_WSRA));
printk(KERN_INFO "DMA Chan %2d Source SourceAddr: %08x\n", channel,
__raw_readl(&(dma_base->SourceAddr)));
printk(KERN_INFO "DMA Chan %2d dest DestAddr: %08x\n", channel,
__raw_readl(&(dma_base->DestAddr)));
printk(KERN_INFO "DMA Chan %2d count Count: %08x\n", channel,
__raw_readl(&(dma_base->Count)));
printk(KERN_INFO "DMA Chan %2d Ctl Ctl: %08x\n", channel,
__raw_readl(&(dma_base->Ctl)));
printk(KERN_INFO "DMA Chan %2d request RequestSource: %08x\n",
channel, __raw_readl(&(dma_base->RequestSource)));
printk(KERN_INFO "DMA Chan %2d burstL BurstLength: %08x\n", channel,
__raw_readl(&(dma_base->BurstLength)));
printk(KERN_INFO "DMA Chan %2d requestTO ReqTimeout: %08x\n", channel,
__raw_readl(&(dma_base->ReqTimeout)));
printk(KERN_INFO "DMA Chan %2d BusUtilt BusUtilt: %08x\n", channel,
__raw_readl(&(dma_base->BusUtilt)));
static int channel_in_use(void)
for (i = 0; i < MXC_DMA_CHANNELS; i++) {
if (dma_chan[i].lock)
int mxc_dma_pm_standby(void)
if (dma_pm_status == DMA_PMST_STANDBY)
if (!channel_in_use()) {
__disable_dma_clk();
dma_pm_status = DMA_PMST_STANDBY;
int mxc_dma_pm_resume(void)
if (dma_pm_status == DMA_PMST_RESUME)
/* Enable HCLK_DMA and DMA (ipg clock) */
dma_pm_status = DMA_PMST_RESUME;
int mxc_dma_pm_suspend(void)
if (dma_pm_status == DMA_PMST_SUSPEND)
if (!channel_in_use()) {
__disable_dma_clk();
dma_pm_status = DMA_PMST_SUSPEND;
int mxc_dma_pm_handler(struct pm_dev *dev, pm_request_t rqst, void *data)
/* APM does not send PM_STANDBY and PM_STANDBY_RESUME requests now. */
ret = dma_pm_suspend();
ret = dma_pm_resume();
int __init mxc_dma_init(void)
mxc_dma_channel_t *dma = g_dma_channels;
mx2_dma_priv_t *private = g_dma_privates;
memset(dma, 0, sizeof(mxc_dma_channel_t) * MXC_DMA_CHANNELS);
for (i = 0; i < MXC_DMA_CHANNELS; i++, dma++, private++) {
dma->private = private;
(unsigned int)(IO_ADDRESS(DMA_BASE_ADDR + DMA_CH_BASE(i)));
private->dma_irq = i + MXC_DMA_INTR_0; /* DMA channel interrupt number */
private->bd_ring = &g_dma_bd_table[i][0];
mxc_dma_load_info(g_dma_channels);
dma_clk = clk_get(NULL, "dma_clk");
clk_enable(dma_clk);
__raw_writel(0x2, IO_ADDRESS(DMA_BASE_ADDR) + DMA_DCR); /* reset DMA */
/* use module init because create_proc is called after init_dma */
g_proc_dir = create_proc_entry("dma", 0, NULL);
g_proc_dir->read_proc = (read_proc_t *) mxc_get_dma_list;
g_proc_dir->data = NULL;
/* Register the device with power management. */
dma_pm = pm_register(PM_DMA_DEV, PM_SYS_UNKNOWN, dma_pm_handler);
arch_initcall(mxc_dma_init);
EXPORT_SYMBOL(mxc_dma_request_ext);
EXPORT_SYMBOL(mxc_dma_free);
EXPORT_SYMBOL(mxc_dma_callback_set);
EXPORT_SYMBOL(mxc_dma_enable);
EXPORT_SYMBOL(mxc_dma_disable);
EXPORT_SYMBOL(mxc_dma_config);
EXPORT_SYMBOL(mxc_dma_sg_config);
EXPORT_SYMBOL(mxc_dump_dma_register);
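/*
 * Minimal usage sketch (illustrative only, hence guarded by #if 0): a
 * peripheral driver typically requests a channel, registers a callback,
 * queues one or more buffers with mxc_dma_config() and starts the transfer
 * with mxc_dma_enable(). The channel id MXC_DMA_UART1_RX, the function names
 * my_dma_callback()/example_start_dma() and the addresses passed in are
 * assumptions for the example, not part of this file.
 */
#if 0
static void my_dma_callback(void *arg, int error, unsigned int count)
{
	/* error is MXC_DMA_DONE on success; count is the number of bytes moved */
}

static int example_start_dma(dma_addr_t src, dma_addr_t dst, int len)
{
	mxc_dma_requestbuf_t buf;
	int chan, ret;

	chan = mxc_dma_request_ext(MXC_DMA_UART1_RX, "example", NULL);
	if (chan < 0)
		return chan;

	mxc_dma_callback_set(chan, my_dma_callback, NULL);

	buf.src_addr = src;		/* physical addresses, caller-provided */
	buf.dst_addr = dst;
	buf.num_of_bytes = len;

	ret = mxc_dma_config(chan, &buf, 1, MXC_DMA_MODE_READ);
	if (!ret)
		ret = mxc_dma_enable(chan);

	if (ret)
		mxc_dma_free(chan);	/* release the channel on any failure */
	return ret ? ret : chan;
}
#endif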