/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
        /* In the DMAC pool */
        FREE,
        /*
         * Allocated to some channel during prep_xxx
         * Also may be sitting on the work_list.
         */
        PREP,
        /*
         * Sitting on the work_list and already submitted
         * to the PL330 core. Not more than two descriptors
         * of a channel can be BUSY at any time.
         */
        BUSY,
        /*
         * Sitting on the channel work_list but xfer done
         * by PL330 core
         */
        DONE,
};

struct dma_pl330_chan {
        /* Schedule desc completion */
        struct tasklet_struct task;

        /* DMA-Engine Channel */
        struct dma_chan chan;

        /* Last completed cookie */
        dma_cookie_t completed;

        /* List of to be xfered descriptors */
        struct list_head work_list;

        /* Pointer to the DMAC that manages this channel,
         * NULL if the channel is available to be acquired.
         * As the parent, this DMAC also provides descriptors
         * to the channel.
         */
        struct dma_pl330_dmac *dmac;

        /* To protect channel manipulation */
        spinlock_t lock;

        /* Token of a hardware channel thread of PL330 DMAC
         * NULL if the channel is available to be acquired.
         */
        void *pl330_chid;
};

struct dma_pl330_dmac {
        struct pl330_info pif;

        /* DMA-Engine Device */
        struct dma_device ddma;

        /* Pool of descriptors available for the DMAC's channels */
        struct list_head desc_pool;
        /* To protect desc_pool manipulation */
        spinlock_t pool_lock;

        /* Peripheral channels connected to this DMAC */
        struct dma_pl330_chan peripherals[0]; /* keep at end */
};

struct dma_pl330_desc {
        /* To attach to a queue as child */
        struct list_head node;

        /* Descriptor for the DMA Engine API */
        struct dma_async_tx_descriptor txd;

        /* Xfer for PL330 core */
        struct pl330_xfer px;

        struct pl330_reqcfg rqcfg;
        struct pl330_req req;

        enum desc_status status;

        /* The channel which currently holds this desc */
        struct dma_pl330_chan *pchan;
};

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
        if (!ch)
                return NULL;

        return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct dma_pl330_desc, txd);
}

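/*
 * Run the completion callback of every descriptor on @list and then
 * return the whole list to the parent DMAC's descriptor pool.
 */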
static inline void free_desc_list(struct list_head *list)
{
        struct dma_pl330_dmac *pdmac;
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch;
        unsigned long flags;

        if (list_empty(list))
                return;

        /* Finish off the work list */
        list_for_each_entry(desc, list, node) {
                dma_async_tx_callback callback;
                void *param;

                /* All desc in a list belong to same channel */
                pch = desc->pchan;

                callback = desc->txd.callback;
                param = desc->txd.callback_param;
                if (callback)
                        callback(param);

                desc->pchan = NULL;
        }

        pdmac = pch->dmac;

        spin_lock_irqsave(&pdmac->pool_lock, flags);
        list_splice_tail_init(list, &pdmac->desc_pool);
        spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

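/*
 * Push pending descriptors from the channel's work_list into the PL330
 * core: stop when the request queue is full, and mark any descriptor the
 * core rejects as DONE so the tasklet can clean it up.
 */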
static inline void fill_queue(struct dma_pl330_chan *pch)
{
        struct dma_pl330_desc *desc;
        int ret;

        list_for_each_entry(desc, &pch->work_list, node) {

                /* If already submitted */
                if (desc->status == BUSY)
                        continue;

                ret = pl330_submit_req(pch->pl330_chid,
                                        &desc->req);
                if (!ret) {
                        desc->status = BUSY;
                } else if (ret == -EAGAIN) {
                        /* QFull or DMAC Dying */
                        break;
                } else {
                        /* Unacceptable request */
                        desc->status = DONE;
                        dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
                                __func__, __LINE__, desc->txd.cookie);
                        tasklet_schedule(&pch->task);
                }
        }
}

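/*
 * Per-channel tasklet: collect descriptors the core has finished, record
 * the last completed cookie, queue up more work and make sure the PL330
 * channel thread keeps running, then complete the finished descriptors.
 */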
static void pl330_tasklet(unsigned long data)
{
        struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
        struct dma_pl330_desc *desc, *_dt;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&pch->lock, flags);

        /* Pick up ripe tomatoes */
        list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
                if (desc->status == DONE) {
                        pch->completed = desc->txd.cookie;
                        list_move_tail(&desc->node, &list);
                }

        /* Try to submit a req immediately next to the last completed cookie */
        fill_queue(pch);

        /* Make sure the PL330 Channel thread is active */
        pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

        spin_unlock_irqrestore(&pch->lock, flags);

        free_desc_list(&list);
}

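/*
 * Callback from the PL330 core when a submitted request completes (or is
 * aborted): mark the descriptor DONE and let the channel tasklet finish it.
 */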
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
        struct dma_pl330_desc *desc = token;
        struct dma_pl330_chan *pch = desc->pchan;
        unsigned long flags;

        /* If desc aborted */
        if (!pch)
                return;

        spin_lock_irqsave(&pch->lock, flags);

        desc->status = DONE;

        spin_unlock_irqrestore(&pch->lock, flags);

        tasklet_schedule(&pch->task);
}

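/*
 * dmaengine alloc_chan_resources hook: reset the cookie counters and grab
 * a hardware channel thread from the PL330 core for this channel.
 */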
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_dmac *pdmac = pch->dmac;
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        pch->completed = chan->cookie = 1;

        pch->pl330_chid = pl330_request_channel(&pdmac->pif);
        if (!pch->pl330_chid) {
                spin_unlock_irqrestore(&pch->lock, flags);
                return 0;
        }

        tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

        spin_unlock_irqrestore(&pch->lock, flags);

        return 1;
}

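/* dmaengine device_control hook: only DMA_TERMINATE_ALL is implemented */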
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_desc *desc;
        unsigned long flags;

        /* Only supports DMA_TERMINATE_ALL */
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        spin_lock_irqsave(&pch->lock, flags);

        /* FLUSH the PL330 Channel thread */
        pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

        /* Mark all desc done */
        list_for_each_entry(desc, &pch->work_list, node)
                desc->status = DONE;

        spin_unlock_irqrestore(&pch->lock, flags);

        pl330_tasklet((unsigned long) pch);

        return 0;
}

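/*
 * dmaengine free_chan_resources hook: stop the completion tasklet and give
 * the hardware channel thread back to the PL330 core.
 */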
static void pl330_free_chan_resources(struct dma_chan *chan)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        tasklet_kill(&pch->task);

        pl330_release_channel(pch->pl330_chid);
        pch->pl330_chid = NULL;

        spin_unlock_irqrestore(&pch->lock, flags);
}

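/* dmaengine tx_status hook: report progress from the cookie counters alone */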
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct dma_pl330_chan *pch = to_pchan(chan);
        dma_cookie_t last_done, last_used;
        enum dma_status ret;

        last_done = pch->completed;
        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, last_done, last_used);

        dma_set_tx_state(txstate, last_done, last_used, 0);

        return ret;
}

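/* dmaengine issue_pending hook: kick the channel tasklet to start submission */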
static void pl330_issue_pending(struct dma_chan *chan)
{
        pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_pl330_desc *desc, *last = to_desc(tx);
        struct dma_pl330_chan *pch = to_pchan(tx->chan);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);

        /* Assign cookies to all nodes */
        cookie = tx->chan->cookie;

        while (!list_empty(&last->node)) {
                desc = list_entry(last->node.next, struct dma_pl330_desc, node);

                if (++cookie < 0)
                        cookie = DMA_MIN_COOKIE;
                desc->txd.cookie = cookie;

                list_move_tail(&desc->node, &pch->work_list);
        }

        if (++cookie < 0)
                cookie = DMA_MIN_COOKIE;
        last->txd.cookie = cookie;

        list_add_tail(&last->node, &pch->work_list);

        tx->chan->cookie = cookie;

        spin_unlock_irqrestore(&pch->lock, flags);

        return cookie;
}

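/*
 * One-time initialisation of a descriptor: wire its request to its own
 * xfer/config blocks and install the driver callbacks.
 */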
static inline void _init_desc(struct dma_pl330_desc *desc)
{
        desc->req.x = &desc->px;
        desc->req.token = desc;
        desc->rqcfg.swap = SWAP_NO;
        desc->rqcfg.privileged = 0;
        desc->rqcfg.insnaccess = 0;
        desc->rqcfg.scctl = SCCTRL0;
        desc->rqcfg.dcctl = DCCTRL0;
        desc->req.cfg = &desc->rqcfg;
        desc->req.xfer_cb = dma_pl330_rqcb;
        desc->txd.tx_submit = pl330_tx_submit;

        INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
        struct dma_pl330_desc *desc;
        unsigned long flags;
        int i;

        desc = kmalloc(count * sizeof(*desc), flg);
        if (!desc)
                return 0;

        spin_lock_irqsave(&pdmac->pool_lock, flags);

        for (i = 0; i < count; i++) {
                _init_desc(&desc[i]);
                list_add_tail(&desc[i].node, &pdmac->desc_pool);
        }

        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

        return count;
}

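/* Take one free descriptor off the DMAC pool, or return NULL if it is empty */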
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
        struct dma_pl330_desc *desc = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pdmac->pool_lock, flags);

        if (!list_empty(&pdmac->desc_pool)) {
                desc = list_entry(pdmac->desc_pool.next,
                                struct dma_pl330_desc, node);

                list_del_init(&desc->node);

                desc->status = PREP;
                desc->txd.callback = NULL;
        }

        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

        return desc;
}

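/*
 * Get a descriptor for @pch: try the DMAC pool first and, if it is empty,
 * grow the pool by one with GFP_ATOMIC before initialising the descriptor
 * for this channel's peripheral.
 */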
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
        struct dma_pl330_dmac *pdmac = pch->dmac;
        struct dma_pl330_peri *peri = pch->chan.private;
        struct dma_pl330_desc *desc;

        /* Pluck one desc from the pool of DMAC */
        desc = pluck_desc(pdmac);

        /* If the DMAC pool is empty, alloc new */
        if (!desc) {
                if (!add_desc(pdmac, GFP_ATOMIC, 1))
                        return NULL;

                desc = pluck_desc(pdmac);
                if (!desc) {
                        dev_err(pch->dmac->pif.dev,
                                "%s:%d ALERT!\n", __func__, __LINE__);
                        return NULL;
                }
        }

        /* Initialize the descriptor */
        desc->pchan = pch;
        desc->txd.cookie = 0;
        async_tx_ack(&desc->txd);

        desc->req.rqtype = peri->rqtype;
        desc->req.peri = peri->peri_id;

        dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

        return desc;
}

static inline void fill_px(struct pl330_xfer *px,
                dma_addr_t dst, dma_addr_t src, size_t len)
{
        px->next = NULL;
        px->bytes = len;
        px->dst_addr = dst;
        px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
                dma_addr_t src, size_t len)
{
        struct dma_pl330_desc *desc = pl330_get_desc(pch);

        if (!desc) {
                dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
                        __func__, __LINE__);
                return NULL;
        }

        /*
         * Ideally we should look out for reqs bigger than
         * those that can be programmed with 256 bytes of
         * MC buffer, but considering a req size is seldom
         * going to be word-unaligned and more than 200MB,
         * we take it easy.
         * Also, should the limit be reached we'd rather
         * have the platform increase MC buffer size than
         * complicating this API driver.
         */
        fill_px(&desc->px, dst, src, len);

        return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
        struct dma_pl330_chan *pch = desc->pchan;
        struct pl330_info *pi = &pch->dmac->pif;
        int burst_len;

        burst_len = pi->pcfg.data_bus_width / 8;
        burst_len *= pi->pcfg.data_buf_dep;
        burst_len >>= desc->rqcfg.brst_size;

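        /*
         * Worked example with made-up numbers: a 64-bit (8 byte) data bus
         * and a 16-entry data buffer give 8 * 16 = 128 bytes of buffering;
         * with brst_size = 2 (4-byte beats) that is 128 >> 2 = 32 beats,
         * which is then clamped to the architectural maximum of 16 below
         * and trimmed until it divides the transfer length evenly.
         */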
        /* src/dst_burst_len can't be more than 16 */
        if (burst_len > 16)
                burst_len = 16;

        while (burst_len > 1) {
                if (!(len % (burst_len << desc->rqcfg.brst_size)))
                        break;
                burst_len--;
        }

        return burst_len;
}

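/*
 * dmaengine prep_dma_memcpy hook: build a single mem-to-mem descriptor,
 * picking the largest burst size that divides the transfer length.
 */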
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                dma_addr_t src, size_t len, unsigned long flags)
{
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_peri *peri = chan->private;
        struct pl330_info *pi;
        int burst;

        if (unlikely(!pch || !len || !peri))
                return NULL;

        if (peri->rqtype != MEMTOMEM)
                return NULL;

        pi = &pch->dmac->pif;

        desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
        if (!desc)
                return NULL;

        desc->rqcfg.src_inc = 1;
        desc->rqcfg.dst_inc = 1;

        /* Select max possible burst size */
        burst = pi->pcfg.data_bus_width / 8;

        while (burst > 1) {
                if (!(len % burst))
                        break;
                burst /= 2;
        }

        desc->rqcfg.brst_size = 0;
        while (burst != (1 << desc->rqcfg.brst_size))
                desc->rqcfg.brst_size++;

        desc->rqcfg.brst_len = get_burst_len(desc, len);

        desc->txd.flags = flags;

        return &desc->txd;
}

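/*
 * dmaengine prep_slave_sg hook: build one descriptor per scatterlist entry,
 * all targeting the peripheral FIFO, and chain them so that submitting the
 * last descriptor queues the whole list.
 */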
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flg)
{
        struct dma_pl330_desc *first, *desc = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct dma_pl330_peri *peri = chan->private;
        struct scatterlist *sg;
        unsigned long flags;
        int i, burst_size;
        dma_addr_t addr;

        if (unlikely(!pch || !sgl || !sg_len))
                return NULL;

        /* Make sure the direction is consistent */
        if ((direction == DMA_TO_DEVICE &&
                        peri->rqtype != MEMTODEV) ||
                        (direction == DMA_FROM_DEVICE &&
                        peri->rqtype != DEVTOMEM)) {
                dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
                        __func__, __LINE__);
                return NULL;
        }

        addr = peri->fifo_addr;
        burst_size = peri->burst_sz;

        first = NULL;

        for_each_sg(sgl, sg, sg_len, i) {

                desc = pl330_get_desc(pch);
                if (!desc) {
                        struct dma_pl330_dmac *pdmac = pch->dmac;

                        dev_err(pch->dmac->pif.dev,
                                "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
                        if (!first)
                                return NULL;

                        spin_lock_irqsave(&pdmac->pool_lock, flags);

                        while (!list_empty(&first->node)) {
                                desc = list_entry(first->node.next,
                                                struct dma_pl330_desc, node);
                                list_move_tail(&desc->node, &pdmac->desc_pool);
                        }

                        list_move_tail(&first->node, &pdmac->desc_pool);

                        spin_unlock_irqrestore(&pdmac->pool_lock, flags);

                        return NULL;
                }

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->node);

                if (direction == DMA_TO_DEVICE) {
                        desc->rqcfg.src_inc = 1;
                        desc->rqcfg.dst_inc = 0;
                        fill_px(&desc->px,
                                addr, sg_dma_address(sg), sg_dma_len(sg));
                } else {
                        desc->rqcfg.src_inc = 0;
                        desc->rqcfg.dst_inc = 1;
                        fill_px(&desc->px,
                                sg_dma_address(sg), addr, sg_dma_len(sg));
                }

                desc->rqcfg.brst_size = burst_size;
                desc->rqcfg.brst_len = 1;
        }

        /* Return the last desc in the chain */
        desc->txd.flags = flg;
        return &desc->txd;
}

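/* Shared interrupt handler: let the PL330 core decide if the IRQ was ours */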
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
        if (pl330_update(data))
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

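/*
 * Probe an AMBA-discovered PL330: map its registers, hook its interrupt,
 * seed the descriptor pool, describe every peripheral channel from the
 * platform data and register the whole lot with the dmaengine core.
 */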
static int __devinit
pl330_probe(struct amba_device *adev, struct amba_id *id)
{
        struct dma_pl330_platdata *pdat;
        struct dma_pl330_dmac *pdmac;
        struct dma_pl330_chan *pch;
        struct pl330_info *pi;
        struct dma_device *pd;
        struct resource *res;
        int i, irq, ret;

        pdat = adev->dev.platform_data;

        if (!pdat || !pdat->nr_valid_peri) {
                dev_err(&adev->dev, "platform data missing\n");
                return -ENODEV;
        }

        /* Allocate a new DMAC and its Channels */
        pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
                        + sizeof(*pdmac), GFP_KERNEL);
        if (!pdmac) {
                dev_err(&adev->dev, "unable to allocate mem\n");
                return -ENOMEM;
        }

        pi = &pdmac->pif;
        pi->dev = &adev->dev;
        pi->pl330_data = NULL;
        pi->mcbufsz = pdat->mcbuf_sz;

        res = &adev->res;
        request_mem_region(res->start, resource_size(res), "dma-pl330");

        pi->base = ioremap(res->start, resource_size(res));

        irq = adev->irq[0];
        ret = request_irq(irq, pl330_irq_handler, 0,
                        dev_name(&adev->dev), pi);

        INIT_LIST_HEAD(&pdmac->desc_pool);
        spin_lock_init(&pdmac->pool_lock);

        /* Create a descriptor pool of default size */
        if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
                dev_warn(&adev->dev, "unable to allocate desc\n");

        pd = &pdmac->ddma;
        INIT_LIST_HEAD(&pd->channels);

        /* Initialize channel parameters */
        for (i = 0; i < pdat->nr_valid_peri; i++) {
                struct dma_pl330_peri *peri = &pdat->peri[i];
                pch = &pdmac->peripherals[i];

                switch (peri->rqtype) {
                case MEMTOMEM:
                        dma_cap_set(DMA_MEMCPY, pd->cap_mask);
                        break;
                case MEMTODEV:
                case DEVTOMEM:
                        dma_cap_set(DMA_SLAVE, pd->cap_mask);
                        break;
                default:
                        dev_err(&adev->dev, "DEVTODEV Not Supported\n");
                        continue;
                }

                INIT_LIST_HEAD(&pch->work_list);
                spin_lock_init(&pch->lock);
                pch->pl330_chid = NULL;
                pch->chan.private = peri;
                pch->chan.device = pd;
                pch->chan.chan_id = i;
                pch->dmac = pdmac;

                /* Add the channel to the DMAC list */
                list_add_tail(&pch->chan.device_node, &pd->channels);
        }

        pd->dev = &adev->dev;

        pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
        pd->device_free_chan_resources = pl330_free_chan_resources;
        pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
        pd->device_tx_status = pl330_tx_status;
        pd->device_prep_slave_sg = pl330_prep_slave_sg;
        pd->device_control = pl330_control;
        pd->device_issue_pending = pl330_issue_pending;

        ret = dma_async_device_register(pd);
        if (ret) {
                dev_err(&adev->dev, "unable to register DMAC\n");
                goto probe_err;
        }

        amba_set_drvdata(adev, pdmac);

        dev_info(&adev->dev,
                "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
        dev_info(&adev->dev,
                "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
                pi->pcfg.data_buf_dep,
                pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
                pi->pcfg.num_peri, pi->pcfg.num_events);

        return 0;

probe_err:
        release_mem_region(res->start, resource_size(res));
        kfree(pdmac);

        return ret;
}

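/*
 * Tear-down mirror of probe: terminate and release every channel, then
 * drop the MMIO region and free the DMAC structure.
 */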
static int __devexit pl330_remove(struct amba_device *adev)
{
        struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
        struct dma_pl330_chan *pch, *_p;
        struct pl330_info *pi;
        struct resource *res;

        if (!pdmac)
                return 0;

        amba_set_drvdata(adev, NULL);

        list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
                        chan.device_node) {

                /* Remove the channel */
                list_del(&pch->chan.device_node);

                /* Flush the channel */
                pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
                pl330_free_chan_resources(&pch->chan);
        }

        pi = &pdmac->pif;

        res = &adev->res;
        release_mem_region(res->start, resource_size(res));

        kfree(pdmac);

        return 0;
}

static struct amba_id pl330_ids[] = {
        {
                .id	= 0x00041330,
                .mask	= 0x000fffff,
        },
        { 0, 0 },
};

static struct amba_driver pl330_driver = {
        .drv = {
                .owner = THIS_MODULE,
                .name = "dma-pl330",
        },
        .id_table = pl330_ids,
        .probe = pl330_probe,
        .remove = pl330_remove,
};

static int __init pl330_init(void)
{
        return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
        amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");