/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "davinci_cpdma.h"
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)
/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c
/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
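
/*
 * How these bits are used below: the host sets OWNER when it hands a
 * descriptor to the hardware, and the hardware clears it on completion
 * (see __cpdma_chan_process). SOP/EOP delimit a packet; this driver
 * always submits single-descriptor packets, so both are set together in
 * cpdma_chan_submit. EOQ is set by the hardware on the descriptor at
 * which it ran out of queue, and TD_COMPLETE marks the descriptor that
 * completed a teardown.
 */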

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	dma_addr_t		phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	spinlock_t			lock;
	int				chan_num;
	struct cpdma_desc __iomem	*head, *tail;
	int				count;
	void __iomem			*hdp, *cp, *rxfree;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
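
/*
 * The __raw_* accessors used above do native-endian MMIO with no memory
 * barriers or byte swapping. Descriptors are touched through the same
 * accessors as registers, which presumes the pool is mapped as device
 * or uncached memory so that writes reach the hardware in order.
 */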

/*
 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors. Some other
 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys    = phys;
		pool->iomap   = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap  = dma_alloc_coherent(dev, size, &pool->phys,
						   GFP_KERNEL);
		pool->iomap   = (void __iomem __force *)pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	return pool->hw_addr + (__force dma_addr_t)desc -
	       (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
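
/*
 * Address translation helpers: a descriptor lives at some io-virtual
 * address inside pool->iomap, while the hardware refers to it by an
 * address based at pool->hw_addr. hw_addr is kept separate from phys
 * because the address the CPU uses to reach descriptor RAM need not be
 * the same one the DMA engine uses; desc_from_phys() turns a hardware
 * hw_next pointer back into something the driver can dereference.
 */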

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
	unsigned long flags;
	int index;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
					   num_desc, 0);
	if (index < pool->num_desc) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}
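
/*
 * Descriptors are handed out with a simple first-fit bitmap allocator:
 * one bit per pool->desc_size slot. All callers in this file allocate
 * and free a single descriptor at a time, but the num_desc parameter
 * would also let a caller reserve a contiguous run of slots.
 */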

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned long timeout = jiffies + HZ/10;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (time_before(jiffies, timeout)) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
		}
		WARN_ON(!time_before(jiffies, timeout));
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	/* these registers exist only if has_ext_regs is set */
	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	/* cpdma_ctlr_stop() and cpdma_chan_destroy() take ctlr->lock
	 * themselves, so they must be called without it held */
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_destroy(ctlr->channels[i]);
	}

	cpdma_desc_pool_destroy(ctlr->pool);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
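
/*
 * The EOI (end-of-interrupt) write re-arms the CPDMA interrupt at the
 * interrupt controller once the handler has drained completions; this
 * version of the driver always acks with vector code 0, which appears
 * to be sufficient for the devices it supports.
 */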

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}

	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
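
/*
 * Channel numbering packs both directions into one array: slots
 * [0, CPDMA_MAX_CHANNELS) hold tx channels and the slots above them
 * hold rx channels, which is why ctlr->channels[] is sized at
 * 2 * CPDMA_MAX_CHANNELS and why is_rx_chan()/chan_linear() (presumably
 * provided by davinci_cpdma.h) reduce chan_num modulo CPDMA_MAX_CHANNELS.
 */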

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}
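
/*
 * The misqueued case above handles a race inherent to this hardware:
 * if the channel went idle (hardware set EOQ on the old tail) after we
 * read chan->tail but before the hw_next link was written, the newly
 * chained descriptor would never be fetched. Re-checking EOQ on the
 * previous tail and, when it is set with the hardware no longer owning
 * the descriptor, rewriting the head pointer restarts the queue.
 */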

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, gfp_t gfp_mask)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
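
/*
 * Typical usage from a MAC driver, as a rough sketch (the rx/tx channel
 * numbering helpers are assumed to come from davinci_cpdma.h, and the
 * buffer handling is illustrative only):
 *
 *	tx_chan = cpdma_chan_create(ctlr, tx_chan_num(0), tx_handler);
 *	rx_chan = cpdma_chan_create(ctlr, rx_chan_num(0), rx_handler);
 *	for (i = 0; i < num_rx_bufs; i++)
 *		cpdma_chan_submit(rx_chan, skb, skb->data,
 *				  skb_tailroom(skb), GFP_KERNEL);
 *	cpdma_ctlr_start(ctlr);
 *
 * Each completed buffer is returned through the channel's handler as
 * (token, outlen, status) by __cpdma_chan_free().
 */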

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}
	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	__cpdma_chan_free(chan, desc, outlen, status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
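
/*
 * Note the return value convention: on success this returns the
 * descriptor's EOQ/TD_COMPLETE status bits (possibly 0), while -ENOENT
 * ("queue empty") and -EBUSY ("hardware still owns the head") signal
 * that nothing was dequeued. cpdma_chan_process() and cpdma_chan_stop()
 * below both rely on this encoding.
 */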

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned long		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan->chan_num);

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		cpu_relax();
	}
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets; __cpdma_chan_process() takes
	 * chan->lock itself, so drop it around the loop */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);
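
/*
 * Teardown protocol, as implemented above: the teardown register is
 * written with the channel number, the hardware acknowledges by posting
 * CPDMA_TEARDOWN_VALUE in the channel's completion pointer, and any
 * descriptor it completed on the way out carries CPDMA_DESC_TD_COMPLETE.
 * Whatever is still on the software queue after that never reached the
 * wire and is returned to its owner with -ENOSYS status.
 */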

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_int_ctrl);

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
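
/*
 * Each entry describes one bit-field: the register it lives in, the
 * field's shift and (pre-shift) mask, and whether it may be read,
 * written, or both. For example, CPDMA_STAT_TX_ERR_CODE selects bits
 * [23:20] of DMASTATUS: read the register, shift right by 20, and mask
 * with 0xf.
 */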

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}