* MUSB OTG driver host support
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include "linux-compat.h"
#include "usb-compat.h"
#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
* - There's still lots of partial code duplication for fault paths, so
* they aren't handled as consistently as they need to be.
* - PIO mostly behaved when last tested.
* + including ep0, with all usbtest cases 9, 10
* + usbtest 14 (ep0out) doesn't seem to run at all
* + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
* configurations, but otherwise double buffering passes basic tests.
* + for 2.6.N, for N > ~10, needs API changes for hcd framework.
* - DMA (CPPI) ... partially behaves, not currently recommended
* + about 1/15 the speed of typical EHCI implementations (PCI)
* + RX, all too often reqpkt seems to misbehave after tx
* + TX, no known issues (other than evident silicon issue)
* - DMA (Mentor/OMAP) ...has at least toggle update problems
* - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
* starvation ... nothing yet for TX, interrupt, or bulk.
* - Not tested with HNP, but some SRP paths seem to behave.
* NOTE 24-August-2006:
* - Bulk traffic finally uses both sides of hardware ep1, freeing up an
* extra endpoint for periodic use enabling hub + keybd + mouse. That
* mostly works, except that with "usbnet" it's easy to trigger cases
* with "ping" where RX loses. (a) ping to davinci, even "ping -f",
* fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
* although ARP RX wins. (That test was done with a full speed link.)
* NOTE on endpoint usage:
* CONTROL transfers all go through ep0. BULK ones go through dedicated IN
* and OUT endpoints ... hardware is dedicated for those "async" queue(s).
* (Yes, bulk _could_ use more of the endpoints than that, and would even
* INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
* So far that scheduling is both dumb and optimistic: the endpoint will be
* "claimed" until its software queue is no longer refilled. No multiplexing
* of transfers between endpoints, or anything clever.
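*
* Added illustration (editor's sketch, not from the original driver): the
* "claiming" above amounts to parking the qh pointer on the hardware
* endpoint until its queue drains, using helpers defined later in this
* file:
*
*	if (musb_ep_get_qh(hw_ep, is_in) == NULL)
*		musb_ep_set_qh(hw_ep, is_in, qh);	... claim the endpoint
*	...
*	if (list_empty(&qh->hep->urb_list))
*		musb_ep_set_qh(hw_ep, is_in, NULL);	... release on empty queue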
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len);
* Clear TX fifo. Needed to avoid BABBLE errors.
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
csr = musb_readw(epio, MUSB_TXCSR);
while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
csr |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
if (WARN(retries-- < 1,
"Could not flush host TX%d fifo: csr: %04x\n",
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
void __iomem *epio = ep->regs;
/* scrub any data left in the fifo */
csr = musb_readw(epio, MUSB_TXCSR);
if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
csr = musb_readw(epio, MUSB_TXCSR);
WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
/* and reset for the next transfer */
musb_writew(epio, MUSB_TXCSR, 0);
* Start transmit. Caller is responsible for locking shared resources.
* musb must be locked.
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
/* NOTE: no locks here; caller should lock and select EP */
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
musb_writew(ep->regs, MUSB_CSR0, txcsr);
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
/* NOTE: no locks here; caller should lock and select EP */
txcsr = musb_readw(ep->regs, MUSB_TXCSR);
txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
if (is_cppi_enabled())
txcsr |= MUSB_TXCSR_DMAMODE;
musb_writew(ep->regs, MUSB_TXCSR, txcsr);
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
if (is_in != 0 || ep->is_shared_fifo)
if (is_in == 0 || ep->is_shared_fifo)
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
return is_in ? ep->in_qh : ep->out_qh;
* Start the URB at the front of an endpoint's queue
* end must be claimed from the caller.
* Context: controller locked, irqs blocked
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
void __iomem *mbase = musb->mregs;
struct urb *urb = next_urb(qh);
void *buf = urb->transfer_buffer;
struct musb_hw_ep *hw_ep = qh->hw_ep;
unsigned pipe = urb->pipe;
u8 address = usb_pipedevice(pipe);
int epnum = hw_ep->epnum;
/* initialize software qh state */
/* gather right source of data */
case USB_ENDPOINT_XFER_CONTROL:
/* control transfers always start with SETUP */
musb->ep0_stage = MUSB_EP0_START;
buf = urb->setup_packet;
case USB_ENDPOINT_XFER_ISOC:
offset = urb->iso_frame_desc[0].offset;
len = urb->iso_frame_desc[0].length;
default: /* bulk, interrupt */
/* actual_length may be nonzero on retry paths */
buf = urb->transfer_buffer + urb->actual_length;
len = urb->transfer_buffer_length - urb->actual_length;
dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
qh, urb, address, qh->epnum,
is_in ? "in" : "out",
({char *s; switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
default: s = "-intr"; break;
epnum, buf + offset, len);
/* Configure endpoint */
musb_ep_set_qh(hw_ep, is_in, qh);
musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
/* transmit may have more work: start it when it is time */
/* determine if the time is right for a periodic transfer */
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
frame = musb_readw(mbase, MUSB_FRAME);
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
if ((urb->transfer_flags & URB_ISO_ASAP)
|| (frame >= urb->start_frame)) {
/* REVISIT the SOF irq handler shouldn't duplicate
* this code; and we don't init urb->start_frame...
qh->frame = urb->start_frame;
/* enable SOF interrupt so we can count down */
dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
hw_ep->tx_channel ? "dma" : "pio");
if (!hw_ep->tx_channel)
musb_h_tx_start(hw_ep);
else if (is_cppi_enabled() || tusb_dma_omap())
musb_h_tx_dma_start(hw_ep);
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
dev_dbg(musb->controller,
"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
urb, urb->complete, status,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
urb->actual_length, urb->transfer_buffer_length
usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
spin_unlock(&musb->lock);
usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
spin_lock(&musb->lock);
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
void __iomem *epio = qh->hw_ep->regs;
* FIXME: the current Mentor DMA code seems to have
* problems getting toggle correct.
csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
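/*
 * Added note (editor's, not from the original driver): the toggle saved
 * above is restored on the next submission by musb_ep_program(), e.g. for
 * TX:
 *
 *	if (usb_gettoggle(urb->dev, qh->epnum, 1))
 *		csr |= MUSB_TXCSR_H_WR_DATATOGGLE
 *			| MUSB_TXCSR_H_DATATOGGLE;
 */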
* Advance this hardware endpoint's queue, completing the specified URB and
* advancing to either the next URB queued to that qh, or else invalidating
* that qh and advancing to the next qh scheduled after the current one.
* Context: caller owns controller lock, IRQs are blocked
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
struct musb_hw_ep *hw_ep, int is_in)
struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
struct musb_hw_ep *ep = qh->hw_ep;
int ready = qh->is_ready;
status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
/* save toggle eagerly, for paranoia */
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
musb_save_toggle(qh, is_in, urb);
case USB_ENDPOINT_XFER_ISOC:
if (status == 0 && urb->error_count)
musb_giveback(musb, urb, status);
qh->is_ready = ready;
/* reclaim resources (and bandwidth) ASAP; deschedule it, and
* invalidate qh as soon as list_empty(&hep->urb_list)
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
struct dma_controller *dma = musb->dma_controller;
if (ep->rx_channel) {
dma->channel_release(ep->rx_channel);
ep->rx_channel = NULL;
if (ep->tx_channel) {
dma->channel_release(ep->tx_channel);
ep->tx_channel = NULL;
/* Clobber old pointers to this qh */
musb_ep_set_qh(ep, is_in, NULL);
qh->hep->hcpriv = NULL;
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
/* fifo policy for these lists, except that NAKing
* should rotate a qh to the end (for fairness).
head = qh->ring.prev;
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
/* this is where periodic bandwidth should be
* de-allocated if it's tracked and allocated;
* and where we'd update the schedule tree...
if (qh != NULL && qh->is_ready) {
dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
/* we don't want fifo to fill itself again;
* ignore dma (various models),
* leave toggle alone (may not have been saved yet)
csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
csr &= ~(MUSB_RXCSR_H_REQPKT
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR);
/* write 2x to allow double buffering */
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
/* flush writebuffer */
return musb_readw(hw_ep->regs, MUSB_RXCSR);
* PIO RX for a packet (or part of it).
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
int pipe = urb->pipe;
void *buffer = urb->transfer_buffer;
/* musb_ep_select(mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
urb->transfer_buffer, qh->offset,
urb->transfer_buffer_length);
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
buf = buffer + d->offset;
if (rx_count > length) {
dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
urb->actual_length += length;
d->actual_length = length;
/* see if we are done */
done = (++qh->iso_idx >= urb->number_of_packets);
buf = buffer + qh->offset;
length = urb->transfer_buffer_length - qh->offset;
if (rx_count > length) {
if (urb->status == -EINPROGRESS)
urb->status = -EOVERFLOW;
dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
urb->actual_length += length;
qh->offset += length;
/* see if we are done */
done = (urb->actual_length == urb->transfer_buffer_length)
|| (rx_count < qh->maxpacket)
|| (urb->status != -EINPROGRESS);
&& (urb->status == -EINPROGRESS)
&& (urb->transfer_flags & URB_SHORT_NOT_OK)
&& (urb->actual_length
< urb->transfer_buffer_length))
urb->status = -EREMOTEIO;
musb_read_fifo(hw_ep, length, buf);
csr = musb_readw(epio, MUSB_RXCSR);
csr |= MUSB_RXCSR_H_WZC_BITS;
if (unlikely(do_flush))
musb_h_flush_rxfifo(hw_ep, csr);
/* REVISIT this assumes AUTOCLEAR is never set */
csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
csr |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, csr);
/* we don't always need to reinit a given side of an endpoint...
* when we do, use tx/rx reinit routine and then construct a new CSR
* to address data toggle, NYET, and DMA or PIO.
* it's possible that driver bugs (especially for DMA) or aborting a
* transfer might have left the endpoint busier than it should be.
* the busy/not-empty tests are basically paranoia.
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
* That always uses tx_reinit since ep0 repurposes TX register
* offsets; the initial SETUP packet is also a kind of OUT.
/* if programmed for Tx, put it in RX mode */
if (ep->is_shared_fifo) {
csr = musb_readw(ep->regs, MUSB_TXCSR);
if (csr & MUSB_TXCSR_MODE) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(ep->regs, MUSB_TXCSR);
musb_writew(ep->regs, MUSB_TXCSR,
csr | MUSB_TXCSR_FRCDATATOG);
* Clear the MODE bit (and everything else) to enable Rx.
* NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
if (csr & MUSB_TXCSR_DMAMODE)
musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
musb_writew(ep->regs, MUSB_TXCSR, 0);
/* scrub all previous state, clearing toggle */
csr = musb_readw(ep->regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_RXPKTRDY)
WARNING("rx%d, packet/%d ready?\n", ep->epnum,
musb_readw(ep->regs, MUSB_RXCOUNT));
musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
/* protocol/endpoint, interval/NAKlimit, i/o size */
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
/* Set RXMAXP with the FIFO size of the endpoint
* to disable double buffer mode.
if (musb->double_buffer_not_ok)
musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
musb_writew(ep->regs, MUSB_RXMAXP,
qh->maxpacket | ((qh->hb_mult - 1) << 11));
static bool musb_tx_dma_program(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
struct urb *urb, u32 offset, u32 length)
struct dma_channel *channel = hw_ep->tx_channel;
void __iomem *epio = hw_ep->regs;
u16 pkt_size = qh->maxpacket;
#ifdef CONFIG_USB_INVENTRA_DMA
if (length > channel->max_len)
length = channel->max_len;
csr = musb_readw(epio, MUSB_TXCSR);
if (length > pkt_size) {
csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
/* autoset shouldn't be set in high bandwidth */
if (qh->hb_mult == 1)
csr |= MUSB_TXCSR_AUTOSET;
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
channel->desired_mode = mode;
musb_writew(epio, MUSB_TXCSR, csr);
if (!is_cppi_enabled() && !tusb_dma_omap())
channel->actual_len = 0;
* TX uses "RNDIS" mode automatically but needs help
* to identify the zero-length-final-packet case.
mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
qh->segsize = length;
* Ensure the data reaches main memory before starting
if (!dma->channel_program(channel, pkt_size, mode,
urb->transfer_dma + offset, length)) {
dma->channel_release(channel);
hw_ep->tx_channel = NULL;
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
* Program an HDRC endpoint as per the given URB
* Context: irqs blocked, controller lock held
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len)
struct dma_controller *dma_controller;
struct dma_channel *dma_channel;
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
u16 packet_sz = qh->maxpacket;
dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
"h_addr%02x h_port%02x bytes %d\n",
is_out ? "-->" : "<--",
epnum, urb, urb->dev->speed,
qh->addr_reg, qh->epnum, is_out ? "out" : "in",
qh->h_addr_reg, qh->h_port_reg,
musb_ep_select(mbase, epnum);
/* candidate for DMA? */
dma_controller = musb->dma_controller;
if (is_dma_capable() && epnum && dma_controller) {
dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
dma_channel = dma_controller->channel_alloc(
dma_controller, hw_ep, is_out);
hw_ep->tx_channel = dma_channel;
hw_ep->rx_channel = dma_channel;
/* make sure we clear DMAEnab, autoSet bits from previous run */
/* OUT/transmit/EP0 or IN/receive? */
csr = musb_readw(epio, MUSB_TXCSR);
/* disable interrupt in case we flush */
int_txe = musb_readw(mbase, MUSB_INTRTXE);
musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
/* general endpoint setup */
/* flush all old state, set default */
musb_h_tx_flush_fifo(hw_ep);
* We must not clear the DMAMODE bit before or in
* the same cycle with the DMAENAB bit, so we clear
* the latter first...
csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
| MUSB_TXCSR_FRCDATATOG
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_TXPKTRDY
csr |= MUSB_TXCSR_MODE;
if (usb_gettoggle(urb->dev, qh->epnum, 1))
csr |= MUSB_TXCSR_H_WR_DATATOGGLE
| MUSB_TXCSR_H_DATATOGGLE;
csr |= MUSB_TXCSR_CLRDATATOG;
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
csr &= ~MUSB_TXCSR_DMAMODE;
musb_writew(epio, MUSB_TXCSR, csr);
csr = musb_readw(epio, MUSB_TXCSR);
/* endpoint 0: just flush */
musb_h_ep0_flush_fifo(hw_ep);
/* target addr and (for multipoint) hub addr/port */
if (musb->is_multipoint) {
musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
/* protocol/endpoint/interval/NAKlimit */
musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
else if (can_bulk_split(musb, qh->type))
musb_writew(epio, MUSB_TXMAXP, packet_sz
| ((hw_ep->max_packet_sz_tx /
packet_sz) - 1) << 11);
musb_writew(epio, MUSB_TXMAXP,
((qh->hb_mult - 1) << 11));
musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
if (musb->is_multipoint)
musb_writeb(epio, MUSB_TYPE0,
if (can_bulk_split(musb, qh->type))
load_count = min((u32) hw_ep->max_packet_sz_tx,
load_count = min((u32) packet_sz, len);
if (dma_channel && musb_tx_dma_program(dma_controller,
hw_ep, qh, urb, offset, len))
/* PIO to load FIFO */
qh->segsize = load_count;
musb_write_fifo(hw_ep, load_count, buf);
/* re-enable interrupt */
musb_writew(mbase, MUSB_INTRTXE, int_txe);
if (hw_ep->rx_reinit) {
musb_rx_reinit(musb, qh, hw_ep);
/* init new state: toggle and NYET, maybe DMA later */
if (usb_gettoggle(urb->dev, qh->epnum, 0))
csr = MUSB_RXCSR_H_WR_DATATOGGLE
| MUSB_RXCSR_H_DATATOGGLE;
if (qh->type == USB_ENDPOINT_XFER_INT)
csr |= MUSB_RXCSR_DISNYET;
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
if (csr & (MUSB_RXCSR_RXPKTRDY
| MUSB_RXCSR_H_REQPKT))
ERR("broken !rx_reinit, ep%d csr %04x\n",
/* scrub any stale state, leaving toggle alone */
csr &= MUSB_RXCSR_DISNYET;
/* kick things off */
if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
/* Candidate for DMA */
dma_channel->actual_len = 0L;
/* AUTOREQ is in a DMA register */
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
* Unless caller treats short RX transfers as
* errors, we dare not queue multiple transfers.
dma_ok = dma_controller->channel_program(dma_channel,
packet_sz, !(urb->transfer_flags &
urb->transfer_dma + offset,
dma_controller->channel_release(dma_channel);
hw_ep->rx_channel = dma_channel = NULL;
csr |= MUSB_RXCSR_DMAENAB;
csr |= MUSB_RXCSR_H_REQPKT;
dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
* Service the default endpoint (ep0) as host.
* Return true until it's time to start the status stage.
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
u8 *fifo_dest = NULL;
struct musb_hw_ep *hw_ep = musb->control_ep;
struct musb_qh *qh = hw_ep->in_qh;
struct usb_ctrlrequest *request;
switch (musb->ep0_stage) {
fifo_dest = urb->transfer_buffer + urb->actual_length;
fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
if (fifo_count < len)
urb->status = -EOVERFLOW;
musb_read_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
if (len < qh->maxpacket) {
/* always terminate on short read; it's
* rarely reported as an error.
} else if (urb->actual_length <
urb->transfer_buffer_length)
request = (struct usb_ctrlrequest *) urb->setup_packet;
if (!request->wLength) {
dev_dbg(musb->controller, "start no-DATA\n");
} else if (request->bRequestType & USB_DIR_IN) {
dev_dbg(musb->controller, "start IN-DATA\n");
musb->ep0_stage = MUSB_EP0_IN;
dev_dbg(musb->controller, "start OUT-DATA\n");
musb->ep0_stage = MUSB_EP0_OUT;
fifo_count = min_t(size_t, qh->maxpacket,
urb->transfer_buffer_length -
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
(fifo_count == 1) ? "" : "s",
musb_write_fifo(hw_ep, fifo_count, fifo_dest);
urb->actual_length += fifo_count;
ERR("bogus ep0 stage %d\n", musb->ep0_stage);
* Handle default endpoint interrupt as host. Only called in IRQ time
* from musb_interrupt().
* called with controller irqlocked
irqreturn_t musb_h_ep0_irq(struct musb *musb)
void __iomem *mbase = musb->mregs;
struct musb_hw_ep *hw_ep = musb->control_ep;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
bool complete = false;
irqreturn_t retval = IRQ_NONE;
/* ep0 only has one queue, "in" */
musb_ep_select(mbase, 0);
csr = musb_readw(epio, MUSB_CSR0);
len = (csr & MUSB_CSR0_RXPKTRDY)
? musb_readb(epio, MUSB_COUNT0)
dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
csr, qh, len, urb, musb->ep0_stage);
/* if we just did status stage, we are done */
if (MUSB_EP0_STATUS == musb->ep0_stage) {
retval = IRQ_HANDLED;
/* prepare status */
if (csr & MUSB_CSR0_H_RXSTALL) {
dev_dbg(musb->controller, "STALLING ENDPOINT\n");
} else if (csr & MUSB_CSR0_H_ERROR) {
dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
dev_dbg(musb->controller, "control NAK timeout\n");
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
* ep0 is more likely to stay busy. That's already done
* for bulk RX transfers.
* if (qh->ring.next != &musb->control), then
* we have a candidate... NAKing is *NOT* an error
musb_writew(epio, MUSB_CSR0, 0);
retval = IRQ_HANDLED;
dev_dbg(musb->controller, "aborting\n");
retval = IRQ_HANDLED;
urb->status = status;
/* use the proper sequence to abort the transfer */
if (csr & MUSB_CSR0_H_REQPKT) {
csr &= ~MUSB_CSR0_H_REQPKT;
musb_writew(epio, MUSB_CSR0, csr);
csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
musb_writew(epio, MUSB_CSR0, csr);
musb_h_ep0_flush_fifo(hw_ep);
musb_writeb(epio, MUSB_NAKLIMIT0, 0);
musb_writew(epio, MUSB_CSR0, 0);
if (unlikely(!urb)) {
/* stop endpoint since we have no place for its data, this
* SHOULD NEVER HAPPEN! */
ERR("no URB for end 0\n");
musb_h_ep0_flush_fifo(hw_ep);
/* call common logic and prepare response */
if (musb_h_ep0_continue(musb, len, urb)) {
/* more packets required */
csr = (MUSB_EP0_IN == musb->ep0_stage)
? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
/* data transfer complete; perform status phase */
if (usb_pipeout(urb->pipe)
|| !urb->transfer_buffer_length)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_H_REQPKT;
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
musb_writew(epio, MUSB_CSR0, csr);
retval = IRQ_HANDLED;
musb->ep0_stage = MUSB_EP0_IDLE;
/* call completion handler if done */
musb_advance_schedule(musb, urb, hw_ep, 1);
#ifdef CONFIG_USB_INVENTRA_DMA
/* Host side TX (OUT) using Mentor DMA works as follows:
- if queue was empty, Program Endpoint
- ... which starts DMA to fifo in mode 1 or 0
DMA Isr (transfer complete) -> TxAvail()
- Stop DMA (~DmaEnab) (<--- Alert ... currently happens
only in musb_cleanup_urb)
- TxPktRdy has to be set in mode 0 or for
short packets in mode 1.
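  Added summary (editor's, not from the original driver): the mode choice
  referenced above is made in musb_tx_dma_program(): length > maxpacket
  selects mode 1 (whole-packet bursts; the terminal short/zero packet
  still needs CPU-set TxPktRdy), otherwise mode 0 (the CPU sets TxPktRdy
  after each per-packet DMA completion).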
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->out_qh;
struct urb *urb = next_urb(qh);
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
bool transfer_pending = false;
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
/* with CPPI, DMA sometimes triggers "extra" irqs */
dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
dma ? ", dma" : "");
/* check for errors */
if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
/* dma was disabled, fifo flushed */
dev_dbg(musb->controller, "TX end %d stall\n", epnum);
/* stall; record URB status */
} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
/* (NON-ISO) dma was disabled, fifo flushed */
dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
/* NOTE: this code path would be a good place to PAUSE a
* transfer, if there's some other (nonperiodic) tx urb
* that could use this fifo. (dma complicates it...)
* That's already done for bulk RX transfers.
* if (bulk && qh->ring.next != &musb->out_bulk), then
* we have a candidate... NAKing is *NOT* an error
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS
| MUSB_TXCSR_TXPKTRDY);
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
(void) musb->dma_controller->channel_abort(dma);
/* do the proper sequence to abort the transfer in the
* usb core; the dma engine should already be stopped.
musb_h_tx_flush_fifo(hw_ep);
tx_csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_NAKTIMEOUT
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR, tx_csr);
/* REVISIT may need to clear FLUSHFIFO ... */
musb_writew(epio, MUSB_TXCSR, tx_csr);
musb_writeb(epio, MUSB_TXINTERVAL, 0);
/* second cppi case */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
if (is_dma_capable() && dma && !status) {
* DMA has completed. But if we're using DMA mode 1 (multi
* packet DMA), we need a terminal TXPKTRDY interrupt before
* we can consider this transfer completed, lest we trash
* its last packet when writing the next URB's data. So we
* switch back to mode 0 to get that interrupt; we'll come
* back here once it happens.
if (tx_csr & MUSB_TXCSR_DMAMODE) {
* We shouldn't clear DMAMODE with DMAENAB set; so
* clear them in a safe order. That should be OK
* once TXPKTRDY has been set (and I've never seen
* it being 0 at this moment -- DMA interrupt latency
* is significant) but if it hasn't been then we have
* no choice but to stop being polite and ignore the
* programmer's guide... :-)
* Note that we must write TXCSR with TXPKTRDY cleared
* in order not to re-trigger the packet send (this bit
* can't be cleared by CPU), and there's another caveat:
* TXPKTRDY may be set shortly and then cleared in the
* double-buffered FIFO mode, so we do an extra TXCSR
* read for debouncing...
tx_csr &= musb_readw(epio, MUSB_TXCSR);
if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
tx_csr &= ~(MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR,
tx_csr | MUSB_TXCSR_H_WZC_BITS);
tx_csr &= ~(MUSB_TXCSR_DMAMODE |
MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR,
tx_csr | MUSB_TXCSR_H_WZC_BITS);
* There is no guarantee that we'll get an interrupt
* after clearing DMAMODE as we might have done this
* too late (after TXPKTRDY was cleared by controller).
* Re-read TXCSR as we have spoiled its previous value.
tx_csr = musb_readw(epio, MUSB_TXCSR);
* We may get here from a DMA completion or TXPKTRDY interrupt.
* In any case, we must check the FIFO status here and bail out
* only if the FIFO still has data -- that should prevent the
* "missed" TXPKTRDY interrupts and deal with double-buffered
if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
"CSR %04x\n", tx_csr);
if (!status || dma || usb_pipeisoc(pipe)) {
length = dma->actual_len;
length = qh->segsize;
qh->offset += length;
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = length;
if (++qh->iso_idx >= urb->number_of_packets) {
} else if (dma && urb->transfer_buffer_length == qh->offset) {
/* see if we need to send more data, or ZLP */
if (qh->segsize < qh->maxpacket)
else if (qh->offset == urb->transfer_buffer_length
&& !(urb->transfer_flags
offset = qh->offset;
length = urb->transfer_buffer_length - offset;
transfer_pending = true;
/* urb->status != -EINPROGRESS means request has been faulted,
* so we must abort this transfer after cleanup
if (urb->status != -EINPROGRESS) {
status = urb->status;
urb->status = status;
urb->actual_length = qh->offset;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
if (is_cppi_enabled() || tusb_dma_omap())
musb_h_tx_dma_start(hw_ep);
} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
* PIO: start next packet in this URB.
* REVISIT: some docs say that when hw_ep->tx_double_buffered,
* (and presumably, FIFO is not half-full) we should write *two*
* packets before updating TXCSR; other docs disagree...
if (length > qh->maxpacket)
length = qh->maxpacket;
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
qh->segsize = length;
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
#ifdef CONFIG_USB_INVENTRA_DMA
/* Host side RX (IN) using Mentor DMA works as follows:
- if queue was empty, ProgramEndpoint
- first IN token is sent out (by setting ReqPkt)
LinuxIsr -> RxReady()
/\ => first packet is received
| - Set in mode 0 (DmaEnab, ~ReqPkt)
| -> DMA Isr (transfer complete) -> RxReady()
| - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
| - if urb not complete, send next IN token (ReqPkt)
| | else complete urb.
---------------------------
* Nuances of mode 1:
* For short packets, no ack (+RxPktRdy) is sent automatically
* (even if AutoClear is ON)
* For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
* automatically => major problem, as collecting the next packet becomes
* difficult. Hence mode 1 is not used.
* All we care about at this driver level is that
* (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
* (b) termination conditions are: short RX, or buffer full;
* (c) fault modes include
* - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
* (and that endpoint's dma queue stops immediately)
* - overflow (full, PLUS more bytes in the terminal packet)
* So for example, usb-storage sets URB_SHORT_NOT_OK, and would
* thus be a great candidate for using mode 1 ... for all but the
* last packet of one URB's transfer.
/* Schedule next QH from musb->in_bulk and move the current qh to
* the end; avoids starvation for other endpoints.
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
struct dma_channel *dma;
void __iomem *mbase = musb->mregs;
void __iomem *epio = ep->regs;
struct musb_qh *cur_qh, *next_qh;
musb_ep_select(mbase, ep->epnum);
dma = is_dma_capable() ? ep->rx_channel : NULL;
/* clear nak timeout bit */
rx_csr = musb_readw(epio, MUSB_RXCSR);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
cur_qh = first_qh(&musb->in_bulk);
urb = next_urb(cur_qh);
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
musb->dma_controller->channel_abort(dma);
urb->actual_length += dma->actual_len;
dma->actual_len = 0L;
musb_save_toggle(cur_qh, 1, urb);
/* move cur_qh to end of queue */
list_move_tail(&cur_qh->ring, &musb->in_bulk);
/* get the next qh from musb->in_bulk */
next_qh = first_qh(&musb->in_bulk);
/* set rx_reinit and schedule the next qh */
musb_start_urb(musb, 1, next_qh);
* Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
* and high-bandwidth IN transfer cases.
void musb_host_rx(struct musb *musb, u8 epnum)
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = hw_ep->in_qh;
void __iomem *mbase = musb->mregs;
bool iso_err = false;
struct dma_channel *dma;
musb_ep_select(mbase, epnum);
dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
rx_csr = musb_readw(epio, MUSB_RXCSR);
if (unlikely(!urb)) {
/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
* usbtest #11 (unlinks) triggers it regularly, sometimes
* with fifo full. (Only with DMA??)
dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
musb_readw(epio, MUSB_RXCOUNT));
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
epnum, rx_csr, urb->actual_length,
dma ? dma->actual_len : 0);
/* check for errors, concurrent stall & unlink is not really
if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
/* stall; record URB status */
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
musb_writeb(epio, MUSB_RXINTERVAL, 0);
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
/* NOTE: NAKing is *NOT* an error, so we want to
* continue. Except ... if there's a request for
* another QH, use that instead of starving it.
* Devices like Ethernet and serial adapters keep
* reads posted at all times, which will starve
* other devices without this logic.
if (usb_pipebulk(urb->pipe)
&& !list_is_singular(&musb->in_bulk)) {
musb_bulk_rx_nak_timeout(musb, hw_ep);
musb_ep_select(mbase, epnum);
rx_csr |= MUSB_RXCSR_H_WZC_BITS;
rx_csr &= ~MUSB_RXCSR_DATAERROR;
musb_writew(epio, MUSB_RXCSR, rx_csr);
dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
/* packet error reported later */
} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
/* faults abort the transfer */
/* clean up dma and collect transfer count */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
(void) musb->dma_controller->channel_abort(dma);
xfer_len = dma->actual_len;
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
musb_writeb(epio, MUSB_RXINTERVAL, 0);
if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
/* thorough shutdown for now ... given more precise fault handling
* and better queueing support, we might keep a DMA pipeline going
* while processing this irq for earlier completions.
/* FIXME this is _way_ too much in-line logic for Mentor DMA */
#ifndef CONFIG_USB_INVENTRA_DMA
if (rx_csr & MUSB_RXCSR_H_REQPKT) {
/* REVISIT this happened for a while on some short reads...
* the cleanup still needs investigation... looks bad...
* and also duplicates dma cleanup code above ... plus,
* shouldn't this be the "half full" double buffer case?
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
(void) musb->dma_controller->channel_abort(dma);
xfer_len = dma->actual_len;
dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | rx_csr);
if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
xfer_len = dma->actual_len;
val &= ~(MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR
| MUSB_RXCSR_RXPKTRDY);
musb_writew(hw_ep->regs, MUSB_RXCSR, val);
#ifdef CONFIG_USB_INVENTRA_DMA
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = xfer_len;
/* even if there was an error, we did the dma
* for iso_frame_desc->length
if (d->status != -EILSEQ && d->status != -EOVERFLOW)
if (++qh->iso_idx >= urb->number_of_packets)
/* done if urb buffer is full or short packet is recd */
done = (urb->actual_length + xfer_len >=
urb->transfer_buffer_length
|| dma->actual_len < qh->maxpacket);
/* send IN token for next packet, without AUTOREQ */
val |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | val);
dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
done ? "off" : "reset",
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
} else if (urb->status == -EINPROGRESS) {
/* if no errors, be sure a packet is ready for unloading */
if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
ERR("Rx interrupt with no errors or packet!\n");
/* FIXME this is another "SHOULD NEVER HAPPEN" */
/* do the proper sequence to abort the transfer */
musb_ep_select(mbase, epnum);
val &= ~MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, val);
/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
struct dma_controller *c;
rx_count = musb_readw(epio, MUSB_RXCOUNT);
dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
+ urb->actual_length,
urb->transfer_buffer_length);
c = musb->dma_controller;
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
if (rx_count > d->length) {
if (d_status == 0) {
d_status = -EOVERFLOW;
dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
rx_count, d->length);
d->status = d_status;
buf = urb->transfer_dma + d->offset;
buf = urb->transfer_dma +
dma->desired_mode = 0;
/* because of the issue below, mode 1 will
* only rarely behave with correct semantics.
if ((urb->transfer_flags &
&& (urb->transfer_buffer_length -
dma->desired_mode = 1;
if (rx_count < hw_ep->max_packet_sz_rx) {
dma->desired_mode = 0;
length = urb->transfer_buffer_length;
/* Disadvantage of using mode 1:
* It's basically usable only for mass storage class; essentially all
* other protocols also terminate transfers on short packets.
* An extra IN token is sent at the end of the transfer (due to AUTOREQ)
* If you try to use mode 1 for (transfer_buffer_length - 512), and try
* to use the extra IN token to grab the last packet using mode 0, then
* the problem is that you cannot be sure when the device will send the
* last packet and RxPktRdy set. Sometimes the packet is recd too soon
* such that it gets lost when RxCSR is re-set at the end of the mode 1
* transfer, while sometimes it is recd just a little late so that if you
* try to configure for mode 0 soon after the mode 1 transfer is
* completed, you will find rxcount 0. Okay, so you might think why not
* wait for an interrupt when the pkt is recd. Well, you won't get any!
val = musb_readw(epio, MUSB_RXCSR);
val &= ~MUSB_RXCSR_H_REQPKT;
if (dma->desired_mode == 0)
val &= ~MUSB_RXCSR_H_AUTOREQ;
val |= MUSB_RXCSR_H_AUTOREQ;
val |= MUSB_RXCSR_DMAENAB;
/* autoclear shouldn't be set in high bandwidth */
if (qh->hb_mult == 1)
val |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_H_WZC_BITS | val);
/* REVISIT if when actual_length != 0,
* transfer_buffer_length needs to be
ret = c->channel_program(
dma->desired_mode, buf, length);
c->channel_release(dma);
hw_ep->rx_channel = NULL;
val = musb_readw(epio, MUSB_RXCSR);
val &= ~(MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR);
musb_writew(epio, MUSB_RXCSR, val);
#endif /* Mentor DMA */
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
done = musb_host_packet_rx(musb, urb,
dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (urb->status == -EINPROGRESS)
urb->status = status;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
* the software schedule associates multiple such nodes with a given
* host side hardware endpoint + direction; scheduling may activate
* that hardware endpoint.
static int musb_schedule(
int best_end, epnum;
struct musb_hw_ep *hw_ep = NULL;
struct list_head *head = NULL;
struct urb *urb = next_urb(qh);
/* use fixed hardware for control and bulk */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
head = &musb->control;
hw_ep = musb->control_ep;
/* else, periodic transfers get muxed to other endpoints */
* We know this qh hasn't been scheduled, so all we need to do
* is choose which hardware endpoint to put it on ...
* REVISIT what we really want here is a regular schedule tree
* like e.g. OHCI uses.
for (epnum = 1, hw_ep = musb->endpoints + 1;
epnum < musb->nr_endpoints;
if (musb_ep_get_qh(hw_ep, is_in) != NULL)
if (hw_ep == musb->bulk_ep)
diff = hw_ep->max_packet_sz_rx;
diff = hw_ep->max_packet_sz_tx;
diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
* Mentor controller has a bug in that if we schedule
* a BULK Tx transfer on an endpoint that had earlier
* handled ISOC then the BULK transfer has to start on
* a zero toggle. If the BULK transfer starts on a 1
* toggle then this transfer will fail as the mentor
* controller starts the Bulk transfer on a 0 toggle
* irrespective of the programming of the toggle bits
* in the TXCSR register. Check for this condition
* while allocating the EP for a Tx Bulk transfer. If
hw_ep = musb->endpoints + epnum;
toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
/* use bulk reserved ep1 if no other ep is free */
if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
hw_ep = musb->bulk_ep;
head = &musb->in_bulk;
head = &musb->out_bulk;
/* Enable bulk RX NAK timeout scheme when bulk requests are
* multiplexed. This scheme doesn't work in high speed to full
* speed scenario as NAK interrupts are not coming from a
* full speed device connected to a high speed device.
* NAK timeout interval is 8 (128 uframe or 16ms) for HS and
* 4 (8 frame or 8ms) for FS device.
if (is_in && qh->dev)
(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
} else if (best_end < 0) {
hw_ep = musb->endpoints + best_end;
dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
idle = list_empty(head);
list_add_tail(&qh->ring, head);
qh->hep->hcpriv = qh;
musb_start_urb(musb, is_in, qh);
/* check if transaction translator is needed for device */
static int tt_needed(struct musb *musb, struct usb_device *dev)
if ((musb_readb(musb->mregs, MUSB_POWER) & MUSB_POWER_HSMODE) &&
(dev->speed < USB_SPEED_HIGH))
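/*
 * Added note (editor's, not from the original driver): tt_needed() is true
 * when the controller link runs at high speed but the device below it does
 * not, i.e. a hub's transaction translator sits in between;
 * musb_urb_enqueue() then derives the hub address/port to program via
 * find_tt().
 */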
static int musb_urb_enqueue(
int musb_urb_enqueue(
struct usb_hcd *hcd,
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
struct usb_host_endpoint *hep = urb->ep;
struct usb_endpoint_descriptor *epd = &hep->desc;
/* host role must be active */
if (!is_host_active(musb) || !musb->is_active)
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
qh = ret ? NULL : hep->hcpriv;
spin_unlock_irqrestore(&musb->lock, flags);
/* DMA mapping was already done, if needed, and this urb is on
* hep->urb_list now ... so we're done, unless hep wasn't yet
* scheduled onto a live qh.
* REVISIT best to keep hep->hcpriv valid until the endpoint gets
* disabled, testing for empty qh->ring and avoiding qh setup costs
* except for the first urb queued after a config change.
/* Allocate and initialize qh, minimizing the work done each time
* hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
* REVISIT consider a dedicated qh kmem_cache, so it's harder
* for bugs in other kernel code to break this driver...
qh = kzalloc(sizeof *qh, mem_flags);
spin_lock_irqsave(&musb->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&musb->lock, flags);
INIT_LIST_HEAD(&qh->ring);
qh->maxpacket = usb_endpoint_maxp(epd);
qh->type = usb_endpoint_type(epd);
/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
* Some musb cores don't support high bandwidth ISO transfers; and
* we don't (yet!) support high bandwidth interrupt transfers.
qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
if (qh->hb_mult > 1) {
int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
qh->maxpacket &= 0x7ff;
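/*
 * Added example (editor's): a high-bandwidth ISO endpoint advertising
 * wMaxPacketSize 0x1400 yields hb_mult = 1 + ((0x1400 >> 11) & 0x03) = 3
 * transactions per microframe, each of maxpacket 0x1400 & 0x7ff = 1024
 * bytes.
 */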
qh->epnum = usb_endpoint_num(epd);
/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
/* precompute rxtype/txtype/type0 register */
type_reg = (qh->type << 4) | qh->epnum;
switch (urb->dev->speed) {
case USB_SPEED_FULL:
qh->type_reg = type_reg;
/* Precompute RXINTERVAL/TXINTERVAL register */
case USB_ENDPOINT_XFER_INT:
* Full/low speeds use the linear encoding,
* high speed uses the logarithmic encoding.
if (urb->dev->speed <= USB_SPEED_FULL) {
interval = max_t(u8, epd->bInterval, 1);
case USB_ENDPOINT_XFER_ISOC:
/* ISO always uses logarithmic encoding */
interval = min_t(u8, epd->bInterval, 16);
/* REVISIT we actually want to use NAK limits, hinting to the
* transfer scheduling logic to try some other qh, e.g. try
* interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
* The downside of disabling this is that transfer scheduling
* gets VERY unfair for nonperiodic transfers; a misbehaving
* peripheral could make that hurt. That's perfectly normal
* for reads from network or serial adapters ... so we have
* partial NAKlimit support for bulk RX.
* The upside of disabling it is simpler transfer scheduling.
qh->intv_reg = interval;
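/*
 * Added example (editor's): with the logarithmic encoding above, a
 * high-speed interrupt endpoint with bInterval = 4 is polled every
 * 2^(4-1) = 8 microframes (1 ms); at full/low speed the linear encoding
 * keeps bInterval frames directly.
 */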
/* precompute addressing for external hub/tt ports */
if (musb->is_multipoint) {
struct usb_device *parent = urb->dev->parent;
if (parent != hcd->self.root_hub) {
qh->h_addr_reg = (u8) parent->devnum;
/* set up tt info if needed */
qh->h_port_reg = (u8) urb->dev->ttport;
if (urb->dev->tt->hub)
(u8) urb->dev->tt->hub->devnum;
if (urb->dev->tt->multi)
qh->h_addr_reg |= 0x80;
if (tt_needed(musb, urb->dev)) {
u16 hub_port = find_tt(urb->dev);
qh->h_addr_reg = (u8) (hub_port >> 8);
qh->h_port_reg = (u8) (hub_port & 0xff);
/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
* until we get real dma queues (with an entry for each urb/buffer),
* we only have work to do in the former case.
spin_lock_irqsave(&musb->lock, flags);
/* some concurrent activity submitted another urb to hep...
* odd, rare, error prone, but legal.
ret = musb_schedule(musb, qh,
epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
/* FIXME set urb->start_frame for iso/intr, it's tested in
* musb_start_urb(), but otherwise only konicawc cares ...
spin_unlock_irqrestore(&musb->lock, flags);
spin_lock_irqsave(&musb->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&musb->lock, flags);
* abort a transfer that's at the head of a hardware queue.
* called with controller locked, irqs blocked
* that hardware queue advances to the next transfer, unless prevented
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
struct musb_hw_ep *ep = qh->hw_ep;
struct musb *musb = ep->musb;
void __iomem *epio = ep->regs;
unsigned hw_end = ep->epnum;
void __iomem *regs = ep->musb->mregs;
int is_in = usb_pipein(urb->pipe);
musb_ep_select(regs, hw_end);
if (is_dma_capable()) {
struct dma_channel *dma;
dma = is_in ? ep->rx_channel : ep->tx_channel;
status = ep->musb->dma_controller->channel_abort(dma);
dev_dbg(musb->controller,
"abort %cX%d DMA for urb %p --> %d\n",
is_in ? 'R' : 'T', ep->epnum,
urb->actual_length += dma->actual_len;
/* turn off DMA requests, discard state, stop polling ... */
if (ep->epnum && is_in) {
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
/* REVISIT we still get an irq; should likely clear the
* endpoint's irq status here to avoid bogus irqs.
* clearing that status is platform-specific...
} else if (ep->epnum) {
musb_h_tx_flush_fifo(ep);
csr = musb_readw(epio, MUSB_TXCSR);
csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_H_RXSTALL
| MUSB_TXCSR_H_NAKTIMEOUT
| MUSB_TXCSR_H_ERROR
| MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may need to clear FLUSHFIFO ... */
musb_writew(epio, MUSB_TXCSR, csr);
/* flush cpu writebuffer */
csr = musb_readw(epio, MUSB_TXCSR);
musb_h_ep0_flush_fifo(ep);
musb_advance_schedule(ep->musb, urb, ep, is_in);
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct musb *musb = hcd_to_musb(hcd);
unsigned long flags;
int is_in = usb_pipein(urb->pipe);
dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe),
is_in ? "in" : "out");
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
* Any URB not actively programmed into endpoint hardware can be
* immediately given back; that's any URB not at the head of an
* endpoint queue, unless someday we get real DMA queues. And even
* if it's at the head, it might not be known to the hardware...
* Otherwise abort current transfer, pending DMA, etc.; urb->status
* has already been updated. This is a synchronous abort; it'd be
* OK to hold off until after some IRQ, though.
* NOTE: qh is invalid unless !list_empty(&hep->urb_list)
|| urb->urb_list.prev != &qh->hep->urb_list
|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
int ready = qh->is_ready;
musb_giveback(musb, urb, 0);
qh->is_ready = ready;
/* If nothing else (usually musb_giveback) is using it
* and its URB list has emptied, recycle this qh.
if (ready && list_empty(&qh->hep->urb_list)) {
qh->hep->hcpriv = NULL;
list_del(&qh->ring);
ret = musb_cleanup_urb(urb, qh);
spin_unlock_irqrestore(&musb->lock, flags);
/* disable an endpoint */
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
spin_lock_irqsave(&musb->lock, flags);
/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
/* Kick the first URB off the hardware, if needed */
if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
/* make software (then hardware) stop ASAP */
urb->status = -ESHUTDOWN;
musb_cleanup_urb(urb, qh);
/* Then nuke all the others ... and advance the
* queue on hw_ep (e.g. bulk ring) when we're done.
while (!list_empty(&hep->urb_list)) {
urb->status = -ESHUTDOWN;
musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
/* Just empty the queue; the hardware is busy with
* other transfers, and since !qh->is_ready nothing
* will activate any of these as it advances.
while (!list_empty(&hep->urb_list))
musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
list_del(&qh->ring);
spin_unlock_irqrestore(&musb->lock, flags);
static int musb_h_get_frame_number(struct usb_hcd *hcd)
struct musb *musb = hcd_to_musb(hcd);
return musb_readw(musb->mregs, MUSB_FRAME);
static int musb_h_start(struct usb_hcd *hcd)
struct musb *musb = hcd_to_musb(hcd);
/* NOTE: musb_start() is called when the hub driver turns
* on port power, or when (OTG) peripheral starts.
hcd->state = HC_STATE_RUNNING;
musb->port1_status = 0;
static void musb_h_stop(struct usb_hcd *hcd)
musb_stop(hcd_to_musb(hcd));
hcd->state = HC_STATE_HALT;
static int musb_bus_suspend(struct usb_hcd *hcd)
struct musb *musb = hcd_to_musb(hcd);
if (!is_host_active(musb))
switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
case OTG_STATE_A_WAIT_VRISE:
/* ID could be grounded even if there's no device
* on the other end of the cable. NOTE that the
* A_WAIT_VRISE timers are messy with MUSB...
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
if (musb->is_active) {
WARNING("trying to suspend as %s while active\n",
otg_state_string(musb->xceiv->state));
static int musb_bus_resume(struct usb_hcd *hcd)
/* resuming child port does the work */
const struct hc_driver musb_hc_driver = {
.description = "musb-hcd",
.product_desc = "MUSB HDRC host driver",
.hcd_priv_size = sizeof(struct musb),
.flags = HCD_USB2 | HCD_MEMORY,
/* not using irq handler or reset hooks from usbcore, since
* those must be shared with peripheral code for OTG configs
.start = musb_h_start,
.stop = musb_h_stop,
.get_frame_number = musb_h_get_frame_number,
.urb_enqueue = musb_urb_enqueue,
.urb_dequeue = musb_urb_dequeue,
.endpoint_disable = musb_h_disable,
.hub_status_data = musb_hub_status_data,
.hub_control = musb_hub_control,
.bus_suspend = musb_bus_suspend,
.bus_resume = musb_bus_resume,
/* .start_port_reset = NULL, */
/* .hub_irq_enable = NULL, */