@@ -367 +367 @@
 static const char	ep0_string[] = "ep0in";
 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
 struct pch_udc_dev *pch_udc;		/* pointer to device object */

 static int speed_fs;
 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
@@ -383 +382 @@ struct pch_udc_request
  * @dma_mapped:	DMA memory mapped for request
  * @dma_done:	DMA completed for request
  * @chain_len:	chain length
+ * @buf:	Buffer memory for align adjustment
+ * @dma:	DMA memory for align adjustment
  */
 struct pch_udc_request {
 	struct usb_request		req;
@@ -616 +619 @@ pch_udc_ep_set_bufsz()
  * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
  * @ep:		Reference to structure of type pch_udc_ep_regs
- * @buf_size:	The buffer size
+ * @buf_size:	The buffer word size
  */
 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
				 u32 buf_size, u32 ep_in)
@@ -636 +639 @@ pch_udc_ep_set_maxpkt()
  * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
  * @ep:		Reference to structure of type pch_udc_ep_regs
- * @pkt_size:	The packet size
+ * @pkt_size:	The packet byte size
  */
 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
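Taken together, the two corrected lines distinguish units: pch_udc_ep_set_maxpkt() is given a size in bytes, while pch_udc_ep_set_bufsz() is given a size in 32-bit words. A hypothetical caller, named here only for illustration (the real driver derives these values in its endpoint-enable path):

/* Hypothetical helper, illustrating the units of the two setters. */
static void example_program_ep_sizes(struct pch_udc_ep *ep,
				     const struct usb_endpoint_descriptor *desc)
{
	u32 maxpkt = usb_endpoint_maxp(desc);		/* bytes */
	u32 bufsz = maxpkt / sizeof(u32);		/* 32-bit words */

	pch_udc_ep_set_maxpkt(ep, maxpkt);		/* packet byte size */
	pch_udc_ep_set_bufsz(ep, bufsz, ep->in);	/* buffer word size */
}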
@@ -921 +924 @@ pch_udc_ep_fifo_flush()
 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
 {
-	unsigned int loopcnt = 0;
-	struct pch_udc_dev *dev = ep->dev;
-
 	if (dir) {	/* IN ep */
 		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
 		return;
 	}
-
-	if (pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP)
-		return;
-	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
-	/* Wait for RxFIFO Empty */
-	loopcnt = 10000;
-	while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
-	       --loopcnt)
-		udelay(5);
-	if (!loopcnt)
-		dev_err(&dev->pdev->dev, "RxFIFO not Empty\n");
-	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
 }
@@ -1222 +1210 @@ complete_req()
 	if (req->dma_mapped) {
-		if (ep->in)
-			dma_unmap_single(&dev->pdev->dev, req->req.dma,
-					 req->req.length, DMA_TO_DEVICE);
-		else
-			dma_unmap_single(&dev->pdev->dev, req->req.dma,
-					 req->req.length, DMA_FROM_DEVICE);
+		if (req->dma == DMA_ADDR_INVALID) {
+			if (ep->in)
+				dma_unmap_single(&dev->pdev->dev, req->req.dma,
+						 req->req.length,
+						 DMA_TO_DEVICE);
+			else
+				dma_unmap_single(&dev->pdev->dev, req->req.dma,
+						 req->req.length,
+						 DMA_FROM_DEVICE);
+			req->req.dma = DMA_ADDR_INVALID;
+		} else {
+			if (ep->in)
+				dma_unmap_single(&dev->pdev->dev, req->dma,
+						 req->req.length,
+						 DMA_TO_DEVICE);
+			else {
+				dma_unmap_single(&dev->pdev->dev, req->dma,
+						 req->req.length,
+						 DMA_FROM_DEVICE);
+				memcpy(req->req.buf, req->buf, req->req.length);
+			}
+			kfree(req->buf);
+			req->dma = DMA_ADDR_INVALID;
+		}
 		req->dma_mapped = 0;
-		req->req.dma = DMA_ADDR_INVALID;
 	}
 	ep->halted = 1;
 	spin_unlock(&dev->lock);
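Both branches above key off the driver's "not mapped" marker: req->req.dma carries it while the gadget's own buffer is unmapped, and req->dma carries it while no internal bounce buffer is in use. The definition is assumed here to be the conventional one (check the top of pch_udc.c):

#define DMA_ADDR_INVALID	(~(dma_addr_t)0)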
@@ -1268 +1273 @@ pch_udc_free_dma_chain()
 	struct pch_udc_data_dma_desc *td = req->td_data;
 	unsigned i = req->chain_len;

+	dma_addr_t addr2;
+	dma_addr_t addr = (dma_addr_t)td->next;
+	td->next = 0x00;
 	for (; i > 1; --i) {
-		dma_addr_t addr = (dma_addr_t)td->next;
 		/* do not free first desc., will be done by free for request */
 		td = phys_to_virt(addr);
+		addr2 = (dma_addr_t)td->next;
 		pci_pool_free(dev->data_requests, td, addr);
+		addr = addr2;
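The chain torn down above, and rebuilt in the next hunk, is a singly linked list of hardware DMA descriptors: each holds a status word (PCH_UDC_BS_* buffer state, PCH_UDC_RXTX_* result bits and byte count), the bus address of its slice of the data buffer, and the bus address of the next descriptor. A sketch of the layout as recalled from this driver (field names should be checked against pch_udc.c):

struct pch_udc_data_dma_desc {
	u32 status;	/* PCH_UDC_BS_* | PCH_UDC_RXTX_* | byte count */
	u32 reserved;
	u32 dataptr;	/* bus address of this descriptor's data */
	u32 next;	/* bus address of the next descriptor in the chain */
};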
@@ -1301 +1312 @@ pch_udc_create_dma_chain()
 	if (req->chain_len > 1)
 		pch_udc_free_dma_chain(ep->dev, req);

+	if (req->dma == DMA_ADDR_INVALID)
+		td->dataptr = req->req.dma;
+	else
+		td->dataptr = req->dma;
+
+	td->status = PCH_UDC_BS_HST_BSY;
 	for (; ; bytes -= buf_len, ++len) {
-		if (ep->in)
-			td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
-		else
-			td->status = PCH_UDC_BS_HST_BSY;
-
+		td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
 		if (bytes <= buf_len)
 			break;
 		last = td;
 		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
				    &dma_addr);
 		if (!td)
			goto nomem;
 		i += buf_len;
-		td->dataptr = req->req.dma + i;
+		td->dataptr = req->td_data->dataptr + i;
 		last->next = dma_addr;
@@ -1355 +1366 @@ prepare_dma()
-	req->td_data->dataptr = req->req.dma;
-	req->td_data->status |= PCH_UDC_DMA_LAST;
 	/* Allocate and create a DMA chain */
 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
 	if (retval) {
-		pr_err("%s: could not create DMA chain: %d\n",
-		       __func__, retval);
+		pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
 		return retval;
 	}
-	if (req->req.length <= ep->ep.maxpacket)
-		req->td_data->status = PCH_UDC_DMA_LAST | PCH_UDC_BS_HST_BSY |
-					req->req.length;
-	/* if bytes < max packet then tx bytes must
-	 * be written in packet per buffer mode
-	 */
-	if ((req->req.length < ep->ep.maxpacket) || !ep->num)
-		req->td_data->status = (req->td_data->status &
-					~PCH_UDC_RXTX_BYTES) | req->req.length;
-	req->td_data->status = (req->td_data->status &
-				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_BSY;
+	if (ep->in)
+		req->td_data->status = (req->td_data->status &
+					~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
 	return 0;
@@ -1613 +1612 @@ pch_udc_pcd_queue()
 	/* map the buffer for dma */
 	if (usbreq->length &&
 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
-		if (ep->in)
-			usbreq->dma = dma_map_single(&dev->pdev->dev,
-						     usbreq->buf,
-						     usbreq->length,
-						     DMA_TO_DEVICE);
-		else
-			usbreq->dma = dma_map_single(&dev->pdev->dev,
-						     usbreq->buf,
-						     usbreq->length,
-						     DMA_FROM_DEVICE);
+		if (!((unsigned long)(usbreq->buf) & 0x03)) {
+			if (ep->in)
+				usbreq->dma = dma_map_single(&dev->pdev->dev,
+							     usbreq->buf,
+							     usbreq->length,
+							     DMA_TO_DEVICE);
+			else
+				usbreq->dma = dma_map_single(&dev->pdev->dev,
+							     usbreq->buf,
+							     usbreq->length,
+							     DMA_FROM_DEVICE);
+		} else {
+			req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
+			if (!req->buf)
+				return -ENOMEM;
+			if (ep->in) {
+				memcpy(req->buf, usbreq->buf, usbreq->length);
+				req->dma = dma_map_single(&dev->pdev->dev,
+							  req->buf,
+							  usbreq->length,
+							  DMA_TO_DEVICE);
+			} else
+				req->dma = dma_map_single(&dev->pdev->dev,
+							  req->buf,
+							  usbreq->length,
+							  DMA_FROM_DEVICE);
+		}
 		req->dma_mapped = 1;
 	}
 	if (usbreq->length > 0) {
@@ -1920 +1936 @@ pch_udc_complete_receiver()
 	struct pch_udc_request *req;
 	struct pch_udc_dev *dev = ep->dev;
 	unsigned int count;
+	struct pch_udc_data_dma_desc *td;
+	dma_addr_t addr;

 	if (list_empty(&ep->queue))
 		return;
 	/* next request */
 	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
-	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
-	    PCH_UDC_BS_DMA_DONE)
-		return;
 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
 	pch_udc_ep_set_ddptr(ep, 0);
-	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
-	    PCH_UDC_RTS_SUCC) {
-		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
-			"epstatus=0x%08x\n",
-			(req->td_data_last->status & PCH_UDC_RXTX_STS),
-			(int)(ep->epsts));
-		return;
-	}
-	count = req->td_data_last->status & PCH_UDC_RXTX_BYTES;
+	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
+	    PCH_UDC_BS_DMA_DONE)
+		td = req->td_data_last;
+	else
+		td = req->td_data;
+
+	while (1) {
+		if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
+			dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
+				"epstatus=0x%08x\n",
+				(req->td_data->status & PCH_UDC_RXTX_STS),
+				(int)(ep->epsts));
+			return;
+		}
+		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
+			if (td->status & PCH_UDC_DMA_LAST) {
+				count = td->status & PCH_UDC_RXTX_BYTES;
+				break;
+			}
+		if (td == req->td_data_last) {
+			dev_err(&dev->pdev->dev, "Not complete RX descriptor");
+			return;
+		}
+		addr = (dma_addr_t)td->next;
+		td = phys_to_virt(addr);
+	}
 	/* on 64k packets the RXBYTES field is zero */
 	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
 		count = UDC_DMA_MAXPACKET;
 	req->td_data->status |= PCH_UDC_DMA_LAST;
-	req->td_data_last->status |= PCH_UDC_BS_HST_BSY;
+	td->status |= PCH_UDC_BS_HST_BSY;

 	req->dma_going = 0;
 	req->req.actual = count;
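The 64k special case near the end depends on the driver's maximum DMA transfer size; the value below is an assumption (the usual 64 KiB) and should be checked against the definition in pch_udc.c:

#define UDC_DMA_MAXPACKET	65536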