--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -127,20 +127,20 @@
 		unmap_array[unmap_cons].skb = NULL;
 
-		pci_unmap_single(bnad->pcidev,
-				 pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr), skb_headlen(skb),
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		if (++unmap_cons >= unmap_q->q_depth)
 			break;
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			pci_unmap_page(bnad->pcidev,
-				       pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+				       dma_unmap_addr(&unmap_array[unmap_cons],
 						      dma_addr),
 				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+				       DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 					   0);
 			if (++unmap_cons >= unmap_q->q_depth)
@@ -199,22 +199,22 @@
 		sent_bytes += skb->len;
 		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
 
-		pci_unmap_single(bnad->pcidev,
-				 pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr), skb_headlen(skb),
-				 PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+				 DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 
 		prefetch(&unmap_array[unmap_cons + 1]);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			prefetch(&unmap_array[unmap_cons + 1]);
 
-			pci_unmap_page(bnad->pcidev,
-				       pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+				       dma_unmap_addr(&unmap_array[unmap_cons],
 						      dma_addr),
 				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+				       DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 					   0);
 			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
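
Both TX-path hunks above make the same mechanical substitution: pci_unmap_single()/pci_unmap_page() on bnad->pcidev become dma_unmap_single()/dma_unmap_page() on &bnad->pcidev->dev, PCI_DMA_TODEVICE becomes DMA_TO_DEVICE, and the pci_unmap_addr*() bookkeeping macros become their dma_unmap_addr*() equivalents. A minimal sketch of the resulting pattern, using a hypothetical tx_unmap_ent type rather than the driver's struct bnad_skb_unmap:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical unmap entry; the handle is stored through the
 * DEFINE_DMA_UNMAP_ADDR() helper so it compiles away on
 * configurations that need no unmap state. */
struct tx_unmap_ent {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

static void tx_unmap_head(struct device *dev, struct tx_unmap_ent *ent)
{
	/* The skb head was mapped with dma_map_single(); paged
	 * fragments would be released with dma_unmap_page() in the
	 * same way. */
	dma_unmap_single(dev, dma_unmap_addr(ent, dma_addr),
			 skb_headlen(ent->skb), DMA_TO_DEVICE);

	/* Mirror the driver and clear the stored handle. */
	dma_unmap_addr_set(ent, dma_addr, 0);
}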
@@ -340,19 +340,22 @@
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
 	int unmap_cons;
 
 	unmap_q = rcb->unmap_q;
+	unmap_array = unmap_q->unmap_array;
 	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-		skb = unmap_q->unmap_array[unmap_cons].skb;
+		skb = unmap_array[unmap_cons].skb;
 		if (!skb)
 			continue;
-		unmap_q->unmap_array[unmap_cons].skb = NULL;
-		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_cons],
-					dma_addr), rcb->rxq->buffer_size,
-					PCI_DMA_FROMDEVICE);
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
+				 rcb->rxq->buffer_size,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 	}
 	bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@
 		skb->dev = bnad->netdev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		unmap_array[unmap_prod].skb = skb;
-		dma_addr = pci_map_single(bnad->pcidev, skb->data,
-			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+					  rcb->rxq->buffer_size,
+					  DMA_FROM_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
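
The RX refill path follows suit; one behavioural point worth noting is that a streaming mapping can fail, and the generic API expects the result to be checked with dma_mapping_error(). The hunk above keeps the driver's original unchecked flow; a defensive version would look roughly like this sketch, not the driver's code:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Map one receive buffer for the device to write into.  Returns 0 and
 * fills *pa on success, or -ENOMEM if the IOMMU/swiotlb could not set
 * up the mapping, in which case the caller keeps the skb and retries. */
static int rx_map_skb(struct device *dev, struct sk_buff *skb,
		      unsigned int buffer_size, dma_addr_t *pa)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, skb->data, buffer_size,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	*pa = dma_addr;
	return 0;
}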
@@ -434,7 +438,8 @@
 	struct bna_rcb *rcb = NULL;
 	unsigned int wi_range, packets = 0, wis = 0;
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
-	u32 flags;
+	u32 flags, unmap_cons;
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
@@ -456,17 +461,17 @@
 		rcb = ccb->rcb[1];
 
 	unmap_q = rcb->unmap_q;
+	unmap_array = unmap_q->unmap_array;
+	unmap_cons = unmap_q->consumer_index;
 
-	skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+	skb = unmap_array[unmap_cons].skb;
 	BUG_ON(!(skb));
-	unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-	pci_unmap_single(bnad->pcidev,
-			 pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_q->
-						    consumer_index],
-					dma_addr),
-			 rcb->rxq->buffer_size,
-			 PCI_DMA_FROMDEVICE);
+	unmap_array[unmap_cons].skb = NULL;
+	dma_unmap_single(&bnad->pcidev->dev,
+			 dma_unmap_addr(&unmap_array[unmap_cons],
+					dma_addr),
+			 rcb->rxq->buffer_size,
+			 DMA_FROM_DEVICE);
 	BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
 
 	/* Should be more efficient ? Performance ? */
@@ -1015,8 +1020,8 @@
 		if (mem_info->mem_type == BNA_MEM_T_DMA) {
 			BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
 					dma_pa);
-			pci_free_consistent(bnad->pcidev,
-					    mem_info->mdl[i].len,
-					    mem_info->mdl[i].kva, dma_pa);
+			dma_free_coherent(&bnad->pcidev->dev,
+					  mem_info->mdl[i].len,
+					  mem_info->mdl[i].kva, dma_pa);
 		} else
 			kfree(mem_info->mdl[i].kva);
@@ -1047,8 +1052,9 @@
 		for (i = 0; i < mem_info->num; i++) {
 			mem_info->mdl[i].len = mem_info->len;
 			mem_info->mdl[i].kva =
-				pci_alloc_consistent(bnad->pcidev,
-						mem_info->len, &dma_pa);
+				dma_alloc_coherent(&bnad->pcidev->dev,
+						   mem_info->len, &dma_pa,
+						   GFP_KERNEL);
 
 			if (mem_info->mdl[i].kva == NULL)
 				goto err_return;
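
The coherent-memory conversion is more than a rename: pci_alloc_consistent() was a compatibility wrapper that always allocated with GFP_ATOMIC, while dma_alloc_coherent() exposes the gfp argument, so a sleepable setup path like this one can pass GFP_KERNEL and give the allocator room to reclaim. In isolation the pairing looks like this sketch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Allocate and free a coherent DMA buffer against the PCI device's
 * generic struct device, as the converted driver now does. */
static void *coherent_get(struct pci_dev *pdev, size_t len, dma_addr_t *pa)
{
	return dma_alloc_coherent(&pdev->dev, len, pa, GFP_KERNEL);
}

static void coherent_put(struct pci_dev *pdev, size_t len, void *kva,
			 dma_addr_t pa)
{
	dma_free_coherent(&pdev->dev, len, kva, pa);
}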
@@ -1831,7 +1837,6 @@
 	/* Initialize the Rx event handlers */
 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
 	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
-	rx_cbfn.rcb_destroy_cbfn = NULL;
 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
 	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
@@ -2600,9 +2605,9 @@
 	unmap_q->unmap_array[unmap_prod].skb = skb;
 	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
 	txqent->vector[vect_id].length = htons(skb_headlen(skb));
-	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-		PCI_DMA_TODEVICE);
-	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+				  skb_headlen(skb), DMA_TO_DEVICE);
+	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 			   dma_addr);
 
 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2631,10 +2636,8 @@
 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
 		txqent->vector[vect_id].length = htons(size);
 
-		dma_addr =
-			pci_map_page(bnad->pcidev, frag->page,
-				     frag->page_offset, size,
-				     PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+					frag->page_offset, size, DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
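
For the paged fragments the substitution is pci_map_page() to dma_map_page() with the same page/offset/length triple. A sketch of the per-fragment step, written against the 2011-era skb_frag_t layout the driver uses here (newer kernels would go through skb_frag_dma_map() instead):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map one paged fragment of an outgoing skb.  frag->page and
 * frag->page_offset follow the skb_frag_t layout of this kernel
 * generation; a careful caller would also run dma_mapping_error()
 * on the result. */
static dma_addr_t tx_map_frag(struct device *dev, const skb_frag_t *frag,
			      unsigned int size)
{
	return dma_map_page(dev, frag->page, frag->page_offset, size,
			    DMA_TO_DEVICE);
}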
@@ -3022,13 +3025,13 @@
 	err = pci_request_regions(pdev, BNAD_NAME);
 	if (err)
 		goto disable_device;
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		*using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-						DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err)
 				goto release_regions;
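
The probe-time policy is unchanged: try a 64-bit mask for both streaming and coherent allocations, record using_dac on success, and otherwise fall back to 32 bits. Later kernels added dma_set_mask_and_coherent(), which collapses each pair of calls into one; on such kernels the whole block reduces to roughly the following sketch, which is not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Same 64-bit-first, 32-bit-fallback policy with the combined helper. */
static int set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;	/* 64-bit addressing available */
		return 0;
	}

	*using_dac = false;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}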