~ubuntu-branches/debian/wheezy/linux-2.6/wheezy


Viewing changes to drivers/net/bna/bnad.c

  • Committer: Bazaar Package Importer
  • Author(s): Ben Hutchings, Ben Hutchings, Aurelien Jarno
  • Date: 2011-06-07 12:14:05 UTC
  • mfrom: (43.1.9 sid)
  • Revision ID: james.westby@ubuntu.com-20110607121405-i3h1rd7nrnd2b73h
Tags: 2.6.39-2
[ Ben Hutchings ]
* [x86] Enable BACKLIGHT_APPLE, replacing BACKLIGHT_MBP_NVIDIA
  (Closes: #627492)
* cgroups: Disable memory resource controller by default. Allow it
  to be enabled using kernel parameter 'cgroup_enable=memory'.
* rt2800usb: Enable support for more USB devices including
  Linksys WUSB600N (Closes: #596626) (this change was accidentally
  omitted from 2.6.39-1)
* [x86] Remove Celeron from list of processors supporting PAE. Most
  'Celeron M' models do not.
* Update debconf template translations:
  - Swedish (Martin Bagge) (Closes: #628932)
  - French (David Prévot) (Closes: #628191)
* aufs: Update for 2.6.39 (Closes: #627837)
* Add stable 2.6.39.1, including:
  - ext4: don't set PageUptodate in ext4_end_bio()
  - pata_cmd64x: fix boot crash on parisc (Closes: #622997, #622745)
  - ext3: Fix fs corruption when make_indexed_dir() fails
  - netfilter: nf_ct_sip: validate Content-Length in TCP SIP messages
  - sctp: fix race between sctp_bind_addr_free() and
    sctp_bind_addr_conflict()
  - sctp: fix memory leak of the ASCONF queue when free asoc
  - md/bitmap: fix saving of events_cleared and other state
  - cdc_acm: Fix oops when Droids MuIn LCD is connected
  - cx88: Fix conversion from BKL to fine-grained locks (Closes: #619827)
  - keys: Set cred->user_ns in key_replace_session_keyring (CVE-2011-2184)
  - tmpfs: fix race between truncate and writepage
  - nfs41: Correct offset for LAYOUTCOMMIT
  - xen/mmu: fix a race window causing leave_mm BUG()
  - ext4: fix possible use-after-free in ext4_remove_li_request()
  For the complete list of changes, see:
   http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.39.1
* Bump ABI to 2
* netfilter: Enable IP_SET, IP_SET_BITMAP_IP, IP_SET_BITMAP_IPMAC,
  IP_SET_BITMAP_PORT, IP_SET_HASH_IP, IP_SET_HASH_IPPORT,
  IP_SET_HASH_IPPORTIP, IP_SET_HASH_IPPORTNET, IP_SET_HASH_NET,
  IP_SET_HASH_NETPORT, IP_SET_LIST_SET, NETFILTER_XT_SET as modules
  (Closes: #629401)

[ Aurelien Jarno ]
* [mipsel/loongson-2f] Disable SCSI_LPFC to work around a GCC ICE.

Diff for drivers/net/bna/bnad.c (mostly the conversion from the PCI DMA wrappers to the generic DMA API):
@@ -126,22 +126,22 @@
                 }
                 unmap_array[unmap_cons].skb = NULL;
 
-                pci_unmap_single(bnad->pcidev,
-                                 pci_unmap_addr(&unmap_array[unmap_cons],
+                dma_unmap_single(&bnad->pcidev->dev,
+                                 dma_unmap_addr(&unmap_array[unmap_cons],
                                                 dma_addr), skb_headlen(skb),
-                                                PCI_DMA_TODEVICE);
+                                                DMA_TO_DEVICE);
 
-                pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+                dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
                 if (++unmap_cons >= unmap_q->q_depth)
                         break;
 
                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                        pci_unmap_page(bnad->pcidev,
-                                       pci_unmap_addr(&unmap_array[unmap_cons],
+                        dma_unmap_page(&bnad->pcidev->dev,
+                                       dma_unmap_addr(&unmap_array[unmap_cons],
                                                       dma_addr),
                                        skb_shinfo(skb)->frags[i].size,
-                                       PCI_DMA_TODEVICE);
-                        pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+                                       DMA_TO_DEVICE);
+                        dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
                                            0);
                         if (++unmap_cons >= unmap_q->q_depth)
                                 break;
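The pattern in this and the following TX hunks is a mechanical rename: the pci_* wrappers take the struct pci_dev, while the generic calls take the underlying struct device, and the PCI_DMA_* direction flags become enum dma_data_direction values. A minimal sketch of the correspondence (the helper name and arguments here are illustrative, not the driver's):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Old: pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);
     * New: the same operation through the generic DMA API, keyed off
     * the struct device embedded in the pci_dev. */
    static void example_unmap_tx(struct pci_dev *pdev, dma_addr_t addr,
                                 size_t len)
    {
            dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
    }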
@@ -199,23 +199,23 @@
                 sent_bytes += skb->len;
                 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
 
-                pci_unmap_single(bnad->pcidev,
-                                 pci_unmap_addr(&unmap_array[unmap_cons],
+                dma_unmap_single(&bnad->pcidev->dev,
+                                 dma_unmap_addr(&unmap_array[unmap_cons],
                                                 dma_addr), skb_headlen(skb),
-                                 PCI_DMA_TODEVICE);
-                pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+                                 DMA_TO_DEVICE);
+                dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
                 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 
                 prefetch(&unmap_array[unmap_cons + 1]);
                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                         prefetch(&unmap_array[unmap_cons + 1]);
 
-                        pci_unmap_page(bnad->pcidev,
-                                       pci_unmap_addr(&unmap_array[unmap_cons],
+                        dma_unmap_page(&bnad->pcidev->dev,
+                                       dma_unmap_addr(&unmap_array[unmap_cons],
                                                       dma_addr),
                                        skb_shinfo(skb)->frags[i].size,
-                                       PCI_DMA_TODEVICE);
-                        pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+                                       DMA_TO_DEVICE);
+                        dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
                                            0);
                         BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
                 }
@@ -340,19 +340,22 @@
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
         struct bnad_unmap_q *unmap_q;
+        struct bnad_skb_unmap *unmap_array;
         struct sk_buff *skb;
         int unmap_cons;
 
         unmap_q = rcb->unmap_q;
+        unmap_array = unmap_q->unmap_array;
         for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-                skb = unmap_q->unmap_array[unmap_cons].skb;
+                skb = unmap_array[unmap_cons].skb;
                 if (!skb)
                         continue;
-                unmap_q->unmap_array[unmap_cons].skb = NULL;
-                pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-                                        unmap_array[unmap_cons],
-                                        dma_addr), rcb->rxq->buffer_size,
-                                        PCI_DMA_FROMDEVICE);
+                unmap_array[unmap_cons].skb = NULL;
+                dma_unmap_single(&bnad->pcidev->dev,
+                                 dma_unmap_addr(&unmap_array[unmap_cons],
+                                                dma_addr),
+                                 rcb->rxq->buffer_size,
+                                 DMA_FROM_DEVICE);
                 dev_kfree_skb(skb);
         }
         bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@
                 skb->dev = bnad->netdev;
                 skb_reserve(skb, NET_IP_ALIGN);
                 unmap_array[unmap_prod].skb = skb;
-                dma_addr = pci_map_single(bnad->pcidev, skb->data,
-                        rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-                pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+                dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+                                          rcb->rxq->buffer_size,
+                                          DMA_FROM_DEVICE);
+                dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
                                    dma_addr);
                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
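The RX refill side shows the companion mapping pattern: map the new buffer, then record the handle with dma_unmap_addr_set() so the unmap paths above can retrieve it later. A self-contained sketch under assumed types (struct rx_unmap and example_map_rxbuf are hypothetical stand-ins for the driver's bnad_skb_unmap bookkeeping):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Hypothetical per-buffer bookkeeping entry; the driver's real struct
     * is bnad_skb_unmap. DEFINE_DMA_UNMAP_ADDR compiles to nothing on
     * platforms that never need the address back at unmap time. */
    struct rx_unmap {
            struct sk_buff *skb;
            DEFINE_DMA_UNMAP_ADDR(dma_addr);
    };

    static int example_map_rxbuf(struct device *dev, struct rx_unmap *unmap,
                                 struct sk_buff *skb, unsigned int buf_size)
    {
            dma_addr_t mapping;

            mapping = dma_map_single(dev, skb->data, buf_size,
                                     DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, mapping))
                    return -ENOMEM;   /* mapping can fail on some platforms */

            unmap->skb = skb;
            dma_unmap_addr_set(unmap, dma_addr, mapping);
            return 0;
    }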
@@ -434,8 +438,9 @@
         struct bna_rcb *rcb = NULL;
         unsigned int wi_range, packets = 0, wis = 0;
         struct bnad_unmap_q *unmap_q;
+        struct bnad_skb_unmap *unmap_array;
         struct sk_buff *skb;
-        u32 flags;
+        u32 flags, unmap_cons;
         u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
@@ -456,17 +461,17 @@
                         rcb = ccb->rcb[1];
 
                 unmap_q = rcb->unmap_q;
+                unmap_array = unmap_q->unmap_array;
+                unmap_cons = unmap_q->consumer_index;
 
-                skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+                skb = unmap_array[unmap_cons].skb;
                 BUG_ON(!(skb));
-                unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-                pci_unmap_single(bnad->pcidev,
-                                 pci_unmap_addr(&unmap_q->
-                                                unmap_array[unmap_q->
-                                                            consumer_index],
+                unmap_array[unmap_cons].skb = NULL;
+                dma_unmap_single(&bnad->pcidev->dev,
+                                 dma_unmap_addr(&unmap_array[unmap_cons],
                                                 dma_addr),
-                                                rcb->rxq->buffer_size,
-                                                PCI_DMA_FROMDEVICE);
+                                 rcb->rxq->buffer_size,
+                                 DMA_FROM_DEVICE);
                 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
 
                 /* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@
                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
                                                 dma_pa);
-                                pci_free_consistent(bnad->pcidev,
-                                                mem_info->mdl[i].len,
-                                                mem_info->mdl[i].kva, dma_pa);
+                                dma_free_coherent(&bnad->pcidev->dev,
+                                                  mem_info->mdl[i].len,
+                                                  mem_info->mdl[i].kva, dma_pa);
                         } else
                                 kfree(mem_info->mdl[i].kva);
                 }
@@ -1047,8 +1052,9 @@
                 for (i = 0; i < mem_info->num; i++) {
                         mem_info->mdl[i].len = mem_info->len;
                         mem_info->mdl[i].kva =
-                                pci_alloc_consistent(bnad->pcidev,
-                                                mem_info->len, &dma_pa);
+                                dma_alloc_coherent(&bnad->pcidev->dev,
+                                                mem_info->len, &dma_pa,
+                                                GFP_KERNEL);
 
                         if (mem_info->mdl[i].kva == NULL)
                                 goto err_return;
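One behavioural detail in this hunk: pci_alloc_consistent() always allocated with an implied GFP_ATOMIC, whereas dma_alloc_coherent() takes the gfp flag explicitly, so the conversion lets this sleepable path ask for GFP_KERNEL. A minimal sketch of the pair (the function names are illustrative):

    #include <linux/dma-mapping.h>

    /* Allocate and free a coherent (consistent) DMA buffer with the
     * generic API; the gfp flag is now explicit. */
    static void *example_alloc_coherent(struct device *dev, size_t len,
                                        dma_addr_t *dma_pa)
    {
            return dma_alloc_coherent(dev, len, dma_pa, GFP_KERNEL);
    }

    static void example_free_coherent(struct device *dev, size_t len,
                                      void *kva, dma_addr_t dma_pa)
    {
            dma_free_coherent(dev, len, kva, dma_pa);
    }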
@@ -1831,7 +1837,6 @@
         /* Initialize the Rx event handlers */
         rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
         rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
-        rx_cbfn.rcb_destroy_cbfn = NULL;
         rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
         rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
         rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
@@ -2600,9 +2605,9 @@
         unmap_q->unmap_array[unmap_prod].skb = skb;
         BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
         txqent->vector[vect_id].length = htons(skb_headlen(skb));
-        dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-                PCI_DMA_TODEVICE);
-        pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+        dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+                                  skb_headlen(skb), DMA_TO_DEVICE);
+        dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
                            dma_addr);
 
         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2635,9 @@
 
                 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
                 txqent->vector[vect_id].length = htons(size);
-                dma_addr =
-                        pci_map_page(bnad->pcidev, frag->page,
-                                     frag->page_offset, size,
-                                     PCI_DMA_TODEVICE);
-                pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+                dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+                                        frag->page_offset, size, DMA_TO_DEVICE);
+                dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
                                    dma_addr);
                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
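For the paged fragments of an skb, the transmit path uses dma_map_page() rather than dma_map_single(). The sketch below assumes the 2.6.39-era skb_frag_t layout, where page, page_offset and size are direct fields, as the hunk above also shows:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Map one paged fragment of an skb for transmit (illustrative helper,
     * not the driver's code). */
    static dma_addr_t example_map_frag(struct device *dev, skb_frag_t *frag)
    {
            return dma_map_page(dev, frag->page, frag->page_offset,
                                frag->size, DMA_TO_DEVICE);
    }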
@@ -3022,14 +3025,14 @@
         err = pci_request_regions(pdev, BNAD_NAME);
         if (err)
                 goto disable_device;
-        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+            !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                 *using_dac = 1;
         } else {
-                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                 if (err) {
-                        err = pci_set_consistent_dma_mask(pdev,
-                                                DMA_BIT_MASK(32));
+                        err = dma_set_coherent_mask(&pdev->dev,
+                                                    DMA_BIT_MASK(32));
                         if (err)
                                 goto release_regions;
                 }
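The probe path negotiates the DMA mask: try 64-bit for both streaming and coherent mappings and fall back to 32-bit, with *using_dac recording whether 64-bit addressing (DAC) is in effect. A condensed sketch of the usual fallback idiom (not a line-for-line copy of the driver's error handling above; the helper name is illustrative):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Prefer 64-bit DMA, fall back to 32-bit; report which one stuck. */
    static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
    {
            if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
                !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                    *using_dac = true;
                    return 0;
            }

            *using_dac = false;
            if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                    return -EIO;
            return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
    }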