~ubuntu-branches/debian/wheezy/linux-2.6/wheezy

« back to all changes in this revision

Viewing changes to drivers/dma/pch_dma.c

  • Committer: Bazaar Package Importer
  • Author(s): Ben Hutchings, Ben Hutchings, Aurelien Jarno
  • Date: 2011-06-07 12:14:05 UTC
  • mfrom: (43.1.9 sid)
  • Revision ID: james.westby@ubuntu.com-20110607121405-i3h1rd7nrnd2b73h
Tags: 2.6.39-2
[ Ben Hutchings ]
* [x86] Enable BACKLIGHT_APPLE, replacing BACKLIGHT_MBP_NVIDIA
  (Closes: #627492)
* cgroups: Disable memory resource controller by default. Allow it
  to be enabled using kernel parameter 'cgroup_enable=memory'.
* rt2800usb: Enable support for more USB devices including
  Linksys WUSB600N (Closes: #596626) (this change was accidentally
  omitted from 2.6.39-1)
* [x86] Remove Celeron from list of processors supporting PAE. Most
  'Celeron M' models do not.
* Update debconf template translations:
  - Swedish (Martin Bagge) (Closes: #628932)
  - French (David Prévot) (Closes: #628191)
* aufs: Update for 2.6.39 (Closes: #627837)
* Add stable 2.6.39.1, including:
  - ext4: don't set PageUptodate in ext4_end_bio()
  - pata_cmd64x: fix boot crash on parisc (Closes: #622997, #622745)
  - ext3: Fix fs corruption when make_indexed_dir() fails
  - netfilter: nf_ct_sip: validate Content-Length in TCP SIP messages
  - sctp: fix race between sctp_bind_addr_free() and
    sctp_bind_addr_conflict()
  - sctp: fix memory leak of the ASCONF queue when free asoc
  - md/bitmap: fix saving of events_cleared and other state
  - cdc_acm: Fix oops when Droids MuIn LCD is connected
  - cx88: Fix conversion from BKL to fine-grained locks (Closes: #619827)
  - keys: Set cred->user_ns in key_replace_session_keyring (CVE-2011-2184)
  - tmpfs: fix race between truncate and writepage
  - nfs41: Correct offset for LAYOUTCOMMIT
  - xen/mmu: fix a race window causing leave_mm BUG()
  - ext4: fix possible use-after-free in ext4_remove_li_request()
  For the complete list of changes, see:
   http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.39.1
* Bump ABI to 2
* netfilter: Enable IP_SET, IP_SET_BITMAP_IP, IP_SET_BITMAP_IPMAC,
  IP_SET_BITMAP_PORT, IP_SET_HASH_IP, IP_SET_HASH_IPPORT,
  IP_SET_HASH_IPPORTIP, IP_SET_HASH_IPPORTNET, IP_SET_HASH_NET,
  IP_SET_HASH_NETPORT, IP_SET_LIST_SET, NETFILTER_XT_SET as modules
  (Closes: #629401)

[ Aurelien Jarno ]
* [mipsel/loongson-2f] Disable SCSI_LPFC to work around GCC ICE.

Show diffs side-by-side

added added

removed removed

Lines of Context:
82
82
        u32     dma_sts1;
83
83
        u32     reserved2;
84
84
        u32     reserved3;
85
 
        struct pch_dma_desc_regs desc[0];
 
85
        struct pch_dma_desc_regs desc[MAX_CHAN_NR];
86
86
};
87
87
 
88
88
struct pch_dma_desc {
124
124
        struct pci_pool         *pool;
125
125
        struct pch_dma_regs     regs;
126
126
        struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
127
 
        struct pch_dma_chan     channels[0];
 
127
        struct pch_dma_chan     channels[MAX_CHAN_NR];
128
128
};
129
129
 
130
130
#define PCH_DMA_CTL0    0x00
366
366
        struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
367
367
        dma_cookie_t cookie;
368
368
 
369
 
        spin_lock_bh(&pd_chan->lock);
 
369
        spin_lock(&pd_chan->lock);
370
370
        cookie = pdc_assign_cookie(pd_chan, desc);
371
371
 
372
372
        if (list_empty(&pd_chan->active_list)) {
376
376
                list_add_tail(&desc->desc_node, &pd_chan->queue);
377
377
        }
378
378
 
379
 
        spin_unlock_bh(&pd_chan->lock);
 
379
        spin_unlock(&pd_chan->lock);
380
380
        return 0;
381
381
}
382
382
 
386
386
        struct pch_dma *pd = to_pd(chan->device);
387
387
        dma_addr_t addr;
388
388
 
389
 
        desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
 
389
        desc = pci_pool_alloc(pd->pool, flags, &addr);
390
390
        if (desc) {
391
391
                memset(desc, 0, sizeof(struct pch_dma_desc));
392
392
                INIT_LIST_HEAD(&desc->tx_list);
405
405
        struct pch_dma_desc *ret = NULL;
406
406
        int i;
407
407
 
408
 
        spin_lock_bh(&pd_chan->lock);
 
408
        spin_lock(&pd_chan->lock);
409
409
        list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
410
410
                i++;
411
411
                if (async_tx_test_ack(&desc->txd)) {
415
415
                }
416
416
                dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
417
417
        }
418
 
        spin_unlock_bh(&pd_chan->lock);
 
418
        spin_unlock(&pd_chan->lock);
419
419
        dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
420
420
 
421
421
        if (!ret) {
422
422
                ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
423
423
                if (ret) {
424
 
                        spin_lock_bh(&pd_chan->lock);
 
424
                        spin_lock(&pd_chan->lock);
425
425
                        pd_chan->descs_allocated++;
426
 
                        spin_unlock_bh(&pd_chan->lock);
 
426
                        spin_unlock(&pd_chan->lock);
427
427
                } else {
428
428
                        dev_err(chan2dev(&pd_chan->chan),
429
429
                                "failed to alloc desc\n");
437
437
                         struct pch_dma_desc *desc)
438
438
{
439
439
        if (desc) {
440
 
                spin_lock_bh(&pd_chan->lock);
 
440
                spin_lock(&pd_chan->lock);
441
441
                list_splice_init(&desc->tx_list, &pd_chan->free_list);
442
442
                list_add(&desc->desc_node, &pd_chan->free_list);
443
 
                spin_unlock_bh(&pd_chan->lock);
 
443
                spin_unlock(&pd_chan->lock);
444
444
        }
445
445
}
446
446
 
530
530
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
531
531
 
532
532
        if (pdc_is_idle(pd_chan)) {
533
 
                spin_lock_bh(&pd_chan->lock);
 
533
                spin_lock(&pd_chan->lock);
534
534
                pdc_advance_work(pd_chan);
535
 
                spin_unlock_bh(&pd_chan->lock);
 
535
                spin_unlock(&pd_chan->lock);
536
536
        }
537
537
}
538
538
 
592
592
                        goto err_desc_get;
593
593
                }
594
594
 
595
 
 
596
595
                if (!first) {
597
596
                        first = desc;
598
597
                } else {
641
640
 
642
641
        spin_unlock_bh(&pd_chan->lock);
643
642
 
644
 
 
645
643
        return 0;
646
644
}
647
645
 
648
646
static void pdc_tasklet(unsigned long data)
649
647
{
650
648
        struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
 
649
        unsigned long flags;
651
650
 
652
651
        if (!pdc_is_idle(pd_chan)) {
653
652
                dev_err(chan2dev(&pd_chan->chan),
655
654
                return;
656
655
        }
657
656
 
658
 
        spin_lock_bh(&pd_chan->lock);
 
657
        spin_lock_irqsave(&pd_chan->lock, flags);
659
658
        if (test_and_clear_bit(0, &pd_chan->err_status))
660
659
                pdc_handle_error(pd_chan);
661
660
        else
662
661
                pdc_advance_work(pd_chan);
663
 
        spin_unlock_bh(&pd_chan->lock);
 
662
        spin_unlock_irqrestore(&pd_chan->lock, flags);
664
663
}
665
664
 
666
665
static irqreturn_t pd_irq(int irq, void *devid)
694
693
        return ret;
695
694
}
696
695
 
 
696
#ifdef  CONFIG_PM
697
697
static void pch_dma_save_regs(struct pch_dma *pd)
698
698
{
699
699
        struct pch_dma_chan *pd_chan;
771
771
 
772
772
        return 0;
773
773
}
 
774
#endif
774
775
 
775
776
static int __devinit pch_dma_probe(struct pci_dev *pdev,
776
777
                                   const struct pci_device_id *id)