~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to drivers/md/dm.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
  • Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

--- drivers/md/dm.c
+++ drivers/md/dm.c
@@ -477,7 +477,8 @@
 	cpu = part_stat_lock();
 	part_round_stats(cpu, &dm_disk(md)->part0);
 	part_stat_unlock();
-	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
+	atomic_set(&dm_disk(md)->part0.in_flight[rw],
+		atomic_inc_return(&md->pending[rw]));
 }
 
 static void end_io_acct(struct dm_io *io)
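
The hunk above tracks a block-layer change: part0.in_flight[] is now an array of atomic_t, so the per-direction counter can no longer be written by plain assignment and has to be published with atomic_set(). A minimal userspace sketch of the submit-side pattern, using C11 atomics in place of the kernel's atomic_t (pending[] and in_flight[] are illustrative stand-ins for md->pending[] and part0.in_flight[], not code from dm.c):

    #include <stdatomic.h>

    static atomic_int pending[2];    /* source of truth, one slot per direction */
    static atomic_int in_flight[2];  /* mirror that the stats code reads */

    /* Submit side: bump the pending count for this direction (rw is 0 for
     * reads, 1 for writes) and publish the new depth to the stats mirror,
     * mirroring atomic_inc_return() + atomic_set() in the hunk above. */
    static void start_io_acct(int rw)
    {
            int nr = atomic_fetch_add(&pending[rw], 1) + 1;
            atomic_store(&in_flight[rw], nr);
    }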
@@ -497,8 +498,8 @@
 	 * After this is decremented the bio must not be touched if it is
 	 * a flush.
 	 */
-	dm_disk(md)->part0.in_flight[rw] = pending =
-		atomic_dec_return(&md->pending[rw]);
+	pending = atomic_dec_return(&md->pending[rw]);
+	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
 	pending += atomic_read(&md->pending[rw^0x1]);
 
 	/* nudge anyone waiting on suspend queue */
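
The completion side, in the hunk above, is the mirror image: drop the pending count, publish the new value, then add the other direction's count so the device is declared idle only when both drain to zero. Continuing the sketch (the wake-up is a plain callback here; dm.c calls wake_up() on md->wait so the suspend path can make progress):

    /* Completion side: decrement, publish, then check the combined depth
     * across both directions, as the hunk does with
     * pending += atomic_read(&md->pending[rw^0x1]). */
    static void end_io_acct(int rw, void (*nudge_waiters)(void))
    {
            int nr = atomic_fetch_sub(&pending[rw], 1) - 1;
            atomic_store(&in_flight[rw], nr);

            nr += atomic_load(&pending[rw ^ 0x1]);
            if (!nr)
                    nudge_waiters();  /* idle: nudge anyone waiting to suspend */
    }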
@@ -807,8 +808,6 @@
 	dm_unprep_request(rq);
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (elv_queue_empty(q))
-		blk_plug_device(q);
 	blk_requeue_request(q, rq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -1613,10 +1612,10 @@
 	 * number of in-flight I/Os after the queue is stopped in
 	 * dm_suspend().
 	 */
-	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto plug_and_out;
+			goto delay_and_out;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
@@ -1627,7 +1626,7 @@
 		BUG_ON(!dm_target_is_valid(ti));
 
 		if (ti->type->busy && ti->type->busy(ti))
-			goto plug_and_out;
+			goto delay_and_out;
 
 		blk_start_request(rq);
 		clone = rq->special;
@@ -1647,11 +1646,8 @@
 	BUG_ON(!irqs_disabled());
 	spin_lock(q->queue_lock);
 
-plug_and_out:
-	if (!elv_queue_empty(q))
-		/* Some requests still remain, retry later */
-		blk_plug_device(q);
-
+delay_and_out:
+	blk_delay_queue(q, HZ / 10);
 out:
 	dm_table_put(map);
 
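
The dm_request_fn() hunks above follow the 2.6.39 removal of per-queue plugging: blk_queue_plugged(), blk_plug_device() and the plug_and_out label no longer exist, so a request_fn that cannot make progress now asks the block layer to re-run the queue a little later via blk_delay_queue() instead of plugging it. A stripped-down skeleton of the resulting shape, with device_busy() and dispatch() as hypothetical stand-ins for the dm-specific mapping logic:

    static void example_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while (!blk_queue_stopped(q)) {
                    rq = blk_peek_request(q);
                    if (!rq)
                            return;            /* queue drained, nothing to do */

                    if (device_busy())         /* hypothetical back-pressure check */
                            goto delay_and_out;

                    blk_start_request(rq);     /* dequeue and start accounting */
                    dispatch(rq);              /* hypothetical hand-off to the device */
            }
            return;

    delay_and_out:
            /* No plug to arm any more: just retry the queue a bit later. */
            blk_delay_queue(q, HZ / 10);
    }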
@@ -1680,20 +1676,6 @@
 	return r;
 }
 
-static void dm_unplug_all(struct request_queue *q)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table(md);
-
-	if (map) {
-		if (dm_request_based(md))
-			generic_unplug_device(q);
-
-		dm_table_unplug_all(map);
-		dm_table_put(map);
-	}
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
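
dm_unplug_all() goes away for the same reason: struct request_queue lost its unplug_fn hook in 2.6.39, when plugging became a per-task, on-stack affair, so there is nothing left for a driver to unplug (the hunks that follow delete the unplug_fn assignment and the remaining callers). A submitter that wants batching now brackets its submissions with blk_start_plug()/blk_finish_plug(); roughly, using the 3.0-era two-argument submit_bio():

    static void submit_batch(struct bio **bios, unsigned nr)
    {
            struct blk_plug plug;
            unsigned i;

            blk_start_plug(&plug);        /* queue bios on this task's plug list */
            for (i = 0; i < nr; i++)
                    submit_bio(WRITE, bios[i]);
            blk_finish_plug(&plug);       /* unplug: flush the batch to the driver */
    }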
@@ -1817,7 +1799,6 @@
 	md->queue->backing_dev_info.congested_data = md;
 	blk_queue_make_request(md->queue, dm_request);
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-	md->queue->unplug_fn = dm_unplug_all;
 	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 	blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
 }
@@ -2263,8 +2244,6 @@
 	int r = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
-	dm_unplug_all(md->queue);
-
 	add_wait_queue(&md->wait, &wait);
 
 	while (1) {
@@ -2539,7 +2518,6 @@
 
 	clear_bit(DMF_SUSPENDED, &md->flags);
 
-	dm_table_unplug_all(map);
 	r = 0;
 out:
 	dm_table_put(map);
@@ -2644,9 +2622,10 @@
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 {
 	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
 
 	if (!pools)
 		return NULL;
@@ -2663,12 +2642,17 @@
 	if (!pools->tio_pool)
 		goto free_io_pool_and_out;
 
-	pools->bs = (type == DM_TYPE_BIO_BASED) ?
-		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+	pools->bs = bioset_create(pool_size, 0);
 	if (!pools->bs)
 		goto free_tio_pool_and_out;
 
+	if (integrity && bioset_integrity_create(pools->bs, pool_size))
+		goto free_bioset_and_out;
+
 	return pools;
 
+free_bioset_and_out:
+	bioset_free(pools->bs);
+
 free_tio_pool_and_out:
 	mempool_destroy(pools->tio_pool);
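
The final hunks give dm_alloc_md_mempools() an integrity flag: the bioset size is hoisted into pool_size, and when integrity is set, bioset_integrity_create() attaches an integrity-payload pool of the same depth, with a new free_bioset_and_out label extending the unwind ladder so a failed integrity allocation releases the bioset before falling through to the older cleanup labels. That ladder is the standard kernel idiom for multi-stage allocation; a generic, self-contained userspace sketch of the same shape (all names hypothetical):

    #include <stdlib.h>

    struct pools { void *io; void *tio; void *bs; };

    static int integrity_setup(void *bs) { (void)bs; return 0; }  /* stand-in */

    /* Each failure label frees exactly what was allocated before it, in
     * reverse order, so every exit path leaves no leaks. */
    static struct pools *pools_alloc(int integrity)
    {
            struct pools *p = malloc(sizeof(*p));
            if (!p)
                    return NULL;
            if (!(p->io = malloc(64)))
                    goto free_pools;
            if (!(p->tio = malloc(64)))
                    goto free_io;
            if (!(p->bs = malloc(64)))
                    goto free_tio;
            if (integrity && integrity_setup(p->bs))
                    goto free_bs;
            return p;

    free_bs:
            free(p->bs);
    free_tio:
            free(p->tio);
    free_io:
            free(p->io);
    free_pools:
            free(p);
            return NULL;
    }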