~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to net/rds/ib_rdma.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
  • Tags: 3.0.0-1200.2
    * Rebased against 3.0.0-6.7
    * BSP from TI based on 3.0.0

--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -38,8 +38,6 @@
 #include "ib.h"
 #include "xlist.h"
 
-static struct workqueue_struct *rds_ib_fmr_wq;
-
 static DEFINE_PER_CPU(unsigned long, clean_list_grace);
 #define CLEAN_LIST_BUSY_BIT 0
 
@@ -307,7 +305,7 @@
         int err = 0, iter = 0;
 
         if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-                queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+                schedule_delayed_work(&pool->flush_worker, 10);
 
         while (1) {
                 ibmr = rds_ib_reuse_fmr(pool);
@@ -696,24 +694,6 @@
         return ret;
 }
 
-int rds_ib_fmr_init(void)
-{
-        rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
-        if (!rds_ib_fmr_wq)
-                return -ENOMEM;
-        return 0;
-}
-
-/*
- * By the time this is called all the IB devices should have been torn down and
- * had their pools freed.  As each pool is freed its work struct is waited on,
- * so the pool flushing work queue should be idle by the time we get here.
- */
-void rds_ib_fmr_exit(void)
-{
-        destroy_workqueue(rds_ib_fmr_wq);
-}
-
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
         struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -741,7 +721,7 @@
         /* If we've pinned too many pages, request a flush */
         if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
             atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-                queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+                schedule_delayed_work(&pool->flush_worker, 10);
 
         if (invalidate) {
                 if (likely(!in_interrupt())) {
@@ -749,7 +729,6 @@
                 } else {
                         /* We get here if the user created a MR marked
                          * as use_once and invalidate at the same time. */
-                        queue_delayed_work(rds_ib_fmr_wq,
-                                           &pool->flush_worker, 10);
+                        schedule_delayed_work(&pool->flush_worker, 10);
                 }
         }
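
The net effect of the diff is that the driver-private rds_ib_fmr_wq workqueue (created in rds_ib_fmr_init() and destroyed in rds_ib_fmr_exit()) goes away, and the three call sites queue the pool flush work on the kernel's shared system workqueue with schedule_delayed_work() instead of queue_delayed_work(). The fragment below is a minimal sketch of that pattern in a standalone module; the module and the demo_flush_worker() handler are hypothetical illustrations, not part of the RDS code.

/* Hypothetical demo module: defer work to the shared system workqueue
 * instead of a private workqueue created with create_workqueue(). */
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_flush_worker(struct work_struct *work)
{
        pr_info("demo: deferred flush ran on the system workqueue\n");
}

/* Statically initialized delayed work item; no private workqueue needed. */
static DECLARE_DELAYED_WORK(demo_flush_work, demo_flush_worker);

static int __init demo_init(void)
{
        /* Same shape as the patched call sites: queue on the system
         * workqueue with a 10-jiffy delay. */
        schedule_delayed_work(&demo_flush_work, 10);
        return 0;
}

static void __exit demo_exit(void)
{
        /* Ensure the deferred work has finished before unload, since
         * there is no private workqueue left to flush and destroy. */
        cancel_delayed_work_sync(&demo_flush_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the delayed work item is declared statically and runs on the system workqueue, there is nothing to set up or tear down at load and unload time, which matches the removal of the rds_ib_fmr_init()/rds_ib_fmr_exit() pair in the diff above; the owner of the work item is still presumably responsible for a cancel_delayed_work_sync() before it disappears so no flush remains pending.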