~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to net/netfilter/ipvs/ip_vs_lblcr.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

@@ -63,15 +63,15 @@
 #define CHECK_EXPIRE_INTERVAL   (60*HZ)
 #define ENTRY_TIMEOUT           (6*60*HZ)
 
+#define DEFAULT_EXPIRATION      (24*60*60*HZ)
+
 /*
  *    It is for full expiration check.
  *    When there is no partial expiration check (garbage collection)
  *    time, we do a full expiration check to collect the expired
  *    entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
 
 /*
  *     for IPVS lblcr entry hash table
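A note on the constants in the hunk above: all three are jiffies values, so DEFAULT_EXPIRATION = 24*60*60*HZ is one day regardless of the configured tick rate. A minimal sketch of how such a timeout is typically consumed (entry_expired and the bare lastuse parameter are illustrative, not names from this file):

#include <linux/jiffies.h>

/* True once more than ENTRY_TIMEOUT ticks have elapsed since lastuse.
 * time_after() compares jiffies safely across counter wraparound. */
static bool entry_expired(unsigned long lastuse)
{
        return time_after(jiffies, lastuse + ENTRY_TIMEOUT);
}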
@@ -152,7 +152,7 @@
         write_lock(&set->lock);
         list_for_each_entry_safe(e, ep, &set->list, list) {
                 /*
-                 * We don't kfree dest because it is refered either
+                 * We don't kfree dest because it is referred either
                  * by its service or by the trash dest list.
                  */
                 atomic_dec(&e->dest->refcnt);
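The comment corrected in this hunk states the usual kernel refcounting contract: this function only drops its reference with atomic_dec() and never frees, because the owning service or the trash list may still hold the destination. A generic sketch of that contract (struct obj and obj_put() are illustrative names, not IPVS code):

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
        atomic_t refcnt;
        /* payload ... */
};

static void obj_put(struct obj *o)
{
        /* Only the holder of the final reference may free the object. */
        if (atomic_dec_and_test(&o->refcnt))
                kfree(o);
}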
@@ -180,8 +180,7 @@
 
                 if ((atomic_read(&least->weight) > 0)
                     && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-                        loh = atomic_read(&least->activeconns) * 50
-                                + atomic_read(&least->inactconns);
+                        loh = ip_vs_dest_conn_overhead(least);
                         goto nextstage;
                 }
         }
@@ -194,8 +193,7 @@
                 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                         continue;
 
-                doh = atomic_read(&dest->activeconns) * 50
-                        + atomic_read(&dest->inactconns);
+                doh = ip_vs_dest_conn_overhead(dest);
                 if ((loh * atomic_read(&dest->weight) >
                      doh * atomic_read(&least->weight))
                     && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -230,8 +228,7 @@
         list_for_each_entry(e, &set->list, list) {
                 most = e->dest;
                 if (atomic_read(&most->weight) > 0) {
-                        moh = atomic_read(&most->activeconns) * 50
-                                + atomic_read(&most->inactconns);
+                        moh = ip_vs_dest_conn_overhead(most);
                         goto nextstage;
                 }
         }
@@ -241,8 +238,7 @@
   nextstage:
         list_for_each_entry(e, &set->list, list) {
                 dest = e->dest;
-                doh = atomic_read(&dest->activeconns) * 50
-                        + atomic_read(&dest->inactconns);
+                doh = ip_vs_dest_conn_overhead(dest);
                 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                 if ((moh * atomic_read(&dest->weight) <
                      doh * atomic_read(&most->weight))
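The hunks above replace the open-coded estimate activeconns*50 + inactconns with the shared ip_vs_dest_conn_overhead() helper. For reference, a sketch of that helper as defined in include/net/ip_vs.h in this kernel series (assuming the mainline definition of the time, which weights an active connection 256x via a shift rather than the old literal 50):

static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
        /* An active connection is treated as ~256x the cost of an
         * inactive one; the shift keeps the estimate integer-only. */
        return (atomic_read(&dest->activeconns) << 8) +
                atomic_read(&dest->inactconns);
}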
@@ -289,22 +285,22 @@
 };
 
 
+#ifdef CONFIG_SYSCTL
 /*
  *      IPVS LBLCR sysctl table
  */
 
 static ctl_table vs_vars_table[] = {
         {
                 .procname       = "lblcr_expiration",
-                .data           = &sysctl_ip_vs_lblcr_expiration,
+                .data           = NULL,
                 .maxlen         = sizeof(int),
                 .mode           = 0644,
                 .proc_handler   = proc_dointvec_jiffies,
         },
         { }
 };
-
-static struct ctl_table_header * sysctl_header;
+#endif
 
 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
 {
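The .data pointer of the lblcr_expiration row is deliberately left NULL here: with per-netns sysctls each network namespace gets its own copy of vs_vars_table, and the namespace init hook (shown in the final hunk of this diff) points the row at that namespace's storage:

        /* from __ip_vs_lblcr_init() further down in this diff */
        ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
        ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;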
@@ -418,6 +414,15 @@
         }
 }
 
+static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
+{
+#ifdef CONFIG_SYSCTL
+        struct netns_ipvs *ipvs = net_ipvs(svc->net);
+        return ipvs->sysctl_lblcr_expiration;
+#else
+        return DEFAULT_EXPIRATION;
+#endif
+}
 
 static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 {
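The new sysctl_lblcr_expiration() accessor gives every call site one expression that works with or without sysctl support: under CONFIG_SYSCTL it reads the per-namespace tunable, otherwise the preprocessor collapses it to the constant, effectively (same function as above, shown with CONFIG_SYSCTL unset):

static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
        return DEFAULT_EXPIRATION;      /* 24*60*60*HZ, i.e. one day */
}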
@@ -431,8 +436,8 @@
 
                 write_lock(&svc->sched_lock);
                 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
-                        if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
-                                       now))
+                        if (time_after(en->lastuse +
+                                       sysctl_lblcr_expiration(svc), now))
                                 continue;
 
                         ip_vs_lblcr_free(en);
@@ -566,12 +571,7 @@
         int loh, doh;
 
         /*
-         * We think the overhead of processing active connections is fifty
-         * times higher than that of inactive connections in average. (This
-         * fifty times might not be accurate, we will change it later.) We
-         * use the following formula to estimate the overhead:
-         *                dest->activeconns*50 + dest->inactconns
-         * and the load:
+         * We use the following formula to estimate the load:
          *                (dest overhead) / dest->weight
          *
          * Remember -- no floats in kernel mode!!!
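The "no floats" rule explains why every load comparison in this scheduler cross-multiplies instead of dividing: loh/lw > doh/dw is evaluated as loh*dw > doh*lw, which is valid whenever both weights are positive. With illustrative values loh=500, lw=1, doh=900, dw=2: 500*2 = 1000 > 900*1 = 900, so dest (relative load 450) is lighter than least (relative load 500) and becomes the new candidate, matching the update in the hunks below:

        /* integer-only "lighter relative load" test */
        if (loh * atomic_read(&dest->weight) >
            doh * atomic_read(&least->weight)) {
                least = dest;
                loh = doh;
        }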
@@ -588,8 +588,7 @@
 
                 if (atomic_read(&dest->weight) > 0) {
                         least = dest;
-                        loh = atomic_read(&least->activeconns) * 50
-                                + atomic_read(&least->inactconns);
+                        loh = ip_vs_dest_conn_overhead(least);
                         goto nextstage;
                 }
         }
@@ -603,8 +602,7 @@
                 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                         continue;
 
-                doh = atomic_read(&dest->activeconns) * 50
-                        + atomic_read(&dest->inactconns);
+                doh = ip_vs_dest_conn_overhead(dest);
                 if (loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight)) {
                         least = dest;
@@ -675,7 +673,7 @@
                 /* More than one destination + enough time passed by, cleanup */
                 if (atomic_read(&en->set.size) > 1 &&
                                 time_after(jiffies, en->set.lastmod +
-                                sysctl_ip_vs_lblcr_expiration)) {
+                                sysctl_lblcr_expiration(svc))) {
                         struct ip_vs_dest *m;
 
                         write_lock(&en->set.lock);
@@ -694,7 +692,7 @@
                 /* The cache entry is invalid, time to schedule */
                 dest = __ip_vs_lblcr_schedule(svc);
                 if (!dest) {
-                        IP_VS_ERR_RL("LBLCR: no destination available\n");
+                        ip_vs_scheduler_err(svc, "no destination available");
                         read_unlock(&svc->sched_lock);
                         return NULL;
                 }
@@ -744,22 +742,76 @@
         .schedule =             ip_vs_lblcr_schedule,
 };
 
+/*
+ *  per netns init.
+ */
+#ifdef CONFIG_SYSCTL
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+        struct netns_ipvs *ipvs = net_ipvs(net);
+
+        if (!net_eq(net, &init_net)) {
+                ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+                                                sizeof(vs_vars_table),
+                                                GFP_KERNEL);
+                if (ipvs->lblcr_ctl_table == NULL)
+                        return -ENOMEM;
+        } else
+                ipvs->lblcr_ctl_table = vs_vars_table;
+        ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
+        ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+        ipvs->lblcr_ctl_header =
+                register_net_sysctl_table(net, net_vs_ctl_path,
+                                          ipvs->lblcr_ctl_table);
+        if (!ipvs->lblcr_ctl_header) {
+                if (!net_eq(net, &init_net))
+                        kfree(ipvs->lblcr_ctl_table);
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+        struct netns_ipvs *ipvs = net_ipvs(net);
+
+        unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+
+        if (!net_eq(net, &init_net))
+                kfree(ipvs->lblcr_ctl_table);
+}
+
+#else
+
+static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
+static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }
+
+#endif
+
+static struct pernet_operations ip_vs_lblcr_ops = {
+        .init = __ip_vs_lblcr_init,
+        .exit = __ip_vs_lblcr_exit,
+};
 
 static int __init ip_vs_lblcr_init(void)
 {
         int ret;
 
-        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+        ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+        if (ret)
+                return ret;
+
         ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
         if (ret)
-                unregister_sysctl_table(sysctl_header);
+                unregister_pernet_subsys(&ip_vs_lblcr_ops);
         return ret;
 }
 
-
 static void __exit ip_vs_lblcr_cleanup(void)
 {
-        unregister_sysctl_table(sysctl_header);
         unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+        unregister_pernet_subsys(&ip_vs_lblcr_ops);
 }
 
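With this change the module init path registers the pernet operations before the scheduler, so per-namespace sysctl state exists before the scheduler can ever be selected, and the error path unwinds in reverse order. register_pernet_subsys() runs .init for the initial namespace and for every namespace created later, while .exit runs as each namespace is dismantled; a minimal generic sketch of the pattern (illustrative names, not from this file):

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
        /* allocate/initialize per-namespace state here */
        return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
        /* tear down per-namespace state here */
}

static struct pernet_operations example_ops = {
        .init = example_net_init,       /* init_net and each later netns */
        .exit = example_net_exit,       /* on each netns teardown */
};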