 #define CHECK_EXPIRE_INTERVAL	(60*HZ)
 #define ENTRY_TIMEOUT		(6*60*HZ)

+#define DEFAULT_EXPIRATION	(24*60*60*HZ)
+
  * It is for full expiration check.
  * When there is no partial expiration check (garbage collection)
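
The new DEFAULT_EXPIRATION, like the two constants above it, is plain jiffies arithmetic: HZ is one second worth of timer ticks, so 24*60*60*HZ is 24 hours. A minimal sketch (not part of this patch) of how such a constant is consumed, using the same time_after() pattern as the expiration checks further down:

	/* Jiffies counters wrap, so deadlines must be compared with
	 * time_after()/time_before() rather than a plain '<'. */
	unsigned long deadline = en->lastuse + ENTRY_TIMEOUT;

	if (time_after(jiffies, deadline)) {
		/* entry idle for more than ENTRY_TIMEOUT: expire it */
	}
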
 	write_lock(&set->lock);
 	list_for_each_entry_safe(e, ep, &set->list, list) {
-		 * We don't kfree dest because it is refered either
+		 * We don't kfree dest because it is referred either
 		 * by its service or by the trash dest list.
 		atomic_dec(&e->dest->refcnt);
 	if ((atomic_read(&least->weight) > 0)
 	    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-		loh = atomic_read(&least->activeconns) * 50
-			+ atomic_read(&least->inactconns);
+		loh = ip_vs_dest_conn_overhead(least);

 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;

-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if ((loh * atomic_read(&dest->weight) >
 		     doh * atomic_read(&least->weight))
 		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
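
Both assignments above now go through ip_vs_dest_conn_overhead() instead of open-coding activeconns * 50 + inactconns, so every scheduler shares one load formula. The helper sits in include/net/ip_vs.h; as merged it charges an active connection roughly 256 times an inactive one, using a shift instead of the old multiplier. A sketch from memory, so treat the exact body as illustrative rather than verbatim:

	static inline unsigned int
	ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
	{
		/* an active connection costs ~256 inactive ones */
		return (atomic_read(&dest->activeconns) << 8) +
			atomic_read(&dest->inactconns);
	}
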
 	list_for_each_entry(e, &set->list, list) {
 		most = e->dest;
 		if (atomic_read(&most->weight) > 0) {
-			moh = atomic_read(&most->activeconns) * 50
-				+ atomic_read(&most->inactconns);
+			moh = ip_vs_dest_conn_overhead(most);
 	list_for_each_entry(e, &set->list, list) {
 		dest = e->dest;
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
 		if ((moh * atomic_read(&dest->weight) <
 		     doh * atomic_read(&most->weight))
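
The comment above carries the whole trick: kernel code cannot use floating point, so instead of comparing the per-weight loads moh/mw and doh/dw directly, both sides are cross-multiplied by the (strictly positive) weights. A self-contained userspace illustration of the same transformation, with hypothetical values:

	#include <stdio.h>

	/* Return a/b < c/d for b, d > 0 without division or floats:
	 * a/b < c/d  <==>  a*d < c*b. */
	static int frac_less(unsigned long a, unsigned long b,
			     unsigned long c, unsigned long d)
	{
		return a * d < c * b;
	}

	int main(void)
	{
		/* overhead 300 at weight 2 (150 per weight unit) versus
		 * overhead 500 at weight 3 (~167 per unit): the first
		 * destination carries the smaller relative load. */
		printf("%d\n", frac_less(300, 2, 500, 3));	/* 1 */
		return 0;
	}
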
 static ctl_table vs_vars_table[] = {
 	{
 		.procname	= "lblcr_expiration",
-		.data		= &sysctl_ip_vs_lblcr_expiration,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,

-static struct ctl_table_header * sysctl_header;
 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
+static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
+{
+#ifdef CONFIG_SYSCTL
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+
+	return ipvs->sysctl_lblcr_expiration;
+#else
+	return DEFAULT_EXPIRATION;
+#endif
+}
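
The helper reads the expiration from per-namespace state instead of the old global, falling back to DEFAULT_EXPIRATION when sysctls are compiled out; the knob itself shows up in each namespace as /proc/sys/net/ipv4/vs/lblcr_expiration. This assumes struct netns_ipvs in include/net/ip_vs.h carries the field plus the table bookkeeping used by the init code below, roughly (unrelated members elided):

	struct netns_ipvs {
		/* ... */
		/* ip_vs_lblcr */
		int			sysctl_lblcr_expiration;
		struct ctl_table_header	*lblcr_ctl_header;
		struct ctl_table	*lblcr_ctl_table;
		/* ... */
	};
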
 static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)

 		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
-			if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
-				       now))
+			if (time_after(en->lastuse +
+				       sysctl_lblcr_expiration(svc), now))
 				continue;

 			ip_vs_lblcr_free(en);
-	 * We think the overhead of processing active connections is fifty
-	 * times higher than that of inactive connections in average. (This
-	 * fifty times might not be accurate, we will change it later.) We
-	 * use the following formula to estimate the overhead:
-	 *		dest->activeconns*50 + dest->inactconns
+	 * We use the following formula to estimate the load:
 	 *		(dest overhead) / dest->weight
 	 *
 	 * Remember -- no floats in kernel mode!!!
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;

-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 	/* More than one destination + enough time passed by, cleanup */
 	if (atomic_read(&en->set.size) > 1 &&
 	    time_after(jiffies, en->set.lastmod +
-		       sysctl_ip_vs_lblcr_expiration)) {
+		       sysctl_lblcr_expiration(svc))) {
 		struct ip_vs_dest *m;

 		write_lock(&en->set.lock);
 	/* The cache entry is invalid, time to schedule */
 	dest = __ip_vs_lblcr_schedule(svc);
 	if (!dest) {
-		IP_VS_ERR_RL("LBLCR: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 		read_unlock(&svc->sched_lock);
 	.schedule = ip_vs_lblcr_schedule,
+#ifdef CONFIG_SYSCTL
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	if (!net_eq(net, &init_net)) {
+		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+						sizeof(vs_vars_table),
+						GFP_KERNEL);
+		if (ipvs->lblcr_ctl_table == NULL)
+			return -ENOMEM;
+	} else
+		ipvs->lblcr_ctl_table = vs_vars_table;
+	ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
+	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+	ipvs->lblcr_ctl_header =
+		register_net_sysctl_table(net, net_vs_ctl_path,
+					  ipvs->lblcr_ctl_table);
+	if (!ipvs->lblcr_ctl_header) {
+		if (!net_eq(net, &init_net))
+			kfree(ipvs->lblcr_ctl_table);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+
+	if (!net_eq(net, &init_net))
+		kfree(ipvs->lblcr_ctl_table);
+}
+#else
+
+static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
+static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }
+
+#endif
+static struct pernet_operations ip_vs_lblcr_ops = {
+	.init = __ip_vs_lblcr_init,
+	.exit = __ip_vs_lblcr_exit,
+};
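
register_pernet_subsys() will invoke .init once for each network namespace (including init_net and any namespace created later) and .exit as each one is torn down, which is what lets every namespace own its private copy of the sysctl table above. For reference, the ops structure is declared in include/net/net_namespace.h along the lines of:

	struct pernet_operations {
		struct list_head list;
		int (*init)(struct net *net);
		void (*exit)(struct net *net);
		/* ... id/size members for per-net data elided ... */
	};
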
 static int __init ip_vs_lblcr_init(void)
 {
-	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+	ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+	if (ret)
+		return ret;
+
 	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
 	if (ret)
-		unregister_sysctl_table(sysctl_header);
+		unregister_pernet_subsys(&ip_vs_lblcr_ops);
 static void __exit ip_vs_lblcr_cleanup(void)
 {
-	unregister_sysctl_table(sysctl_header);
 	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+	unregister_pernet_subsys(&ip_vs_lblcr_ops);
 }