/*
 * IPVS:        Locality-Based Least-Connection scheduling module
 *
 * Version:     $Id: ip_vs_lblc.c,v 1.9 2002/03/25 12:44:35 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Martin Hamilton         :    fixed the terrible locking bugs
 *                                  *lock(tbl->lock) ==> *lock(&tbl->lock)
 *     Wensong Zhang           :    fixed the uninitialized tbl->lock bug
 *     Wensong Zhang           :    added doing full expiration check to
 *                                  collect stale entries of 24+ hours when
 *                                  no partial expire check in a half hour
 *     Julian Anastasov        :    replaced del_timer call with del_timer_sync
 *                                  to avoid the possible race between timer
 *                                  handler and del_timer thread in SMP
 *
 */
/*
 * The lblc algorithm is as follows (pseudo code):
 *
 *       if cachenode[dest_ip] is null then
 *               n, cachenode[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- cachenode[dest_ip];
 *               if (n is dead) OR
 *                  (n.conns > n.weight AND
 *                   there is a node m with m.conns < m.weight/2) then
 *                   n, cachenode[dest_ip] <- {weighted least-conn node};
 *
 *       return n;
 *
 * Thanks must go to Wenzhuo Zhang for talking WCCP to me and pushing
 * me to write this module.
 */
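/*
 * In this module the cachenode table above is struct ip_vs_lblc_table,
 * keyed by destination IP address; the "weighted least-conn node" is
 * picked by __ip_vs_wlc_schedule(), the "dead" test corresponds to the
 * IP_VS_DEST_F_AVAILABLE/weight checks, and the overload condition is
 * implemented by is_overloaded().  See ip_vs_lblc_schedule() below.
 */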
#include <linux/module.h>
#include <linux/kernel.h>

#include <linux/sysctl.h>

#include <net/ip_vs.h>
/*
 * It is for garbage collection of stale IPVS lblc entries,
 * when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)
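/*
 * In jiffies: CHECK_EXPIRE_INTERVAL is one minute and ENTRY_TIMEOUT is
 * six minutes (HZ is the number of timer ticks per second).
 */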
/*
 * It is for full expiration check.
 * When there is no partial expiration check (garbage collection)
 * in a half hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
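/*
 * 30 timer runs at CHECK_EXPIRE_INTERVAL (one minute) give the half hour
 * mentioned above; the default expiration of 24*60*60*HZ jiffies is one day.
 */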
/*
 * for IPVS lblc entry hash table
 */
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS      10
#endif
#define IP_VS_LBLC_TAB_BITS     CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE     (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK     (IP_VS_LBLC_TAB_SIZE - 1)
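/*
 * With the default of 10 bits the table has 1024 buckets;
 * IP_VS_LBLC_TAB_MASK lets ip_vs_lblc_hashkey() reduce a hash value to a
 * bucket index without a modulo.
 */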
/*
 * IPVS lblc entry represents an association between destination
 * IP address and its destination server
 */
struct ip_vs_lblc_entry {
        struct list_head        list;
        __u32                   addr;           /* destination IP address */
        struct ip_vs_dest       *dest;          /* real server (cache) */
        unsigned long           lastuse;        /* last used time */
};
/*
 * IPVS lblc hash table
 */
struct ip_vs_lblc_table {
        rwlock_t                lock;           /* lock for this table */
        struct list_head        bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};
/*
 * IPVS LBLC sysctl table
 */
struct ip_vs_lblc_sysctl_table {
        struct ctl_table_header *sysctl_header;
        ctl_table vs_vars[2];
        ctl_table vs_dir[2];
        ctl_table ipv4_dir[2];
        ctl_table root_dir[2];
};


static struct ip_vs_lblc_sysctl_table lblc_sysctl_table = {
        NULL,
        {{NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration",
          &sysctl_ip_vs_lblc_expiration,
          sizeof(int), 0644, NULL, &proc_dointvec_jiffies},
         {0}},
        {{NET_IPV4_VS, "vs", NULL, 0, 0555, lblc_sysctl_table.vs_vars},
         {0}},
        {{NET_IPV4, "ipv4", NULL, 0, 0555, lblc_sysctl_table.vs_dir},
         {0}},
        {{CTL_NET, "net", NULL, 0, 0555, lblc_sysctl_table.ipv4_dir},
         {0}}
};
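/*
 * Registering root_dir below chains net -> ipv4 -> vs and exposes
 * /proc/sys/net/ipv4/vs/lblc_expiration.  proc_dointvec_jiffies converts
 * between seconds in /proc and the jiffies value stored in
 * sysctl_ip_vs_lblc_expiration.
 */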
/*
 * new/free an ip_vs_lblc_entry, which is a mapping of a destination
 * IP address to a server.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(__u32 daddr, struct ip_vs_dest *dest)
{
        struct ip_vs_lblc_entry *en;

        en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
        if (en == NULL) {
                IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
                return NULL;
        }
        INIT_LIST_HEAD(&en->list);
        en->addr = daddr;
        en->dest = dest;
        atomic_inc(&dest->refcnt);
        return en;
}
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
        list_del(&en->list);
        /* We don't kfree dest because it is referred to either by its
         * service or the trash dest list. */
        atomic_dec(&en->dest->refcnt);
        kfree(en);
}
/*
 * Returns hash value for IPVS LBLC entry
 */
static inline unsigned ip_vs_lblc_hashkey(__u32 addr)
{
        return (ntohl(addr)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}
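/*
 * 2654435761 (0x9E3779B1) is the classic golden-ratio multiplier used for
 * multiplicative hashing; masking with IP_VS_LBLC_TAB_MASK keeps the low
 * IP_VS_LBLC_TAB_BITS bits of the product as the bucket index.
 */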
/*
 * Hash an entry in the ip_vs_lblc_table.
 * returns bool success.
 */
static int
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
        unsigned hash;

        if (!list_empty(&en->list)) {
                IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
                          "called from %p\n", __builtin_return_address(0));
                return 0;
        }

        /* Hash by destination IP address */
        hash = ip_vs_lblc_hashkey(en->addr);

        write_lock(&tbl->lock);
        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
        write_unlock(&tbl->lock);
        return 1;
}
/*
 * Unhash ip_vs_lblc_entry from ip_vs_lblc_table.
 * returns bool success.
 */
static int ip_vs_lblc_unhash(struct ip_vs_lblc_table *tbl,
                             struct ip_vs_lblc_entry *en)
{
        if (list_empty(&en->list)) {
                IP_VS_ERR("ip_vs_lblc_unhash(): request for not hashed entry, "
                          "called from %p\n", __builtin_return_address(0));
                return 0;
        }

        /* Remove it from the table */
        write_lock(&tbl->lock);
        list_del(&en->list);
        INIT_LIST_HEAD(&en->list);
        write_unlock(&tbl->lock);
        return 1;
}
/*
 * Get ip_vs_lblc_entry associated with supplied parameters.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __u32 addr)
{
        unsigned hash;
        struct ip_vs_lblc_entry *en;
        struct list_head *l, *e;

        hash = ip_vs_lblc_hashkey(addr);
        l = &tbl->bucket[hash];

        read_lock(&tbl->lock);
        for (e=l->next; e!=l; e=e->next) {
                en = list_entry(e, struct ip_vs_lblc_entry, list);
                if (en->addr == addr) {
                        /* HIT */
                        read_unlock(&tbl->lock);
                        return en;
                }
        }
        read_unlock(&tbl->lock);
        return NULL;
}
/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{
        int i;
        struct list_head *l;
        struct ip_vs_lblc_entry *en;

        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                write_lock(&tbl->lock);
                for (l=&tbl->bucket[i]; l->next!=l; ) {
                        en = list_entry(l->next,
                                        struct ip_vs_lblc_entry, list);
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&tbl->lock);
        }
}
static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
{
        unsigned long now = jiffies;
        int i, j;
        struct list_head *l, *e;
        struct ip_vs_lblc_entry *en;

        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;
                e = l = &tbl->bucket[j];
                write_lock(&tbl->lock);
                while (e->next != l) {
                        en = list_entry(e->next,
                                        struct ip_vs_lblc_entry, list);
                        if ((now - en->lastuse) <
                            sysctl_ip_vs_lblc_expiration) {
                                e = e->next;
                                continue;
                        }
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&tbl->lock);
        }
        tbl->rover = j;
}
/*
 * Periodical timer handler for IPVS lblc table
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need a more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 * The full expiration check is for this purpose now.
 */
static void ip_vs_lblc_check_expire(unsigned long data)
{
        struct ip_vs_lblc_table *tbl;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct list_head *l, *e;
        struct ip_vs_lblc_entry *en;

        tbl = (struct ip_vs_lblc_table *)data;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblc_full_check(tbl);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        /* Aim to free about 4/3 of the excess entries, but never more
         * than half the table's maximum size in one pass. */
        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;
                e = l = &tbl->bucket[j];
                write_lock(&tbl->lock);
                while (e->next != l) {
                        en = list_entry(e->next,
                                        struct ip_vs_lblc_entry, list);
                        if ((now - en->lastuse) < ENTRY_TIMEOUT) {
                                e = e->next;
                                continue;
                        }
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&tbl->lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblc_table *tbl;

        /* Allocate the ip_vs_lblc_table for this service */
        tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC);
        if (tbl == NULL) {
                IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLC hash table (memory=%dbytes) allocated for "
                  "current service\n",
                  sizeof(struct ip_vs_lblc_table));

        /* Initialize the hash buckets */
        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->lock = RW_LOCK_UNLOCKED;
        tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /* Hook periodic timer for garbage collection */
        init_timer(&tbl->periodic_timer);
        tbl->periodic_timer.data = (unsigned long)tbl;
        tbl->periodic_timer.function = ip_vs_lblc_check_expire;
        tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
        add_timer(&tbl->periodic_timer);

        return 0;
}
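/*
 * With the default 1024-bucket table, max_size is 16384 entries; once the
 * entry count exceeds it, ip_vs_lblc_check_expire() starts trimming the
 * table on each timer run.
 */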
static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblc_flush(tbl);

        /* release the table itself */
        kfree(svc->sched_data);
        IP_VS_DBG(6, "LBLC hash table (memory=%dbytes) released\n",
                  sizeof(struct ip_vs_lblc_table));

        return 0;
}
static int ip_vs_lblc_update_svc(struct ip_vs_service *svc)
{
        return 0;
}
static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
        register struct list_head *l, *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is fifty
         * times higher than that of inactive connections on average. (This
         * fifty times might not be accurate, we will change it later.) We
         * use the following formula to estimate the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
        l = &svc->destinations;
        for (e=l->next; e!=l; e=e->next) {
                least = list_entry(e, struct ip_vs_dest, n_list);
                if (atomic_read(&least->weight) > 0) {
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         * Find the destination with the least load.
         */
  nextstage:
        for (e=e->next; e!=l; e=e->next) {
                dest = list_entry(e, struct ip_vs_dest, n_list);
                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(least->addr), ntohs(least->port),
                  atomic_read(&least->activeconns),
                  atomic_read(&least->refcnt),
                  atomic_read(&least->weight), loh);

        return least;
}
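/*
 * Illustrative numbers (not from the original source): a server with
 * weight 3, 10 active and 5 inactive connections has overhead
 * 10*50 + 5 = 505, while one with weight 1, 2 active and 0 inactive has
 * overhead 100.  The division-free test 505*1 > 100*3 holds, so the
 * second server is the less loaded one, matching 505/3 > 100/1.
 */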
/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                register struct list_head *l, *e;
                struct ip_vs_dest *d;

                l = &svc->destinations;
                for (e=l->next; e!=l; e=e->next) {
                        d = list_entry(e, struct ip_vs_dest, n_list);
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
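/*
 * This is the overload test from the pseudo code above: n.conns > n.weight
 * AND there is a node m with m.conns < m.weight/2, with the latter written
 * as m.conns*2 < m.weight to stay in integer arithmetic.
 */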
/*
 * Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
        struct ip_vs_dest *dest;
        struct ip_vs_lblc_table *tbl;
        struct ip_vs_lblc_entry *en;

        IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
        tbl = (struct ip_vs_lblc_table *)svc->sched_data;
        en = ip_vs_lblc_get(tbl, iph->daddr);
        if (en == NULL) {
                dest = __ip_vs_wlc_schedule(svc, iph);
                if (dest == NULL) {
                        IP_VS_DBG(1, "no destination available\n");
                        return NULL;
                }
                en = ip_vs_lblc_new(iph->daddr, dest);
                if (en == NULL)
                        return NULL;
                ip_vs_lblc_hash(tbl, en);
        } else {
                dest = en->dest;
                if (!(dest->flags & IP_VS_DEST_F_AVAILABLE)
                    || atomic_read(&dest->weight) <= 0
                    || is_overloaded(dest, svc)) {
                        dest = __ip_vs_wlc_schedule(svc, iph);
                        if (dest == NULL) {
                                IP_VS_DBG(1, "no destination available\n");
                                return NULL;
                        }
                        atomic_dec(&en->dest->refcnt);
                        atomic_inc(&dest->refcnt);
                        en->dest = dest;
                }
        }
        en->lastuse = jiffies;

        IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
                  "--> server %u.%u.%u.%u:%d\n",
                  NIPQUAD(en->addr),
                  NIPQUAD(dest->addr),
                  ntohs(dest->port));

        return dest;
}
/*
 * IPVS LBLC Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblc_scheduler =
{
        {0},                    /* n_list */
        "lblc",                 /* name */
        ATOMIC_INIT(0),         /* refcnt */
        THIS_MODULE,            /* this module */
        ip_vs_lblc_init_svc,    /* service initializer */
        ip_vs_lblc_done_svc,    /* service done */
        ip_vs_lblc_update_svc,  /* service updater */
        ip_vs_lblc_schedule,    /* select a server from the destination list */
};
static int __init ip_vs_lblc_init(void)
{
        INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
        lblc_sysctl_table.sysctl_header =
                register_sysctl_table(lblc_sysctl_table.root_dir, 0);
        return register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
}


static void __exit ip_vs_lblc_cleanup(void)
{
        unregister_sysctl_table(lblc_sysctl_table.sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
}


module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");