/*
 * IPVS         Application module
 *
 * Version:     $Id: ip_vs_app.c,v 1.14 2001/11/23 14:34:10 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses). The ip_vs_app modules are only used for VS/NAT.
 *
 *	IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 */
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/skbuff.h>
28 #include <net/protocol.h>
29 #include <asm/system.h>
30 #include <linux/stat.h>
31 #include <linux/proc_fs.h>
33 #include <net/ip_vs.h>
#define IP_VS_APP_TAB_SIZE	16		/* must be power of 2 */

/*
 * Bucket index for a (protocol, port) pair.  Because TAB_SIZE is a
 * power of two, masking with TAB_SIZE-1 selects the low bits.
 * All macro arguments and expansions are fully parenthesized so that
 * expression arguments (e.g. "a|b") expand correctly.
 */
#define IP_VS_APP_HASH(proto, port)	(((port) ^ (proto)) & (IP_VS_APP_TAB_SIZE-1))

/* Pack protocol (high 16 bits) and port (low 16 bits) into a lookup key. */
#define IP_VS_APP_TYPE(proto, port)	(((proto) << 16) | (port))
#define IP_VS_APP_PORT(type)		((type) & 0xffff)
#define IP_VS_APP_PROTO(type)		(((type) >> 16) & 0x00ff)
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);


/*
 * will hold ipvs app. hashed list heads
 * Chains contain struct ip_vs_app entries linked through their n_list
 * member, bucketed by IP_VS_APP_HASH(proto, port).
 */
static struct list_head ip_vs_app_base[IP_VS_APP_TAB_SIZE];

/* lock for ip_vs_app table: readers do lookups, writers (un)register apps */
static rwlock_t __ip_vs_app_lock = RW_LOCK_UNLOCKED;
57 * ip_vs_app registration routine
58 * port: host byte order.
60 int register_ip_vs_app(struct ip_vs_app *vapp,
61 unsigned short proto, __u16 port)
66 IP_VS_ERR("register_ip_vs_app(): NULL arg\n");
72 vapp->type = IP_VS_APP_TYPE(proto, port);
73 hash = IP_VS_APP_HASH(proto, port);
75 write_lock_bh(&__ip_vs_app_lock);
76 list_add(&vapp->n_list, &ip_vs_app_base[hash]);
77 write_unlock_bh(&__ip_vs_app_lock);
84 * ip_vs_app unregistration routine.
86 int unregister_ip_vs_app(struct ip_vs_app *vapp)
89 IP_VS_ERR("unregister_ip_vs_app(): NULL arg\n");
93 write_lock_bh(&__ip_vs_app_lock);
94 list_del(&vapp->n_list);
95 write_unlock_bh(&__ip_vs_app_lock);
104 * get ip_vs_app object by its proto and port (net byte order).
106 static struct ip_vs_app * ip_vs_app_get(unsigned short proto, __u16 port)
109 struct ip_vs_app *vapp;
114 type = IP_VS_APP_TYPE(proto, port);
115 hash = IP_VS_APP_HASH(proto, port);
117 read_lock_bh(&__ip_vs_app_lock);
119 list_for_each(e, &ip_vs_app_base[hash]) {
120 vapp = list_entry(e, struct ip_vs_app, n_list);
123 * Test and MOD_INC_USE_COUNT atomically
125 if (vapp->module && !try_inc_mod_count(vapp->module)) {
127 * This application module is just deleted
131 if (type == vapp->type) {
132 read_unlock_bh(&__ip_vs_app_lock);
137 __MOD_DEC_USE_COUNT(vapp->module);
140 read_unlock_bh(&__ip_vs_app_lock);
146 * Bind ip_vs_conn to its ip_vs_app based on proto and dport,
147 * and call the ip_vs_app constructor.
149 struct ip_vs_app * ip_vs_bind_app(struct ip_vs_conn *cp)
151 struct ip_vs_app *vapp;
153 /* no need to bind app if its forwarding method is not NAT */
154 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
157 if (cp->protocol != IPPROTO_TCP && cp->protocol != IPPROTO_UDP)
161 * don't allow binding if already bound
163 if (cp->app != NULL) {
164 IP_VS_ERR("ip_vs_bind_app(): "
165 "called for already bound object.\n");
169 vapp = ip_vs_app_get(cp->protocol, cp->vport);
175 vapp->init_conn(vapp, cp);
182 * Unbind cp from type object and call cp destructor (does not kfree()).
184 int ip_vs_unbind_app(struct ip_vs_conn *cp)
186 struct ip_vs_app *vapp = cp->app;
188 if (cp->protocol != IPPROTO_TCP && cp->protocol != IPPROTO_UDP)
193 vapp->done_conn(vapp, cp);
196 __MOD_DEC_USE_COUNT(vapp->module);
198 return (vapp != NULL);
203 * Fixes th->seq based on ip_vs_seq info.
205 static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
207 __u32 seq = ntohl(th->seq);
210 * Adjust seq with delta-offset for all packets after
211 * the most recent resized pkt seq and with previous_delta offset
212 * for all packets before most recent resized pkt seq.
214 if (vseq->delta || vseq->previous_delta) {
215 if(after(seq, vseq->init_seq)) {
216 th->seq = htonl(seq + vseq->delta);
217 IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
220 th->seq = htonl(seq + vseq->previous_delta);
221 IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
222 "(%d) to seq\n", vseq->previous_delta);
229 * Fixes th->ack_seq based on ip_vs_seq info.
232 vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
234 __u32 ack_seq = ntohl(th->ack_seq);
237 * Adjust ack_seq with delta-offset for
238 * the packets AFTER most recent resized pkt has caused a shift
239 * for packets before most recent resized pkt, use previous_delta
241 if (vseq->delta || vseq->previous_delta) {
242 /* since ack_seq is the number of octet that is expected
243 to receive next, so compare it with init_seq+delta */
244 if(after(ack_seq, vseq->init_seq+vseq->delta)) {
245 th->ack_seq = htonl(ack_seq - vseq->delta);
246 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
247 "(%d) from ack_seq\n", vseq->delta);
250 th->ack_seq = htonl(ack_seq - vseq->previous_delta);
251 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
252 "previous_delta (%d) from ack_seq\n",
253 vseq->previous_delta);
260 * Updates ip_vs_seq if pkt has been resized
261 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
263 static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
264 unsigned flag, __u32 seq, int diff)
266 /* spinlock is to keep updating cp->flags atomic */
267 spin_lock(&cp->lock);
268 if ( !(cp->flags & flag) || after(seq, vseq->init_seq)) {
269 vseq->previous_delta = vseq->delta;
271 vseq->init_seq = seq;
274 spin_unlock(&cp->lock);
279 * Output pkt hook. Will call bound ip_vs_app specific function
280 * called by ip_vs_out(), assumes previously checked cp!=NULL
281 * returns (new - old) skb->len diff.
283 int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
285 struct ip_vs_app *vapp;
292 * check if application module is bound to
295 if ((vapp = cp->app) == NULL)
299 th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]);
302 * Remember seq number in case this pkt gets resized
304 seq = ntohl(th->seq);
307 * Fix seq stuff if flagged as so.
309 if (cp->protocol == IPPROTO_TCP) {
310 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
311 vs_fix_seq(&cp->out_seq, th);
312 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
313 vs_fix_ack_seq(&cp->in_seq, th);
317 * Call private output hook function
319 if (vapp->pkt_out == NULL)
322 diff = vapp->pkt_out(vapp, cp, skb);
325 * Update ip_vs seq stuff if len has changed.
327 if (diff != 0 && cp->protocol == IPPROTO_TCP)
328 vs_seq_update(cp, &cp->out_seq,
329 IP_VS_CONN_F_OUT_SEQ, seq, diff);
336 * Input pkt hook. Will call bound ip_vs_app specific function
337 * called by ip_fw_demasquerade(), assumes previously checked cp!=NULL.
338 * returns (new - old) skb->len diff.
340 int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
342 struct ip_vs_app *vapp;
349 * check if application module is bound to
352 if ((vapp = cp->app) == NULL)
356 th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]);
359 * Remember seq number in case this pkt gets resized
361 seq = ntohl(th->seq);
364 * Fix seq stuff if flagged as so.
366 if (cp->protocol == IPPROTO_TCP) {
367 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
368 vs_fix_seq(&cp->in_seq, th);
369 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
370 vs_fix_ack_seq(&cp->out_seq, th);
374 * Call private input hook function
376 if (vapp->pkt_in == NULL)
379 diff = vapp->pkt_in(vapp, cp, skb);
382 * Update ip_vs seq stuff if len has changed.
384 if (diff != 0 && cp->protocol == IPPROTO_TCP)
385 vs_seq_update(cp, &cp->in_seq,
386 IP_VS_CONN_F_IN_SEQ, seq, diff);
393 * /proc/net/ip_vs_app entry function
395 static int ip_vs_app_getinfo(char *buffer, char **start, off_t offset,
402 struct ip_vs_app *vapp;
407 len += sprintf(buffer+len, "%-63s\n",
408 "prot port usecnt name");
411 read_lock_bh(&__ip_vs_app_lock);
412 for (idx=0 ; idx < IP_VS_APP_TAB_SIZE; idx++) {
413 list_for_each (e, &ip_vs_app_base[idx]) {
414 vapp = list_entry(e, struct ip_vs_app, n_list);
419 sprintf(temp, "%-3s %-7u %-6d %-17s",
420 ip_vs_proto_name(IP_VS_APP_PROTO(vapp->type)),
421 IP_VS_APP_PORT(vapp->type),
422 vapp->module?GET_USE_COUNT(vapp->module):0,
424 len += sprintf(buffer+len, "%-63s\n", temp);
425 if (pos >= offset+length)
430 read_unlock_bh(&__ip_vs_app_lock);
432 *start = buffer+len-(pos-offset); /* Start of wanted data */
443 * Replace a segment of data with a new segment
445 int ip_vs_skb_replace(struct sk_buff *skb, int pri,
446 char *o_buf, int o_len, char *n_buf, int n_len)
455 diff = n_len - o_len;
456 o_offset = o_buf - (char *)skb->data;
457 /* The length of left data after o_buf+o_len in the skb data */
458 o_left = skb->len - (o_offset + o_len);
461 memmove(o_buf + n_len, o_buf + o_len, o_left);
462 memcpy(o_buf, n_buf, n_len);
463 skb_trim(skb, skb->len + diff);
464 } else if (diff <= skb_tailroom(skb)) {
466 memmove(o_buf + n_len, o_buf + o_len, o_left);
467 memcpy(o_buf, n_buf, n_len);
469 if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
472 memmove(skb->data + o_offset + n_len,
473 skb->data + o_offset + o_len, o_left);
474 memcpy(skb->data + o_offset, n_buf, n_len);
477 /* must update the iph total length here */
479 iph->tot_len = htons(skb->len);
486 int ip_vs_app_init(void)
490 for (idx=0 ; idx < IP_VS_APP_TAB_SIZE; idx++) {
491 INIT_LIST_HEAD(&ip_vs_app_base[idx]);
494 /* we will replace it with proc_net_ipvs_create() soon */
495 proc_net_create("ip_vs_app", 0, ip_vs_app_getinfo);
/*
 * Module teardown: remove the /proc entry created by ip_vs_app_init().
 */
void ip_vs_app_cleanup(void)
{
	proc_net_remove("ip_vs_app");
}