2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
30 MODULE_LICENSE("GPL");
31 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
32 MODULE_DESCRIPTION("IPv4 packet filter");
34 /*#define DEBUG_IP_FIREWALL*/
35 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
36 /*#define DEBUG_IP_FIREWALL_USER*/
38 #ifdef DEBUG_IP_FIREWALL
39 #define dprintf(format, args...) printk(format , ## args)
41 #define dprintf(format, args...)
44 #ifdef DEBUG_IP_FIREWALL_USER
45 #define duprintf(format, args...) printk(format , ## args)
47 #define duprintf(format, args...)
50 #ifdef CONFIG_NETFILTER_DEBUG
51 #define IP_NF_ASSERT(x) \
54 printk("IP_NF_ASSERT: %s:%s:%u\n", \
55 __FUNCTION__, __FILE__, __LINE__); \
58 #define IP_NF_ASSERT(x)
62 /* All the better to debug you with... */
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
74 Hence the start of any table is given by get_table() below. */
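/*
 * Illustrative sketch of the locking discipline described above (the same
 * pattern the code below uses; 't' stands for some struct xt_table):
 */
#if 0
	/* packet path, softirq context: many readers may run in parallel */
	read_lock_bh(&t->lock);
	/* ... walk t->private->entries[smp_processor_id()], bump counters ... */
	read_unlock_bh(&t->lock);

	/* user context: taking the lock for writing keeps packets out while
	 * counters are snapshotted or the ruleset is swapped */
	write_lock_bh(&t->lock);
	/* ... get_counters() / xt_replace_table() ... */
	write_unlock_bh(&t->lock);
#endif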
76 /* Returns whether the packet matches the rule or not. */
78 ip_packet_match(const struct iphdr *ip,
81 const struct ipt_ip *ipinfo,
87 #define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
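/* For illustration: with smsk 255.255.255.0 and src 10.0.0.0, a packet from
 * 10.0.0.5 makes the raw "mismatch" test below false; if IPT_INV_SRCIP is
 * set (rule written as "! -s 10.0.0.0/24"), FWINV() flips that to true and
 * the rule correctly rejects the packet. */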
89 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
91 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
93 dprintf("Source or dest mismatch.\n");
95 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
97 NIPQUAD(ipinfo->smsk.s_addr),
98 NIPQUAD(ipinfo->src.s_addr),
99 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
100 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
102 NIPQUAD(ipinfo->dmsk.s_addr),
103 NIPQUAD(ipinfo->dst.s_addr),
104 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
108 /* Look for ifname matches; this should unroll nicely. */
109 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
110 ret |= (((const unsigned long *)indev)[i]
111 ^ ((const unsigned long *)ipinfo->iniface)[i])
112 & ((const unsigned long *)ipinfo->iniface_mask)[i];
115 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
116 dprintf("VIA in mismatch (%s vs %s).%s\n",
117 indev, ipinfo->iniface,
118 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
122 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 ret |= (((const unsigned long *)outdev)[i]
124 ^ ((const unsigned long *)ipinfo->outiface)[i])
125 & ((const unsigned long *)ipinfo->outiface_mask)[i];
128 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
129 dprintf("VIA out mismatch (%s vs %s).%s\n",
130 outdev, ipinfo->outiface,
131 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
135 /* Check specific protocol */
137 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
138 dprintf("Packet protocol %hi does not match %hi.%s\n",
139 ip->protocol, ipinfo->proto,
140 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
144 /* If we have a fragment rule but the packet is not a fragment
145 * then we return zero */
146 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
147 dprintf("Fragment rule but not fragment.%s\n",
148 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
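/*
 * Illustrative sketch (not built): the interface-name test above, restated
 * as a stand-alone helper.  The names and masks are IFNAMSIZ bytes and
 * long-aligned, so they can be compared one unsigned long at a time under
 * the mask; a zero result means the names match.
 */
#if 0
static unsigned long ifname_compare(const char *a, const char *b,
				    const char *mask)
{
	unsigned long ret = 0;
	size_t i;

	for (i = 0; i < IFNAMSIZ / sizeof(unsigned long); i++)
		ret |= (((const unsigned long *)a)[i]
			^ ((const unsigned long *)b)[i])
		       & ((const unsigned long *)mask)[i];
	return ret;
}
#endif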
156 ip_checkentry(const struct ipt_ip *ip)
158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK);
163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK);
172 ipt_error(struct sk_buff *skb,
173 const struct net_device *in,
174 const struct net_device *out,
175 unsigned int hooknum,
176 const struct xt_target *target,
177 const void *targinfo)
180 printk("ip_tables: error: `%s'\n", (char *)targinfo);
186 bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb,
188 const struct net_device *in,
189 const struct net_device *out,
193 /* Stop iteration if it doesn't match */
194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
195 offset, ip_hdrlen(skb), hotdrop))
201 static inline struct ipt_entry *
202 get_entry(void *base, unsigned int offset)
204 return (struct ipt_entry *)(base + offset);
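/* For illustration: a table blob is a flat sequence of variable-sized
 * struct ipt_entry records (the entry, then its matches, then its target),
 * addressed purely by byte offsets.  hook_entry[] and underflow[] hold such
 * offsets, so get_entry(table_base, private->hook_entry[hook]) yields the
 * first rule of that hook's chain. */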
207 /* All zeroes == unconditional rule. */
209 unconditional(const struct ipt_ip *ip)
213 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
214 if (((__u32 *)ip)[i])
220 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
221 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
222 static const char *hooknames[] = {
223 [NF_INET_PRE_ROUTING] = "PREROUTING",
224 [NF_INET_LOCAL_IN] = "INPUT",
225 [NF_INET_FORWARD] = "FORWARD",
226 [NF_INET_LOCAL_OUT] = "OUTPUT",
227 [NF_INET_POST_ROUTING] = "POSTROUTING",
230 enum nf_ip_trace_comments {
231 NF_IP_TRACE_COMMENT_RULE,
232 NF_IP_TRACE_COMMENT_RETURN,
233 NF_IP_TRACE_COMMENT_POLICY,
236 static const char *comments[] = {
237 [NF_IP_TRACE_COMMENT_RULE] = "rule",
238 [NF_IP_TRACE_COMMENT_RETURN] = "return",
239 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
242 static struct nf_loginfo trace_loginfo = {
243 .type = NF_LOG_TYPE_LOG,
247 .logflags = NF_LOG_MASK,
253 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
254 char *hookname, char **chainname,
255 char **comment, unsigned int *rulenum)
257 struct ipt_standard_target *t = (void *)ipt_get_target(s);
259 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
260 /* Head of user chain: ERROR target with chainname */
261 *chainname = t->target.data;
266 if (s->target_offset == sizeof(struct ipt_entry)
267 && strcmp(t->target.u.kernel.target->name,
268 IPT_STANDARD_TARGET) == 0
270 && unconditional(&s->ip)) {
271 /* Tail of chains: STANDARD target (return/policy) */
272 *comment = *chainname == hookname
273 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
274 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
283 static void trace_packet(struct sk_buff *skb,
285 const struct net_device *in,
286 const struct net_device *out,
288 struct xt_table_info *private,
292 struct ipt_entry *root;
293 char *hookname, *chainname, *comment;
294 unsigned int rulenum = 0;
296 table_base = (void *)private->entries[smp_processor_id()];
297 root = get_entry(table_base, private->hook_entry[hook]);
299 hookname = chainname = (char *)hooknames[hook];
300 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
302 IPT_ENTRY_ITERATE(root,
303 private->size - private->hook_entry[hook],
304 get_chainname_rulenum,
305 e, hookname, &chainname, &comment, &rulenum);
307 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
308 "TRACE: %s:%s:%s:%u ",
309 tablename, chainname, comment, rulenum);
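	/* Example of the resulting log line for table "filter", chain
	 * "INPUT", matched by rule number 2:
	 *	TRACE: filter:INPUT:rule:2
	 * Chain policies and user-chain returns log "policy"/"return" as the
	 * comment instead of "rule". */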
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 ipt_do_table(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
325 bool hotdrop = false;
326 /* Initializing verdict to NF_DROP keeps gcc happy. */
327 unsigned int verdict = NF_DROP;
328 const char *indev, *outdev;
330 struct ipt_entry *e, *back;
331 struct xt_table_info *private;
335 datalen = skb->len - ip->ihl * 4;
336 indev = in ? in->name : nulldevname;
337 outdev = out ? out->name : nulldevname;
338 /* We handle fragments by dealing with the first fragment as
339 * if it was a normal packet. All other fragments are treated
340 * normally, except that they will NEVER match rules that ask
341 * things we don't know (i.e. tcp syn flag or ports). If the
342 * rule is also a fragment-specific rule, non-fragments won't match it. */
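	/* For illustration: frag_off 0x2000 (MF set, fragment offset 0) is a
	 * first fragment, so offset below is 0 and the packet is matched like
	 * a normal one; frag_off 0x0001 (offset 8 bytes) gives a non-zero
	 * offset, and match modules that look at ports or TCP flags refuse to
	 * match such a packet. */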
344 offset = ntohs(ip->frag_off) & IP_OFFSET;
346 read_lock_bh(&table->lock);
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
348 private = table->private;
349 table_base = (void *)private->entries[smp_processor_id()];
350 e = get_entry(table_base, private->hook_entry[hook]);
352 /* For return from builtin chain */
353 back = get_entry(table_base, private->underflow[hook]);
358 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
359 struct ipt_entry_target *t;
361 if (IPT_MATCH_ITERATE(e, do_match,
363 offset, &hotdrop) != 0)
366 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
368 t = ipt_get_target(e);
369 IP_NF_ASSERT(t->u.kernel.target);
371 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
372 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
373 /* The packet is traced: log it */
374 if (unlikely(skb->nf_trace))
375 trace_packet(skb, hook, in, out,
376 table->name, private, e);
378 /* Standard target? */
379 if (!t->u.kernel.target->target) {
382 v = ((struct ipt_standard_target *)t)->verdict;
384 /* Pop from stack? */
385 if (v != IPT_RETURN) {
386 verdict = (unsigned)(-v) - 1;
390 back = get_entry(table_base,
394 if (table_base + v != (void *)e + e->next_offset
395 && !(e->ip.flags & IPT_F_GOTO)) {
396 /* Save old back ptr in next entry */
397 struct ipt_entry *next
398 = (void *)e + e->next_offset;
400 = (void *)back - table_base;
401 /* set back pointer to next entry */
405 e = get_entry(table_base, v);
407 /* Targets which reenter must return absolute verdicts. */
409 #ifdef CONFIG_NETFILTER_DEBUG
410 ((struct ipt_entry *)table_base)->comefrom
413 verdict = t->u.kernel.target->target(skb,
419 #ifdef CONFIG_NETFILTER_DEBUG
420 if (((struct ipt_entry *)table_base)->comefrom
422 && verdict == IPT_CONTINUE) {
423 printk("Target %s reentered!\n",
424 t->u.kernel.target->name);
427 ((struct ipt_entry *)table_base)->comefrom
430 /* Target might have changed stuff. */
432 datalen = skb->len - ip->ihl * 4;
434 if (verdict == IPT_CONTINUE)
435 e = (void *)e + e->next_offset;
443 e = (void *)e + e->next_offset;
447 read_unlock_bh(&table->lock);
449 #ifdef DEBUG_ALLOW_ALL
458 /* Figures out from what hook each rule can be called: returns 0 if
459 there are loops. Puts hook bitmask in comefrom. */
461 mark_source_chains(struct xt_table_info *newinfo,
462 unsigned int valid_hooks, void *entry0)
466 /* No recursion; use packet counter to save back ptrs (reset
467 to 0 as we leave), and comefrom to save source hook bitmask */
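	/* For illustration: while walking down a chain, e->counters.pcnt
	 * temporarily holds the offset we arrived from, so a user-defined
	 * chain ending in RETURN can be unwound without recursion (the field
	 * is put back to 0 on the way out), and comefrom accumulates hook
	 * bits so that reaching an already-visited entry from the same hook
	 * is reported as a loop. */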
468 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
469 unsigned int pos = newinfo->hook_entry[hook];
471 = (struct ipt_entry *)(entry0 + pos);
473 if (!(valid_hooks & (1 << hook)))
476 /* Set initial back pointer. */
477 e->counters.pcnt = pos;
480 struct ipt_standard_target *t
481 = (void *)ipt_get_target(e);
482 int visited = e->comefrom & (1 << hook);
484 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
485 printk("iptables: loop hook %u pos %u %08X.\n",
486 hook, pos, e->comefrom);
490 |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
492 /* Unconditional return/END. */
493 if ((e->target_offset == sizeof(struct ipt_entry)
494 && (strcmp(t->target.u.user.name,
495 IPT_STANDARD_TARGET) == 0)
497 && unconditional(&e->ip)) || visited) {
498 unsigned int oldpos, size;
500 if (t->verdict < -NF_MAX_VERDICT - 1) {
501 duprintf("mark_source_chains: bad "
502 "negative verdict (%i)\n",
507 /* Return: backtrack through the last big jumps. */
510 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
511 #ifdef DEBUG_IP_FIREWALL_USER
513 & (1 << NF_INET_NUMHOOKS)) {
514 duprintf("Back unset "
521 pos = e->counters.pcnt;
522 e->counters.pcnt = 0;
524 /* We're at the start. */
528 e = (struct ipt_entry *)
530 } while (oldpos == pos + e->next_offset);
533 size = e->next_offset;
534 e = (struct ipt_entry *)
535 (entry0 + pos + size);
536 e->counters.pcnt = pos;
539 int newpos = t->verdict;
541 if (strcmp(t->target.u.user.name,
542 IPT_STANDARD_TARGET) == 0
544 if (newpos > newinfo->size -
545 sizeof(struct ipt_entry)) {
546 duprintf("mark_source_chains: "
547 "bad verdict (%i)\n",
551 /* This is a jump; chase it. */
552 duprintf("Jump rule %u -> %u\n",
555 /* ... this is a fallthru */
556 newpos = pos + e->next_offset;
558 e = (struct ipt_entry *)
560 e->counters.pcnt = pos;
565 duprintf("Finished chain %u\n", hook);
571 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
573 if (i && (*i)-- == 0)
576 if (m->u.kernel.match->destroy)
577 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
578 module_put(m->u.kernel.match->me);
583 check_entry(struct ipt_entry *e, const char *name)
585 struct ipt_entry_target *t;
587 if (!ip_checkentry(&e->ip)) {
588 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
592 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
595 t = ipt_get_target(e);
596 if (e->target_offset + t->u.target_size > e->next_offset)
602 static inline int check_match(struct ipt_entry_match *m, const char *name,
603 const struct ipt_ip *ip, unsigned int hookmask,
606 struct xt_match *match;
609 match = m->u.kernel.match;
610 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
611 name, hookmask, ip->proto,
612 ip->invflags & IPT_INV_PROTO);
613 if (!ret && m->u.kernel.match->checkentry
614 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
616 duprintf("ip_tables: check failed for `%s'.\n",
617 m->u.kernel.match->name);
626 find_check_match(struct ipt_entry_match *m,
628 const struct ipt_ip *ip,
629 unsigned int hookmask,
632 struct xt_match *match;
635 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
637 "ipt_%s", m->u.user.name);
638 if (IS_ERR(match) || !match) {
639 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
640 return match ? PTR_ERR(match) : -ENOENT;
642 m->u.kernel.match = match;
644 ret = check_match(m, name, ip, hookmask, i);
650 module_put(m->u.kernel.match->me);
654 static inline int check_target(struct ipt_entry *e, const char *name)
656 struct ipt_entry_target *t;
657 struct xt_target *target;
660 t = ipt_get_target(e);
661 target = t->u.kernel.target;
662 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
663 name, e->comefrom, e->ip.proto,
664 e->ip.invflags & IPT_INV_PROTO);
665 if (!ret && t->u.kernel.target->checkentry
666 && !t->u.kernel.target->checkentry(name, e, target,
667 t->data, e->comefrom)) {
668 duprintf("ip_tables: check failed for `%s'.\n",
669 t->u.kernel.target->name);
676 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
679 struct ipt_entry_target *t;
680 struct xt_target *target;
684 ret = check_entry(e, name);
689 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
692 goto cleanup_matches;
694 t = ipt_get_target(e);
695 target = try_then_request_module(xt_find_target(AF_INET,
698 "ipt_%s", t->u.user.name);
699 if (IS_ERR(target) || !target) {
700 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
701 ret = target ? PTR_ERR(target) : -ENOENT;
702 goto cleanup_matches;
704 t->u.kernel.target = target;
706 ret = check_target(e, name);
713 module_put(t->u.kernel.target->me);
715 IPT_MATCH_ITERATE(e, cleanup_match, &j);
720 check_entry_size_and_hooks(struct ipt_entry *e,
721 struct xt_table_info *newinfo,
723 unsigned char *limit,
724 const unsigned int *hook_entries,
725 const unsigned int *underflows,
730 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
731 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
732 duprintf("Bad offset %p\n", e);
737 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
738 duprintf("checking: element %p size %u\n",
743 /* Check hooks & underflows */
744 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
745 if ((unsigned char *)e - base == hook_entries[h])
746 newinfo->hook_entry[h] = hook_entries[h];
747 if ((unsigned char *)e - base == underflows[h])
748 newinfo->underflow[h] = underflows[h];
751 /* FIXME: underflows must be unconditional, standard verdicts
752 < 0 (not IPT_RETURN). --RR */
754 /* Clear counters and comefrom */
755 e->counters = ((struct xt_counters) { 0, 0 });
763 cleanup_entry(struct ipt_entry *e, unsigned int *i)
765 struct ipt_entry_target *t;
767 if (i && (*i)-- == 0)
770 /* Cleanup all matches */
771 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
772 t = ipt_get_target(e);
773 if (t->u.kernel.target->destroy)
774 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
775 module_put(t->u.kernel.target->me);
779 /* Checks and translates the user-supplied table segment (held in newinfo). */
782 translate_table(const char *name,
783 unsigned int valid_hooks,
784 struct xt_table_info *newinfo,
788 const unsigned int *hook_entries,
789 const unsigned int *underflows)
794 newinfo->size = size;
795 newinfo->number = number;
797 /* Init all hooks to impossible value. */
798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
799 newinfo->hook_entry[i] = 0xFFFFFFFF;
800 newinfo->underflow[i] = 0xFFFFFFFF;
803 duprintf("translate_table: size %u\n", newinfo->size);
805 /* Walk through entries, checking offsets. */
806 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
807 check_entry_size_and_hooks,
811 hook_entries, underflows, &i);
816 duprintf("translate_table: %u not %u entries\n",
821 /* Check hooks all assigned */
822 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
823 /* Only hooks which are valid */
824 if (!(valid_hooks & (1 << i)))
826 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
827 duprintf("Invalid hook entry %u %u\n",
831 if (newinfo->underflow[i] == 0xFFFFFFFF) {
832 duprintf("Invalid underflow %u %u\n",
838 if (!mark_source_chains(newinfo, valid_hooks, entry0))
841 /* Finally, each sanity check must pass */
843 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
844 find_check_entry, name, size, &i);
847 IPT_ENTRY_ITERATE(entry0, newinfo->size,
852 /* And one copy for every other CPU */
853 for_each_possible_cpu(i) {
854 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
855 memcpy(newinfo->entries[i], entry0, newinfo->size);
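	/* For illustration: every possible CPU gets its own copy of the
	 * translated blob, so the packet path can bump e->counters in its
	 * local copy without atomic ops; get_counters() later sums the
	 * per-CPU copies for user space. */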
863 add_entry_to_counter(const struct ipt_entry *e,
864 struct xt_counters total[],
867 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
874 set_entry_to_counter(const struct ipt_entry *e,
875 struct ipt_counters total[],
878 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
885 get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
892 /* Instead of clearing the counters (by a previous call to memset())
893 * and using adds, we set them from the copy owned by the 'current'
894 * CPU, then add in the other CPUs' copies below.
895 * We don't care about preemption here. */
897 curcpu = raw_smp_processor_id();
900 IPT_ENTRY_ITERATE(t->entries[curcpu],
902 set_entry_to_counter,
906 for_each_possible_cpu(cpu) {
910 IPT_ENTRY_ITERATE(t->entries[cpu],
912 add_entry_to_counter,
918 static inline struct xt_counters *alloc_counters(struct xt_table *table)
920 unsigned int countersize;
921 struct xt_counters *counters;
922 struct xt_table_info *private = table->private;
924 /* We need an atomic snapshot of the counters: the rest doesn't change
925 (other than comefrom, which userspace doesn't care about). */
927 countersize = sizeof(struct xt_counters) * private->number;
928 counters = vmalloc_node(countersize, numa_node_id());
930 if (counters == NULL)
931 return ERR_PTR(-ENOMEM);
933 /* First, sum counters... */
934 write_lock_bh(&table->lock);
935 get_counters(private, counters);
936 write_unlock_bh(&table->lock);
942 copy_entries_to_user(unsigned int total_size,
943 struct xt_table *table,
944 void __user *userptr)
946 unsigned int off, num;
948 struct xt_counters *counters;
949 struct xt_table_info *private = table->private;
953 counters = alloc_counters(table);
954 if (IS_ERR(counters))
955 return PTR_ERR(counters);
957 /* choose the copy that is on our node/cpu, ...
958 * This choice is lazy (because current thread is
959 * allowed to migrate to another cpu)
961 loc_cpu_entry = private->entries[raw_smp_processor_id()];
962 /* ... then copy entire thing ... */
963 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
968 /* FIXME: use iterator macros --RR */
969 /* ... then go back and fix counters and names */
970 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
972 struct ipt_entry_match *m;
973 struct ipt_entry_target *t;
975 e = (struct ipt_entry *)(loc_cpu_entry + off);
976 if (copy_to_user(userptr + off
977 + offsetof(struct ipt_entry, counters),
979 sizeof(counters[num])) != 0) {
984 for (i = sizeof(struct ipt_entry);
985 i < e->target_offset;
986 i += m->u.match_size) {
989 if (copy_to_user(userptr + off + i
990 + offsetof(struct ipt_entry_match,
992 m->u.kernel.match->name,
993 strlen(m->u.kernel.match->name)+1)
1000 t = ipt_get_target(e);
1001 if (copy_to_user(userptr + off + e->target_offset
1002 + offsetof(struct ipt_entry_target,
1004 t->u.kernel.target->name,
1005 strlen(t->u.kernel.target->name)+1) != 0) {
1016 #ifdef CONFIG_COMPAT
1017 struct compat_delta {
1018 struct compat_delta *next;
1019 unsigned int offset;
1023 static struct compat_delta *compat_offsets = NULL;
1025 static int compat_add_offset(unsigned int offset, short delta)
1027 struct compat_delta *tmp;
1029 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
1032 tmp->offset = offset;
1034 if (compat_offsets) {
1035 tmp->next = compat_offsets->next;
1036 compat_offsets->next = tmp;
1038 compat_offsets = tmp;
1044 static void compat_flush_offsets(void)
1046 struct compat_delta *tmp, *next;
1048 if (compat_offsets) {
1049 for (tmp = compat_offsets; tmp; tmp = next) {
1053 compat_offsets = NULL;
1057 static short compat_calc_jump(unsigned int offset)
1059 struct compat_delta *tmp;
1062 for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
1063 if (tmp->offset < offset)
1064 delta += tmp->delta;
1068 static void compat_standard_from_user(void *dst, void *src)
1070 int v = *(compat_int_t *)src;
1073 v += compat_calc_jump(v);
1074 memcpy(dst, &v, sizeof(v));
1077 static int compat_standard_to_user(void __user *dst, void *src)
1079 compat_int_t cv = *(int *)src;
1082 cv -= compat_calc_jump(cv);
1083 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
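/*
 * For illustration: 64-bit kernel entries are larger than their 32-bit
 * compat counterparts, so translation records how much each entry grows
 * (compat_add_offset()).  A standard verdict that jumps to offset V is then
 * corrected by the accumulated growth of the entries located before V:
 * compat_standard_from_user() adds compat_calc_jump(V) and
 * compat_standard_to_user() subtracts it again.
 */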
1087 compat_calc_match(struct ipt_entry_match *m, int * size)
1089 *size += xt_compat_match_offset(m->u.kernel.match);
1093 static int compat_calc_entry(struct ipt_entry *e,
1094 const struct xt_table_info *info,
1095 void *base, struct xt_table_info *newinfo)
1097 struct ipt_entry_target *t;
1098 unsigned int entry_offset;
1102 entry_offset = (void *)e - base;
1103 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1104 t = ipt_get_target(e);
1105 off += xt_compat_target_offset(t->u.kernel.target);
1106 newinfo->size -= off;
1107 ret = compat_add_offset(entry_offset, off);
1111 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1112 if (info->hook_entry[i] && (e < (struct ipt_entry *)
1113 (base + info->hook_entry[i])))
1114 newinfo->hook_entry[i] -= off;
1115 if (info->underflow[i] && (e < (struct ipt_entry *)
1116 (base + info->underflow[i])))
1117 newinfo->underflow[i] -= off;
1122 static int compat_table_info(const struct xt_table_info *info,
1123 struct xt_table_info *newinfo)
1125 void *loc_cpu_entry;
1127 if (!newinfo || !info)
1130 /* we don't care about newinfo->entries[] */
1131 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1132 newinfo->initial_entries = 0;
1133 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1134 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1135 compat_calc_entry, info, loc_cpu_entry, newinfo);
1139 static int get_info(void __user *user, int *len, int compat)
1141 char name[IPT_TABLE_MAXNAMELEN];
1145 if (*len != sizeof(struct ipt_getinfo)) {
1146 duprintf("length %u != %u\n", *len,
1147 (unsigned int)sizeof(struct ipt_getinfo));
1151 if (copy_from_user(name, user, sizeof(name)) != 0)
1154 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1155 #ifdef CONFIG_COMPAT
1157 xt_compat_lock(AF_INET);
1159 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1160 "iptable_%s", name);
1161 if (t && !IS_ERR(t)) {
1162 struct ipt_getinfo info;
1163 struct xt_table_info *private = t->private;
1165 #ifdef CONFIG_COMPAT
1167 struct xt_table_info tmp;
1168 ret = compat_table_info(private, &tmp);
1169 compat_flush_offsets();
1173 info.valid_hooks = t->valid_hooks;
1174 memcpy(info.hook_entry, private->hook_entry,
1175 sizeof(info.hook_entry));
1176 memcpy(info.underflow, private->underflow,
1177 sizeof(info.underflow));
1178 info.num_entries = private->number;
1179 info.size = private->size;
1180 strcpy(info.name, name);
1182 if (copy_to_user(user, &info, *len) != 0)
1190 ret = t ? PTR_ERR(t) : -ENOENT;
1191 #ifdef CONFIG_COMPAT
1193 xt_compat_unlock(AF_INET);
1199 get_entries(struct ipt_get_entries __user *uptr, int *len)
1202 struct ipt_get_entries get;
1205 if (*len < sizeof(get)) {
1206 duprintf("get_entries: %u < %d\n", *len,
1207 (unsigned int)sizeof(get));
1210 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1212 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1213 duprintf("get_entries: %u != %u\n", *len,
1214 (unsigned int)(sizeof(struct ipt_get_entries) +
1219 t = xt_find_table_lock(AF_INET, get.name);
1220 if (t && !IS_ERR(t)) {
1221 struct xt_table_info *private = t->private;
1222 duprintf("t->private->number = %u\n",
1224 if (get.size == private->size)
1225 ret = copy_entries_to_user(private->size,
1226 t, uptr->entrytable);
1228 duprintf("get_entries: I've got %u not %u!\n",
1236 ret = t ? PTR_ERR(t) : -ENOENT;
1242 __do_replace(const char *name, unsigned int valid_hooks,
1243 struct xt_table_info *newinfo, unsigned int num_counters,
1244 void __user *counters_ptr)
1248 struct xt_table_info *oldinfo;
1249 struct xt_counters *counters;
1250 void *loc_cpu_old_entry;
1253 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1259 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1260 "iptable_%s", name);
1261 if (!t || IS_ERR(t)) {
1262 ret = t ? PTR_ERR(t) : -ENOENT;
1263 goto free_newinfo_counters_untrans;
1267 if (valid_hooks != t->valid_hooks) {
1268 duprintf("Valid hook crap: %08X vs %08X\n",
1269 valid_hooks, t->valid_hooks);
1274 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1278 /* Update module usage count based on number of rules */
1279 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1280 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1281 if ((oldinfo->number > oldinfo->initial_entries) ||
1282 (newinfo->number <= oldinfo->initial_entries))
1284 if ((oldinfo->number > oldinfo->initial_entries) &&
1285 (newinfo->number <= oldinfo->initial_entries))
1288 /* Get the old counters. */
1289 get_counters(oldinfo, counters);
1290 /* Decrease module usage counts and free resource */
1291 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1292 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
1293 xt_free_table_info(oldinfo);
1294 if (copy_to_user(counters_ptr, counters,
1295 sizeof(struct xt_counters) * num_counters) != 0)
1304 free_newinfo_counters_untrans:
1311 do_replace(void __user *user, unsigned int len)
1314 struct ipt_replace tmp;
1315 struct xt_table_info *newinfo;
1316 void *loc_cpu_entry;
1318 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1321 /* Hack: Causes ipchains to give correct error msg --RR */
1322 if (len != sizeof(tmp) + tmp.size)
1323 return -ENOPROTOOPT;
1325 /* overflow check */
1326 if (tmp.size >= INT_MAX / num_possible_cpus())
1328 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1331 newinfo = xt_alloc_table_info(tmp.size);
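	/* For illustration: xt_alloc_table_info() keeps one copy of the blob
	 * per possible CPU, so tmp.size is effectively multiplied by
	 * num_possible_cpus(); the overflow checks above keep that product,
	 * and the counter allocation, below INT_MAX. */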
1335 /* choose the copy that is our node/cpu */
1336 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1337 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1343 ret = translate_table(tmp.name, tmp.valid_hooks,
1344 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1345 tmp.hook_entry, tmp.underflow);
1349 duprintf("ip_tables: Translated table\n");
1351 ret = __do_replace(tmp.name, tmp.valid_hooks,
1352 newinfo, tmp.num_counters,
1355 goto free_newinfo_untrans;
1358 free_newinfo_untrans:
1359 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1361 xt_free_table_info(newinfo);
1365 /* We're lazy, and add to the first CPU; overflow works its fey magic
1366 * and everything is OK. */
1368 add_counter_to_entry(struct ipt_entry *e,
1369 const struct xt_counters addme[],
1373 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1375 (long unsigned int)e->counters.pcnt,
1376 (long unsigned int)e->counters.bcnt,
1377 (long unsigned int)addme[*i].pcnt,
1378 (long unsigned int)addme[*i].bcnt);
1381 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1388 do_add_counters(void __user *user, unsigned int len, int compat)
1391 struct xt_counters_info tmp;
1392 struct xt_counters *paddc;
1393 unsigned int num_counters;
1398 struct xt_table_info *private;
1400 void *loc_cpu_entry;
1401 #ifdef CONFIG_COMPAT
1402 struct compat_xt_counters_info compat_tmp;
1406 size = sizeof(struct compat_xt_counters_info);
1411 size = sizeof(struct xt_counters_info);
1414 if (copy_from_user(ptmp, user, size) != 0)
1417 #ifdef CONFIG_COMPAT
1419 num_counters = compat_tmp.num_counters;
1420 name = compat_tmp.name;
1424 num_counters = tmp.num_counters;
1428 if (len != size + num_counters * sizeof(struct xt_counters))
1431 paddc = vmalloc_node(len - size, numa_node_id());
1435 if (copy_from_user(paddc, user + size, len - size) != 0) {
1440 t = xt_find_table_lock(AF_INET, name);
1441 if (!t || IS_ERR(t)) {
1442 ret = t ? PTR_ERR(t) : -ENOENT;
1446 write_lock_bh(&t->lock);
1447 private = t->private;
1448 if (private->number != num_counters) {
1450 goto unlock_up_free;
1454 /* Choose the copy that is on our node */
1455 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1456 IPT_ENTRY_ITERATE(loc_cpu_entry,
1458 add_counter_to_entry,
1462 write_unlock_bh(&t->lock);
1471 #ifdef CONFIG_COMPAT
1472 struct compat_ipt_replace {
1473 char name[IPT_TABLE_MAXNAMELEN];
1477 u32 hook_entry[NF_INET_NUMHOOKS];
1478 u32 underflow[NF_INET_NUMHOOKS];
1480 compat_uptr_t counters; /* struct ipt_counters * */
1481 struct compat_ipt_entry entries[0];
1484 static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1485 void __user **dstptr, compat_uint_t *size)
1487 return xt_compat_match_to_user(m, dstptr, size);
1491 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1492 compat_uint_t *size, struct xt_counters *counters,
1495 struct ipt_entry_target *t;
1496 struct compat_ipt_entry __user *ce;
1497 u_int16_t target_offset, next_offset;
1498 compat_uint_t origsize;
1503 ce = (struct compat_ipt_entry __user *)*dstptr;
1504 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1507 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1510 *dstptr += sizeof(struct compat_ipt_entry);
1511 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1512 target_offset = e->target_offset - (origsize - *size);
1515 t = ipt_get_target(e);
1516 ret = xt_compat_target_to_user(t, dstptr, size);
1520 next_offset = e->next_offset - (origsize - *size);
1521 if (put_user(target_offset, &ce->target_offset))
1523 if (put_user(next_offset, &ce->next_offset))
1533 compat_find_calc_match(struct ipt_entry_match *m,
1535 const struct ipt_ip *ip,
1536 unsigned int hookmask,
1539 struct xt_match *match;
1541 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1542 m->u.user.revision),
1543 "ipt_%s", m->u.user.name);
1544 if (IS_ERR(match) || !match) {
1545 duprintf("compat_check_calc_match: `%s' not found\n",
1547 return match ? PTR_ERR(match) : -ENOENT;
1549 m->u.kernel.match = match;
1550 *size += xt_compat_match_offset(match);
1557 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1559 if (i && (*i)-- == 0)
1562 module_put(m->u.kernel.match->me);
1567 compat_release_entry(struct ipt_entry *e, unsigned int *i)
1569 struct ipt_entry_target *t;
1571 if (i && (*i)-- == 0)
1574 /* Cleanup all matches */
1575 IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1576 t = ipt_get_target(e);
1577 module_put(t->u.kernel.target->me);
1582 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1583 struct xt_table_info *newinfo,
1585 unsigned char *base,
1586 unsigned char *limit,
1587 unsigned int *hook_entries,
1588 unsigned int *underflows,
1592 struct ipt_entry_target *t;
1593 struct xt_target *target;
1594 unsigned int entry_offset;
1597 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1598 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1599 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1600 duprintf("Bad offset %p, limit = %p\n", e, limit);
1604 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1605 sizeof(struct compat_xt_entry_target)) {
1606 duprintf("checking: element %p size %u\n",
1611 ret = check_entry(e, name);
1616 entry_offset = (void *)e - (void *)base;
1618 ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
1619 e->comefrom, &off, &j);
1621 goto release_matches;
1623 t = ipt_get_target(e);
1624 target = try_then_request_module(xt_find_target(AF_INET,
1626 t->u.user.revision),
1627 "ipt_%s", t->u.user.name);
1628 if (IS_ERR(target) || !target) {
1629 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1631 ret = target ? PTR_ERR(target) : -ENOENT;
1632 goto release_matches;
1634 t->u.kernel.target = target;
1636 off += xt_compat_target_offset(target);
1638 ret = compat_add_offset(entry_offset, off);
1642 /* Check hooks & underflows */
1643 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1644 if ((unsigned char *)e - base == hook_entries[h])
1645 newinfo->hook_entry[h] = hook_entries[h];
1646 if ((unsigned char *)e - base == underflows[h])
1647 newinfo->underflow[h] = underflows[h];
1650 /* Clear counters and comefrom */
1651 e->counters = ((struct ipt_counters) { 0, 0 });
1658 module_put(t->u.kernel.target->me);
1660 IPT_MATCH_ITERATE(e, compat_release_match, &j);
1664 static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1665 void **dstptr, compat_uint_t *size, const char *name,
1666 const struct ipt_ip *ip, unsigned int hookmask)
1668 xt_compat_match_from_user(m, dstptr, size);
1672 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1673 unsigned int *size, const char *name,
1674 struct xt_table_info *newinfo, unsigned char *base)
1676 struct ipt_entry_target *t;
1677 struct xt_target *target;
1678 struct ipt_entry *de;
1679 unsigned int origsize;
1684 de = (struct ipt_entry *)*dstptr;
1685 memcpy(de, e, sizeof(struct ipt_entry));
1687 *dstptr += sizeof(struct compat_ipt_entry);
1688 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1689 name, &de->ip, de->comefrom);
1692 de->target_offset = e->target_offset - (origsize - *size);
1693 t = ipt_get_target(e);
1694 target = t->u.kernel.target;
1695 xt_compat_target_from_user(t, dstptr, size);
1697 de->next_offset = e->next_offset - (origsize - *size);
1698 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1699 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1700 newinfo->hook_entry[h] -= origsize - *size;
1701 if ((unsigned char *)de - base < newinfo->underflow[h])
1702 newinfo->underflow[h] -= origsize - *size;
1707 static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1713 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
1715 goto cleanup_matches;
1717 ret = check_target(e, name);
1719 goto cleanup_matches;
1725 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1730 translate_compat_table(const char *name,
1731 unsigned int valid_hooks,
1732 struct xt_table_info **pinfo,
1734 unsigned int total_size,
1735 unsigned int number,
1736 unsigned int *hook_entries,
1737 unsigned int *underflows)
1740 struct xt_table_info *newinfo, *info;
1741 void *pos, *entry0, *entry1;
1748 info->number = number;
1750 /* Init all hooks to impossible value. */
1751 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1752 info->hook_entry[i] = 0xFFFFFFFF;
1753 info->underflow[i] = 0xFFFFFFFF;
1756 duprintf("translate_compat_table: size %u\n", info->size);
1758 xt_compat_lock(AF_INET);
1759 /* Walk through entries, checking offsets. */
1760 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1761 check_compat_entry_size_and_hooks,
1762 info, &size, entry0,
1763 entry0 + total_size,
1764 hook_entries, underflows, &j, name);
1770 duprintf("translate_compat_table: %u not %u entries\n",
1775 /* Check hooks all assigned */
1776 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1777 /* Only hooks which are valid */
1778 if (!(valid_hooks & (1 << i)))
1780 if (info->hook_entry[i] == 0xFFFFFFFF) {
1781 duprintf("Invalid hook entry %u %u\n",
1782 i, hook_entries[i]);
1785 if (info->underflow[i] == 0xFFFFFFFF) {
1786 duprintf("Invalid underflow %u %u\n",
1793 newinfo = xt_alloc_table_info(size);
1797 newinfo->number = number;
1798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1799 newinfo->hook_entry[i] = info->hook_entry[i];
1800 newinfo->underflow[i] = info->underflow[i];
1802 entry1 = newinfo->entries[raw_smp_processor_id()];
1805 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1806 compat_copy_entry_from_user, &pos, &size,
1807 name, newinfo, entry1);
1808 compat_flush_offsets();
1809 xt_compat_unlock(AF_INET);
1814 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1818 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1822 IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
1823 compat_release_entry, &j);
1824 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1825 xt_free_table_info(newinfo);
1829 /* And one copy for every other CPU */
1830 for_each_possible_cpu(i)
1831 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1832 memcpy(newinfo->entries[i], entry1, newinfo->size);
1836 xt_free_table_info(info);
1840 xt_free_table_info(newinfo);
1842 IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1845 compat_flush_offsets();
1846 xt_compat_unlock(AF_INET);
1851 compat_do_replace(void __user *user, unsigned int len)
1854 struct compat_ipt_replace tmp;
1855 struct xt_table_info *newinfo;
1856 void *loc_cpu_entry;
1858 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1861 /* Hack: Causes ipchains to give correct error msg --RR */
1862 if (len != sizeof(tmp) + tmp.size)
1863 return -ENOPROTOOPT;
1865 /* overflow check */
1866 if (tmp.size >= INT_MAX / num_possible_cpus())
1868 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1871 newinfo = xt_alloc_table_info(tmp.size);
1875 /* choose the copy that is our node/cpu */
1876 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1877 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1883 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1884 &newinfo, &loc_cpu_entry, tmp.size,
1885 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1889 duprintf("compat_do_replace: Translated table\n");
1891 ret = __do_replace(tmp.name, tmp.valid_hooks,
1892 newinfo, tmp.num_counters,
1893 compat_ptr(tmp.counters));
1895 goto free_newinfo_untrans;
1898 free_newinfo_untrans:
1899 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1901 xt_free_table_info(newinfo);
1906 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1911 if (!capable(CAP_NET_ADMIN))
1915 case IPT_SO_SET_REPLACE:
1916 ret = compat_do_replace(user, len);
1919 case IPT_SO_SET_ADD_COUNTERS:
1920 ret = do_add_counters(user, len, 1);
1924 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1931 struct compat_ipt_get_entries
1933 char name[IPT_TABLE_MAXNAMELEN];
1935 struct compat_ipt_entry entrytable[0];
1938 static int compat_copy_entries_to_user(unsigned int total_size,
1939 struct xt_table *table, void __user *userptr)
1941 struct xt_counters *counters;
1942 struct xt_table_info *private = table->private;
1946 void *loc_cpu_entry;
1949 counters = alloc_counters(table);
1950 if (IS_ERR(counters))
1951 return PTR_ERR(counters);
1953 /* choose the copy that is on our node/cpu, ...
1954 * This choice is lazy (because current thread is
1955 * allowed to migrate to another cpu)
1957 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1960 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1961 compat_copy_entry_to_user,
1962 &pos, &size, counters, &i);
1969 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1972 struct compat_ipt_get_entries get;
1976 if (*len < sizeof(get)) {
1977 duprintf("compat_get_entries: %u < %u\n",
1978 *len, (unsigned int)sizeof(get));
1982 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1985 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1986 duprintf("compat_get_entries: %u != %u\n", *len,
1987 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1992 xt_compat_lock(AF_INET);
1993 t = xt_find_table_lock(AF_INET, get.name);
1994 if (t && !IS_ERR(t)) {
1995 struct xt_table_info *private = t->private;
1996 struct xt_table_info info;
1997 duprintf("t->private->number = %u\n",
1999 ret = compat_table_info(private, &info);
2000 if (!ret && get.size == info.size) {
2001 ret = compat_copy_entries_to_user(private->size,
2002 t, uptr->entrytable);
2004 duprintf("compat_get_entries: I've got %u not %u!\n",
2009 compat_flush_offsets();
2013 ret = t ? PTR_ERR(t) : -ENOENT;
2015 xt_compat_unlock(AF_INET);
2019 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
2022 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2026 if (!capable(CAP_NET_ADMIN))
2030 case IPT_SO_GET_INFO:
2031 ret = get_info(user, len, 1);
2033 case IPT_SO_GET_ENTRIES:
2034 ret = compat_get_entries(user, len);
2037 ret = do_ipt_get_ctl(sk, cmd, user, len);
2044 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2048 if (!capable(CAP_NET_ADMIN))
2052 case IPT_SO_SET_REPLACE:
2053 ret = do_replace(user, len);
2056 case IPT_SO_SET_ADD_COUNTERS:
2057 ret = do_add_counters(user, len, 0);
2061 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2069 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2073 if (!capable(CAP_NET_ADMIN))
2077 case IPT_SO_GET_INFO:
2078 ret = get_info(user, len, 0);
2081 case IPT_SO_GET_ENTRIES:
2082 ret = get_entries(user, len);
2085 case IPT_SO_GET_REVISION_MATCH:
2086 case IPT_SO_GET_REVISION_TARGET: {
2087 struct ipt_get_revision rev;
2090 if (*len != sizeof(rev)) {
2094 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2099 if (cmd == IPT_SO_GET_REVISION_TARGET)
2104 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2107 "ipt_%s", rev.name);
2112 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
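/*
 * Illustrative user-space sketch: the get side of this interface is driven
 * through getsockopt() on an IPv4 socket (the caller needs CAP_NET_ADMIN,
 * matching the capable() checks above), roughly like:
 *
 *	struct ipt_getinfo info;
 *	socklen_t len = sizeof(info);
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *	strcpy(info.name, "filter");
 *	if (getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len) == 0)
 *		printf("%u entries, blob size %u\n",
 *		       info.num_entries, info.size);
 */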
2119 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2122 struct xt_table_info *newinfo;
2123 struct xt_table_info bootstrap
2124 = { 0, 0, 0, { 0 }, { 0 }, { } };
2125 void *loc_cpu_entry;
2127 newinfo = xt_alloc_table_info(repl->size);
2131 /* choose the copy on our node/cpu,
2132 * but don't care about preemption */
2134 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2135 memcpy(loc_cpu_entry, repl->entries, repl->size);
2137 ret = translate_table(table->name, table->valid_hooks,
2138 newinfo, loc_cpu_entry, repl->size,
2143 xt_free_table_info(newinfo);
2147 ret = xt_register_table(table, &bootstrap, newinfo);
2149 xt_free_table_info(newinfo);
2156 void ipt_unregister_table(struct xt_table *table)
2158 struct xt_table_info *private;
2159 void *loc_cpu_entry;
2161 private = xt_unregister_table(table);
2163 /* Decrease module usage counts and free resources */
2164 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2165 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2166 xt_free_table_info(private);
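/*
 * Usage note (illustrative): a table module such as iptable_filter lays out
 * its initial ruleset as a struct ipt_replace followed by the entries and
 * calls ipt_register_table(&its_xt_table, &initial.repl) from its init
 * function; its exit function calls ipt_unregister_table(), which frees the
 * per-CPU entry copies via cleanup_entry() as above.
 */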
2169 /* Returns 1 if the type and code are matched by the range, 0 otherwise */
2171 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2172 u_int8_t type, u_int8_t code,
2175 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
2180 icmp_match(const struct sk_buff *skb,
2181 const struct net_device *in,
2182 const struct net_device *out,
2183 const struct xt_match *match,
2184 const void *matchinfo,
2186 unsigned int protoff,
2189 struct icmphdr _icmph, *ic;
2190 const struct ipt_icmp *icmpinfo = matchinfo;
2192 /* Must not be a fragment. */
2196 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2198 /* We've been asked to examine this packet, and we
2199 * can't. Hence, no choice but to drop.
2201 duprintf("Dropping evil ICMP tinygram.\n");
2206 return icmp_type_code_match(icmpinfo->type,
2210 !!(icmpinfo->invflags&IPT_ICMP_INV));
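	/* For illustration: "-p icmp --icmp-type echo-request" arrives here
	 * as type 8 with code range 0..255, so any echo-request matches;
	 * test_type 0xFF acts as a wildcard matching every type/code, and
	 * IPT_ICMP_INV inverts the final result. */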
2213 /* Called when user tries to insert an entry of this type. */
2215 icmp_checkentry(const char *tablename,
2217 const struct xt_match *match,
2219 unsigned int hook_mask)
2221 const struct ipt_icmp *icmpinfo = matchinfo;
2223 /* Must specify no unknown invflags */
2224 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2227 /* The built-in targets: standard (NULL) and error. */
2228 static struct xt_target ipt_standard_target __read_mostly = {
2229 .name = IPT_STANDARD_TARGET,
2230 .targetsize = sizeof(int),
2232 #ifdef CONFIG_COMPAT
2233 .compatsize = sizeof(compat_int_t),
2234 .compat_from_user = compat_standard_from_user,
2235 .compat_to_user = compat_standard_to_user,
2239 static struct xt_target ipt_error_target __read_mostly = {
2240 .name = IPT_ERROR_TARGET,
2241 .target = ipt_error,
2242 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2246 static struct nf_sockopt_ops ipt_sockopts = {
2248 .set_optmin = IPT_BASE_CTL,
2249 .set_optmax = IPT_SO_SET_MAX+1,
2250 .set = do_ipt_set_ctl,
2251 #ifdef CONFIG_COMPAT
2252 .compat_set = compat_do_ipt_set_ctl,
2254 .get_optmin = IPT_BASE_CTL,
2255 .get_optmax = IPT_SO_GET_MAX+1,
2256 .get = do_ipt_get_ctl,
2257 #ifdef CONFIG_COMPAT
2258 .compat_get = compat_do_ipt_get_ctl,
2260 .owner = THIS_MODULE,
2263 static struct xt_match icmp_matchstruct __read_mostly = {
2265 .match = icmp_match,
2266 .matchsize = sizeof(struct ipt_icmp),
2267 .proto = IPPROTO_ICMP,
2269 .checkentry = icmp_checkentry,
2272 static int __init ip_tables_init(void)
2276 ret = xt_proto_init(AF_INET);
2280 /* No one else will be downing the sem now, so we won't sleep */
2281 ret = xt_register_target(&ipt_standard_target);
2284 ret = xt_register_target(&ipt_error_target);
2287 ret = xt_register_match(&icmp_matchstruct);
2291 /* Register setsockopt */
2292 ret = nf_register_sockopt(&ipt_sockopts);
2296 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2300 xt_unregister_match(&icmp_matchstruct);
2302 xt_unregister_target(&ipt_error_target);
2304 xt_unregister_target(&ipt_standard_target);
2306 xt_proto_fini(AF_INET);
2311 static void __exit ip_tables_fini(void)
2313 nf_unregister_sockopt(&ipt_sockopts);
2315 xt_unregister_match(&icmp_matchstruct);
2316 xt_unregister_target(&ipt_error_target);
2317 xt_unregister_target(&ipt_standard_target);
2319 xt_proto_fini(AF_INET);
2322 EXPORT_SYMBOL(ipt_register_table);
2323 EXPORT_SYMBOL(ipt_unregister_table);
2324 EXPORT_SYMBOL(ipt_do_table);
2325 module_init(ip_tables_init);
2326 module_exit(ip_tables_fini);