/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 * 	- increase module usage count as soon as we have rules inside
 * 	  a table
 *
 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
 * 	- Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
 */
#include <linux/config.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
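
/*
 * Annotation (added, not part of the original source): a minimal
 * sketch of the locking protocol described above, assuming the
 * table's rwlock.  The packet path (softirq) only ever read-locks:
 *
 *	read_lock_bh(&table->lock);
 *	... traverse rules, bump counters on this CPU's copy ...
 *	read_unlock_bh(&table->lock);
 *
 * User context takes the write lock, which drains the packet path on
 * all CPUs and makes it safe to read or swap the rule set:
 *
 *	write_lock_bh(&table->lock);
 *	... snapshot counters / replace table->private ...
 *	write_unlock_bh(&table->lock);
 */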
/* Returns whether the packet matches the rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
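
/*
 * Annotation (added, not part of the original source): FWINV simply
 * XORs a match result with the corresponding inversion flag, so e.g.
 *
 *	FWINV(addr_mismatch, IPT_INV_SRCIP)
 *
 * is true when the address mismatches and IPT_INV_SRCIP is clear, or
 * when it matches and IPT_INV_SRCIP is set -- i.e. "-s ! addr".
 */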
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}
	/* Look for ifname matches; this should unroll nicely. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
static inline int
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return 0;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return 0;
	}
	return 1;
}
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo,
	  void *userinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
static inline
int do_match(struct ipt_entry_match *m,
	     const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     int offset,
	     int *hotdrop)
{
	/* Stop iteration if it doesn't match */
	if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
				      offset, skb->nh.iph->ihl*4, hotdrop))
		return 1;
	else
		return 0;
}
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
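
/*
 * Annotation (added, not part of the original source): rules live in
 * one contiguous blob, so a "position" is just a byte offset from the
 * start of the blob.  E.g. the first rule of a hook is
 *
 *	e = get_entry(table_base, private->hook_entry[hook]);
 *
 * and the rule following e is at (void *)e + e->next_offset.
 */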
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct ipt_table *table,
	     void *userdata)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. the TCP syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;
	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);
	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
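					/*
					 * Annotation (added, not part of the
					 * original source): userspace encodes
					 * a final verdict V as -V - 1 so that
					 * positive values stay usable as jump
					 * offsets; the decode above inverts
					 * that, e.g. a stored -2 yields
					 * (unsigned)(2) - 1 == 1 == NF_ACCEPT,
					 * while -1 decodes to 0 == NF_DROP.
					 */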
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data,
								     userdata);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);
	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;
		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);

			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
			/* Unconditional return/END. */
			if (e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				   big jumps. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* This is a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
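
/*
 * Annotation (added, not part of the original source): the traversal
 * above borrows two otherwise-unused fields instead of recursing:
 * while a chain is being walked, e->counters.pcnt holds the position
 * to backtrack to (and is reset to 0 on the way out), and bit
 * NF_IP_NUMHOOKS of e->comefrom marks "currently on the stack",
 * which is how a rule loop is detected.
 */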
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data,
					   m->u.match_size - sizeof(*m));
	module_put(m->u.kernel.match->me);
	return 0;
}
static inline int
standard_check(const struct ipt_entry_target *t,
	       unsigned int max_offset)
{
	struct ipt_standard_target *targ = (void *)t;

	/* Check standard info. */
	if (targ->verdict >= 0
	    && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
		duprintf("ipt_standard_check: bad verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	if (targ->verdict < -NF_MAX_VERDICT - 1) {
		duprintf("ipt_standard_check: bad negative verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	return 1;
}
static inline int
check_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    unsigned int *i)
{
	struct ipt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      m->u.match_size - sizeof(*m),
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static struct ipt_target ipt_standard_target;

static inline int
check_entry(struct ipt_entry *e, const char *name, unsigned int size,
	    unsigned int *i)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	int ret;
	unsigned int j;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;
	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, size)) {
			ret = -EINVAL;
			goto cleanup_matches;
		}
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, e, target, t->data,
						      t->u.target_size
						      - sizeof(*t),
						      e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data,
					    t->u.target_size - sizeof(*t));
	module_put(t->u.kernel.target->me);
	return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;
	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;
	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
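
/*
 * Annotation (added, not part of the original source): every possible
 * CPU gets its own byte-identical copy of the rule blob, so the packet
 * path can bump counters without cross-CPU cache-line contention; only
 * the copy belonging to the executing CPU is ever touched from softirq
 * context (see table_base in ipt_do_table()).
 */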
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We don't care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
static inline struct xt_counters *alloc_counters(struct ipt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
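
/*
 * Annotation (added, not part of the original source): taking the
 * table's write lock here excludes the read-locked packet path on
 * every CPU, so the per-CPU counters summed by get_counters() form a
 * consistent snapshot even though no single counter update is atomic.
 */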
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}
	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_delta {
	struct compat_delta *next;
	u_int16_t offset;
	short delta;
};

static struct compat_delta *compat_offsets = NULL;
static int compat_add_offset(u_int16_t offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
static void compat_flush_offsets(void)
{
	struct compat_delta *tmp, *next;

	if (compat_offsets) {
		for (tmp = compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		compat_offsets = NULL;
	}
}
static short compat_calc_jump(u_int16_t offset)
{
	struct compat_delta *tmp;
	short delta;

	for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
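
/*
 * Annotation (added, not part of the original source): each entry
 * whose compat layout differs in size from the native one contributes
 * an (offset, delta) record.  A jump to native offset N must therefore
 * be shifted by the sum of the deltas of all entries preceding N,
 * which is exactly what the loop above accumulates.
 */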
struct compat_ipt_standard_target
{
	struct compat_xt_entry_target target;
	compat_int_t verdict;
};

struct compat_ipt_standard
{
	struct compat_ipt_entry entry;
	struct compat_ipt_standard_target target;
};

#define IPT_ST_LEN		XT_ALIGN(sizeof(struct ipt_standard_target))
#define IPT_ST_COMPAT_LEN	COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
#define IPT_ST_OFFSET		(IPT_ST_LEN - IPT_ST_COMPAT_LEN)
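
/*
 * Annotation (added, not part of the original source): on a 64-bit
 * kernel a 32-bit userspace sees smaller, differently aligned
 * structures, so the standard target occupies IPT_ST_OFFSET fewer
 * bytes in the compat layout.  The translation below adds or subtracts
 * that constant when converting an entry between the representations.
 */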
static int compat_ipt_standard_fn(void *target,
				  void **dstptr, int *size, int convert)
{
	struct compat_ipt_standard_target compat_st, *pcompat_st;
	struct ipt_standard_target st, *pst;
	int ret;

	ret = 0;
	switch (convert) {
	case COMPAT_TO_USER:
		pst = target;
		memcpy(&compat_st.target, &pst->target,
		       sizeof(compat_st.target));
		compat_st.verdict = pst->verdict;
		if (compat_st.verdict > 0)
			compat_st.verdict -=
				compat_calc_jump(compat_st.verdict);
		compat_st.target.u.user.target_size = IPT_ST_COMPAT_LEN;
		if (copy_to_user(*dstptr, &compat_st, IPT_ST_COMPAT_LEN))
			ret = -EFAULT;
		*size -= IPT_ST_OFFSET;
		*dstptr += IPT_ST_COMPAT_LEN;
		break;
	case COMPAT_FROM_USER:
		pcompat_st = target;
		memcpy(&st.target, &pcompat_st->target, IPT_ST_COMPAT_LEN);
		st.verdict = pcompat_st->verdict;
		if (st.verdict > 0)
			st.verdict += compat_calc_jump(st.verdict);
		st.target.u.user.target_size = IPT_ST_LEN;
		memcpy(*dstptr, &st, IPT_ST_LEN);
		*size += IPT_ST_OFFSET;
		*dstptr += IPT_ST_LEN;
		break;
	case COMPAT_CALC_SIZE:
		*size += IPT_ST_OFFSET;
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	return ret;
}
static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
	if (m->u.kernel.match->compat)
		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
	else
		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
	return 0;
}
static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	u_int16_t entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	if (t->u.kernel.target->compat)
		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
	else
		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry,
				 newinfo);
}
#endif
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct ipt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			 (unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct ipt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			  NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
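
	/*
	 * Annotation (added, not part of the original source): the
	 * table allocator reserves roughly tmp.size bytes per possible
	 * CPU plus the shared xt_table_info header, so the checks
	 * above reject any user-supplied size or counter count whose
	 * total allocation could overflow an int before the
	 * multiplication is actually performed.
	 */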
	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
		     const struct xt_counters addme[],
		     unsigned int *i)
{
#if 0
	duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
		 *i,
		 (long unsigned int)e->counters.pcnt,
		 (long unsigned int)e->counters.bcnt,
		 (long unsigned int)addme[*i].pcnt,
		 (long unsigned int)addme[*i].bcnt);
#endif

	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

	(*i)++;
	return 0;
}
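
/*
 * Annotation (added, not part of the original source): ADD_COUNTER(c,
 * b, p) simply does c.bcnt += b; c.pcnt += p.  Adding the userspace
 * deltas only to the first CPU's copy is safe because readers always
 * sum all per-CPU copies, and unsigned arithmetic is modular, so any
 * intermediate wraparound does not corrupt that sum -- presumably the
 * "fey magic" the comment above alludes to.
 */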
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct ipt_table *t;
	struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(AF_INET, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry,
			  private->size,
			  add_counter_to_entry,
			  paddc,
			  &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_IP_NUMHOOKS];
	u32			underflow[NF_IP_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];
};
static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
					    void __user **dstptr,
					    compat_uint_t *size)
{
	if (m->u.kernel.match->compat)
		return m->u.kernel.match->compat(m, dstptr, size,
						 COMPAT_TO_USER);
	else
		return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
}
static int compat_copy_entry_to_user(struct ipt_entry *e,
				     void __user **dstptr,
				     compat_uint_t *size)
{
	struct ipt_entry_target __user *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	if (t->u.kernel.target->compat)
		ret = t->u.kernel.target->compat(t, dstptr, size,
						 COMPAT_TO_USER);
	else
		ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;
	return 0;
out:
	return ret;
}
static inline int
compat_check_calc_match(struct ipt_entry_match *m,
			const char *name,
			const struct ipt_ip *ip,
			unsigned int hookmask,
			int *size, int *i)
{
	struct ipt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	if (m->u.kernel.match->compat)
		m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
	else
		xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);

	(*i)++;
	return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	u_int16_t entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	if (t->u.kernel.target->compat)
		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
	else
		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
out:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	struct ipt_entry_match *dm;
	struct ipt_match *match;
	int ret;

	dm = (struct ipt_entry_match *)*dstptr;
	match = m->u.kernel.match;
	if (match->compat)
		match->compat(m, dstptr, size, COMPAT_FROM_USER);
	else
		xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);

	ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (ret)
		return ret;

	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
					      dm->u.match_size - sizeof(*dm),
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		return -EINVAL;
	}
	return 0;
}
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
				name, &de->ip, de->comefrom);
	if (ret)
		goto err;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	if (target->compat)
		target->compat(t, dstptr, size, COMPAT_FROM_USER);
	else
		xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}

	t = ipt_get_target(de);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	ret = -EINVAL;
	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, *size))
			goto err;
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, de, target,
						      t->data, t->u.target_size
						      - sizeof(*t),
						      de->comefrom)) {
		duprintf("ip_tables: compat: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		goto err;
	}
	ret = 0;
err:
	return ret;
}
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, size;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	i = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &i, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (i != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 i, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int compat_copy_entries_to_user(unsigned int total_size,
				       struct ipt_table *table,
				       void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
					 offsetof(struct ipt_entry_match,
						  u.user.name),
					 m.u.kernel.match->name,
					 strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
				 offsetof(struct ipt_entry_target,
					  u.user.name),
				 t.u.kernel.target->name,
				 strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}
	ret = 0;
free_counters:
	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n",
			 private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but don't care about preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
void ipt_unregister_table(struct ipt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
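
/*
 * Annotation (added, not part of the original source): test_type ==
 * 0xFF is the wildcard used when no specific ICMP type was requested,
 * so everything matches.  Otherwise the type must be equal and the
 * code must fall in [min_code, max_code]; e.g. "--icmp-type 8" is
 * encoded as test_type = 8 with a full 0..0xFF code range, matching
 * every echo-request.  The final XOR applies "!" inversion.
 */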
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
		const void *info,
		const struct xt_match *match,
		void *matchinfo,
		unsigned int matchsize,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}
/* The built-in targets: standard (NULL) and error. */
static struct ipt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compat		= &compat_ipt_standard_fn,
#endif
};

static struct ipt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};
static struct ipt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
static int __init ip_tables_init(void)
{
	int ret;

	xt_proto_init(AF_INET);

	/* No one else will be downing sem now, so we won't sleep */
	xt_register_target(&ipt_standard_target);
	xt_register_target(&ipt_error_target);
	xt_register_match(&icmp_matchstruct);

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0) {
		duprintf("Unable to register sockopts.\n");
		return ret;
	}

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);