/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 *
 * February 2000: Modified by James Morris to have 1 queue per protocol.
 * 15-Mar-2000:   Added NF_REPEAT --RR.
 * 08-May-2003:   Internal logging interface added by Jozsef Kadlecsik.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/route.h>
#include <linux/ip.h>
/* In this code, we can be waiting indefinitely for userspace to
 * service a packet if a hook returns NF_QUEUE.  We could keep a count
 * of skbuffs queued for userspace, and not deregister a hook unless
 * this is zero, but that sucks.  Now, we simply check when the
 * packets come back: if the hook is gone, the packet is discarded. */
#ifdef CONFIG_NETFILTER_DEBUG
#define NFDEBUG(format, args...)  printk(format , ## args)
#else
#define NFDEBUG(format, args...)
#endif
/* Sockopts only registered and called from user context, so
   net locking would be overkill.  Also, [gs]etsockopt calls may
   sleep. */
static DECLARE_MUTEX(nf_sockopt_mutex);
struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
static LIST_HEAD(nf_sockopts);
static DEFINE_SPINLOCK(nf_hook_lock);
/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static struct nf_queue_handler_t {
        nf_queue_outfn_t outfn;
        void *data;
} queue_handler[NPROTO];
static DEFINE_RWLOCK(queue_handler_lock);
int nf_register_hook(struct nf_hook_ops *reg)
{
        struct list_head *i;

        spin_lock_bh(&nf_hook_lock);
        list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
                if (reg->priority < ((struct nf_hook_ops *)i)->priority)
                        break;
        }
        /* Keep the hook list sorted by ascending priority. */
        list_add_rcu(&reg->list, i->prev);
        spin_unlock_bh(&nf_hook_lock);

        synchronize_net();
        return 0;
}
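/*
 * Illustrative sketch, not part of this file: a caller would normally fill
 * in an nf_hook_ops and hand it to nf_register_hook().  The names
 * example_hook and example_ops below are hypothetical.
 *
 *      static unsigned int example_hook(unsigned int hooknum,
 *                                       struct sk_buff **pskb,
 *                                       const struct net_device *in,
 *                                       const struct net_device *out,
 *                                       int (*okfn)(struct sk_buff *))
 *      {
 *              return NF_ACCEPT;
 *      }
 *
 *      static struct nf_hook_ops example_ops = {
 *              .hook     = example_hook,
 *              .owner    = THIS_MODULE,
 *              .pf       = PF_INET,
 *              .hooknum  = NF_IP_PRE_ROUTING,
 *              .priority = NF_IP_PRI_FILTER,
 *      };
 *
 *      nf_register_hook(&example_ops);
 *
 * The list is kept sorted by priority (smallest first); unregister with
 * nf_unregister_hook(&example_ops).
 */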
void nf_unregister_hook(struct nf_hook_ops *reg)
{
        spin_lock_bh(&nf_hook_lock);
        list_del_rcu(&reg->list);
        spin_unlock_bh(&nf_hook_lock);

        synchronize_net();
}
/* Do exclusive ranges overlap? */
static inline int overlap(int min1, int max1, int min2, int max2)
{
        return max1 > min2 && min1 < max2;
}
/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
        struct list_head *i;
        int ret = 0;

        if (down_interruptible(&nf_sockopt_mutex) != 0)
                return -EINTR;

        list_for_each(i, &nf_sockopts) {
                struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
                if (ops->pf == reg->pf
                    && (overlap(ops->set_optmin, ops->set_optmax,
                                reg->set_optmin, reg->set_optmax)
                        || overlap(ops->get_optmin, ops->get_optmax,
                                   reg->get_optmin, reg->get_optmax))) {
                        NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
                                ops->set_optmin, ops->set_optmax,
                                ops->get_optmin, ops->get_optmax,
                                reg->set_optmin, reg->set_optmax,
                                reg->get_optmin, reg->get_optmax);
                        ret = -EBUSY;
                        goto out;
                }
        }

        list_add(&reg->list, &nf_sockopts);
out:
        up(&nf_sockopt_mutex);
        return ret;
}
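/*
 * Illustrative sketch, not part of this file: ip_tables-style code registers
 * a non-overlapping [gs]etsockopt range roughly like this.  The constants
 * and handler names (EXAMPLE_*, example_set_ctl, example_get_ctl) are
 * hypothetical.
 *
 *      static struct nf_sockopt_ops example_sockopts = {
 *              .pf         = PF_INET,
 *              .set_optmin = EXAMPLE_BASE_CTL,
 *              .set_optmax = EXAMPLE_SET_MAX + 1,
 *              .set        = example_set_ctl,
 *              .get_optmin = EXAMPLE_BASE_CTL,
 *              .get_optmax = EXAMPLE_GET_MAX + 1,
 *              .get        = example_get_ctl,
 *      };
 *
 *      ret = nf_register_sockopt(&example_sockopts);
 *
 * Registration fails with -EBUSY if either range collides with one that is
 * already registered; ranges are exclusive at the upper end.
 */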
void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
        /* No point being interruptible: we're probably in cleanup_module() */
restart:
        down(&nf_sockopt_mutex);
        if (reg->use != 0) {
                /* To be woken by nf_sockopt call... */
                /* FIXME: Stuart Young's name appears gratuitously. */
                set_current_state(TASK_UNINTERRUPTIBLE);
                reg->cleanup_task = current;
                up(&nf_sockopt_mutex);
                schedule();
                goto restart;
        }
        list_del(&reg->list);
        up(&nf_sockopt_mutex);
}
#ifdef CONFIG_NETFILTER_DEBUG

#include <linux/netfilter_ipv4.h>
static void debug_print_hooks_ip(unsigned int nf_debug)
{
        if (nf_debug & (1 << NF_IP_PRE_ROUTING)) {
                printk("PRE_ROUTING ");
                nf_debug ^= (1 << NF_IP_PRE_ROUTING);
        }
        if (nf_debug & (1 << NF_IP_LOCAL_IN)) {
                printk("LOCAL_IN ");
                nf_debug ^= (1 << NF_IP_LOCAL_IN);
        }
        if (nf_debug & (1 << NF_IP_FORWARD)) {
                printk("FORWARD ");
                nf_debug ^= (1 << NF_IP_FORWARD);
        }
        if (nf_debug & (1 << NF_IP_LOCAL_OUT)) {
                printk("LOCAL_OUT ");
                nf_debug ^= (1 << NF_IP_LOCAL_OUT);
        }
        if (nf_debug & (1 << NF_IP_POST_ROUTING)) {
                printk("POST_ROUTING ");
                nf_debug ^= (1 << NF_IP_POST_ROUTING);
        }
        if (nf_debug)
                printk("Crap bits: 0x%04X", nf_debug);
        printk("\n");
}
static void nf_dump_skb(int pf, struct sk_buff *skb)
{
        printk("skb: pf=%i %s dev=%s len=%u\n",
               pf,
               skb->sk ? "(owned)" : "(unowned)",
               skb->dev ? skb->dev->name : "(no dev)",
               skb->len);
        switch (pf) {
        case PF_INET: {
                const struct iphdr *ip = skb->nh.iph;
                __u32 *opt = (__u32 *) (ip + 1);
                int opti;
                __u16 src_port = 0, dst_port = 0;

                if (ip->protocol == IPPROTO_TCP
                    || ip->protocol == IPPROTO_UDP) {
                        struct tcphdr *tcp=(struct tcphdr *)((__u32 *)ip+ip->ihl);
                        src_port = ntohs(tcp->source);
                        dst_port = ntohs(tcp->dest);
                }

                printk("PROTO=%d %u.%u.%u.%u:%hu %u.%u.%u.%u:%hu"
                       " L=%hu S=0x%2.2hX I=%hu F=0x%4.4hX T=%hu",
                       ip->protocol, NIPQUAD(ip->saddr),
                       src_port, NIPQUAD(ip->daddr),
                       dst_port,
                       ntohs(ip->tot_len), ip->tos, ntohs(ip->id),
                       ntohs(ip->frag_off), ip->ttl);

                for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++)
                        printk(" O=0x%8.8X", *opt++);
                printk(" MARK=%lu (0x%lu)",
                       (long unsigned int)skb->nfmark,
                       (long unsigned int)skb->nfmark);
        }
        }
}
void nf_debug_ip_local_deliver(struct sk_buff *skb)
{
        /* If it's a loopback packet, it must have come through
         * NF_IP_LOCAL_OUT, NF_IP_RAW_INPUT, NF_IP_PRE_ROUTING and
         * NF_IP_LOCAL_IN.  Otherwise, must have gone through
         * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING. */
        if (!skb->dev) {
                printk("ip_local_deliver: skb->dev is NULL.\n");
        } else if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
                                     | (1<<NF_IP_LOCAL_IN))) {
                printk("ip_local_deliver: bad skb: ");
                debug_print_hooks_ip(skb->nf_debug);
                nf_dump_skb(PF_INET, skb);
        }
}
void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
{
        if (newskb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
                                 | (1 << NF_IP_POST_ROUTING))) {
                printk("ip_dev_loopback_xmit: bad owned skb = %p: ",
                       newskb);
                debug_print_hooks_ip(newskb->nf_debug);
                nf_dump_skb(PF_INET, newskb);
        }
}
void nf_debug_ip_finish_output2(struct sk_buff *skb)
{
        /* If it's owned, it must have gone through the
         * NF_IP_LOCAL_OUT and NF_IP_POST_ROUTING.
         * Otherwise, must have gone through
         * NF_IP_PRE_ROUTING, NF_IP_FORWARD and NF_IP_POST_ROUTING.
         */
        if (skb->sk) {
                if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
                                      | (1 << NF_IP_POST_ROUTING))) {
                        printk("ip_finish_output: bad owned skb = %p: ", skb);
                        debug_print_hooks_ip(skb->nf_debug);
                        nf_dump_skb(PF_INET, skb);
                }
        } else {
                if (skb->nf_debug != ((1 << NF_IP_PRE_ROUTING)
                                      | (1 << NF_IP_FORWARD)
                                      | (1 << NF_IP_POST_ROUTING))) {
                        /* Fragments, entunnelled packets, TCP RSTs
                           generated by ipt_REJECT will have no
                           owners, but still may be local */
                        if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
                                              | (1 << NF_IP_POST_ROUTING))) {
                                printk("ip_finish_output:"
                                       " bad unowned skb = %p: ", skb);
                                debug_print_hooks_ip(skb->nf_debug);
                                nf_dump_skb(PF_INET, skb);
                        }
                }
        }
}
#endif /*CONFIG_NETFILTER_DEBUG*/
/* Call get/setsockopt() */
static int nf_sockopt(struct sock *sk, int pf, int val,
                      char __user *opt, int *len, int get)
{
        struct list_head *i;
        struct nf_sockopt_ops *ops;
        int ret;

        if (down_interruptible(&nf_sockopt_mutex) != 0)
                return -EINTR;

        list_for_each(i, &nf_sockopts) {
                ops = (struct nf_sockopt_ops *)i;
                if (ops->pf != pf)
                        continue;
                if (get) {
                        if (val >= ops->get_optmin
                            && val < ops->get_optmax) {
                                ops->use++;
                                up(&nf_sockopt_mutex);
                                ret = ops->get(sk, val, opt, len);
                                goto out;
                        }
                } else {
                        if (val >= ops->set_optmin
                            && val < ops->set_optmax) {
                                ops->use++;
                                up(&nf_sockopt_mutex);
                                ret = ops->set(sk, val, opt, *len);
                                goto out;
                        }
                }
        }
        up(&nf_sockopt_mutex);
        return -ENOPROTOOPT;

out:
        down(&nf_sockopt_mutex);
        ops->use--;
        if (ops->cleanup_task)
                wake_up_process(ops->cleanup_task);
        up(&nf_sockopt_mutex);
        return ret;
}
int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
                  int len)
{
        return nf_sockopt(sk, pf, val, opt, &len, 0);
}

int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
{
        return nf_sockopt(sk, pf, val, opt, len, 1);
}
static unsigned int nf_iterate(struct list_head *head,
                               struct sk_buff **skb,
                               int hook,
                               const struct net_device *indev,
                               const struct net_device *outdev,
                               struct list_head **i,
                               int (*okfn)(struct sk_buff *),
                               int hook_thresh)
{
        unsigned int verdict;

        /*
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
        list_for_each_continue_rcu(*i, head) {
                struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

                if (hook_thresh > elem->priority)
                        continue;

                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
                verdict = elem->hook(hook, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely(verdict > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
                                        elem->hook, hook);
                                continue;
                        }
#endif
                        if (verdict != NF_REPEAT)
                                return verdict;
                        /* NF_REPEAT: call the same hook again. */
                        *i = (*i)->prev;
                }
        }
        return NF_ACCEPT;
}
int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
{
        int ret;

        write_lock_bh(&queue_handler_lock);
        if (queue_handler[pf].outfn)
                ret = -EBUSY;
        else {
                queue_handler[pf].outfn = outfn;
                queue_handler[pf].data = data;
                ret = 0;
        }
        write_unlock_bh(&queue_handler_lock);

        return ret;
}
/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
        write_lock_bh(&queue_handler_lock);
        queue_handler[pf].outfn = NULL;
        queue_handler[pf].data = NULL;
        write_unlock_bh(&queue_handler_lock);

        return 0;
}
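/*
 * Illustrative sketch, not part of this file: a userspace-queueing backend
 * (ip_queue is the in-tree example) supplies an outfn per protocol family.
 * The function name example_enqueue below is hypothetical; whatever it does
 * with the packet, it must eventually hand it back through nf_reinject().
 *
 *      static int example_enqueue(struct sk_buff *skb, struct nf_info *info,
 *                                 void *data)
 *      {
 *              // stash skb and info somewhere, wake up the reader, ...
 *              return 0;       // negative means "could not queue"
 *      }
 *
 *      nf_register_queue_handler(PF_INET, example_enqueue, NULL);
 *      ...
 *      nf_unregister_queue_handler(PF_INET);
 */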
/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int nf_queue(struct sk_buff *skb,
                    struct list_head *elem,
                    int pf, unsigned int hook,
                    struct net_device *indev,
                    struct net_device *outdev,
                    int (*okfn)(struct sk_buff *))
{
        int status;
        struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev = NULL;
        struct net_device *physoutdev = NULL;
#endif

        /* QUEUE == DROP if noone is waiting, to be safe. */
        read_lock(&queue_handler_lock);
        if (!queue_handler[pf].outfn) {
                read_unlock(&queue_handler_lock);
                kfree_skb(skb);
                return 1;
        }

        info = kmalloc(sizeof(*info), GFP_ATOMIC);
        if (!info) {
                if (net_ratelimit())
                        printk(KERN_ERR "OOM queueing packet %p\n",
                               skb);
                read_unlock(&queue_handler_lock);
                kfree_skb(skb);
                return 1;
        }

        *info = (struct nf_info) {
                (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };

        /* If it's going away, ignore hook. */
        if (!try_module_get(info->elem->owner)) {
                read_unlock(&queue_handler_lock);
                kfree(info);
                return 0;
        }

        /* Bump dev refs so they don't vanish while packet is out */
        if (indev) dev_hold(indev);
        if (outdev) dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                physindev = skb->nf_bridge->physindev;
                if (physindev) dev_hold(physindev);
                physoutdev = skb->nf_bridge->physoutdev;
                if (physoutdev) dev_hold(physoutdev);
        }
#endif

        status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
        read_unlock(&queue_handler_lock);

        if (status < 0) {
                /* Queueing failed: drop all the references we took. */
                if (indev) dev_put(indev);
                if (outdev) dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
                if (physindev) dev_put(physindev);
                if (physoutdev) dev_put(physoutdev);
#endif
                module_put(info->elem->owner);
                kfree(info);
                kfree_skb(skb);
                return 1;
        }
        return 1;
}
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
                 struct net_device *indev,
                 struct net_device *outdev,
                 int (*okfn)(struct sk_buff *),
                 int hook_thresh)
{
        struct list_head *elem;
        unsigned int verdict;
        int ret = 0;

        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();

#ifdef CONFIG_NETFILTER_DEBUG
        if (unlikely((*pskb)->nf_debug & (1 << hook))) {
                printk("nf_hook: hook %i already set.\n", hook);
                nf_dump_skb(pf, *pskb);
        }
        (*pskb)->nf_debug |= (1 << hook);
#endif

        elem = &nf_hooks[pf][hook];
next_hook:
        verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
                             outdev, &elem, okfn, hook_thresh);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
                goto unlock;
        } else if (verdict == NF_DROP) {
                kfree_skb(*pskb);
                ret = -EPERM;
        } else if (verdict == NF_QUEUE) {
                NFDEBUG("nf_hook: Verdict = QUEUE.\n");
                if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
                        goto next_hook;
        }
unlock:
        rcu_read_unlock();
        return ret;
}
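/*
 * Callers normally reach nf_hook_slow() through the NF_HOOK() macro in
 * <linux/netfilter.h>, which (roughly, ignoring the empty-list fast path)
 * behaves like:
 *
 *      ret = nf_hook_slow(pf, hook, &skb, indev, outdev, okfn, INT_MIN);
 *      if (ret == 1)
 *              ret = okfn(skb);
 *
 * i.e. 1 means every hook accepted and the caller runs okfn(), 0 means the
 * packet was queued or stolen, and -EPERM means it was dropped.
 */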
void nf_reinject(struct sk_buff *skb, struct nf_info *info,
                 unsigned int verdict)
{
        struct list_head *elem = &info->elem->list;
        struct list_head *i;

        rcu_read_lock();

        /* Release those devices we held, or Alexey will kill me. */
        if (info->indev) dev_put(info->indev);
        if (info->outdev) dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                if (skb->nf_bridge->physindev)
                        dev_put(skb->nf_bridge->physindev);
                if (skb->nf_bridge->physoutdev)
                        dev_put(skb->nf_bridge->physoutdev);
        }
#endif

        /* Drop reference to owner of hook which queued us. */
        module_put(info->elem->owner);

        list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
                if (i == elem)
                        break;
        }

        if (elem == &nf_hooks[info->pf][info->hook]) {
                /* The module which sent it to userspace is gone. */
                NFDEBUG("%s: module disappeared, dropping packet.\n",
                        __FUNCTION__);
                verdict = NF_DROP;
        }

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT) {
                elem = elem->prev;
                verdict = NF_ACCEPT;
        }

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
                                     &skb, info->hook,
                                     info->indev, info->outdev, &elem,
                                     info->okfn, INT_MIN);
        }

        switch (verdict) {
        case NF_ACCEPT:
                info->okfn(skb);
                break;

        case NF_QUEUE:
                if (!nf_queue(skb, elem, info->pf, info->hook,
                              info->indev, info->outdev, info->okfn))
                        goto next_hook;
                break;
        }
        rcu_read_unlock();

        if (verdict == NF_DROP)
                kfree_skb(skb);

        kfree(info);
        return;
}
#ifdef CONFIG_INET
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
int ip_route_me_harder(struct sk_buff **pskb)
{
        struct iphdr *iph = (*pskb)->nh.iph;
        struct rtable *rt;
        struct flowi fl = {};
        struct dst_entry *odst;
        unsigned int hh_len;

        /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
         * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
         */
        if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
                fl.nl_u.ip4_u.daddr = iph->daddr;
                fl.nl_u.ip4_u.saddr = iph->saddr;
                fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
                fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
#ifdef CONFIG_IP_ROUTE_FWMARK
                fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
#endif
                fl.proto = iph->protocol;
                if (ip_route_output_key(&rt, &fl) != 0)
                        return -1;

                /* Drop old route. */
                dst_release((*pskb)->dst);
                (*pskb)->dst = &rt->u.dst;
        } else {
                /* non-local src, find valid iif to satisfy
                 * rp-filter when calling ip_route_input. */
                fl.nl_u.ip4_u.daddr = iph->saddr;
                if (ip_route_output_key(&rt, &fl) != 0)
                        return -1;

                odst = (*pskb)->dst;
                if (ip_route_input(*pskb, iph->daddr, iph->saddr,
                                   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
                        dst_release(&rt->u.dst);
                        return -1;
                }
                dst_release(&rt->u.dst);
                dst_release(odst);
        }

        if ((*pskb)->dst->error)
                return -1;

        /* Change in oif may mean change in hh_len. */
        hh_len = (*pskb)->dst->dev->hard_header_len;
        if (skb_headroom(*pskb) < hh_len) {
                struct sk_buff *nskb;

                nskb = skb_realloc_headroom(*pskb, hh_len);
                if (!nskb)
                        return -1;
                if ((*pskb)->sk)
                        skb_set_owner_w(nskb, (*pskb)->sk);
                kfree_skb(*pskb);
                *pskb = nskb;
        }

        return 0;
}
EXPORT_SYMBOL(ip_route_me_harder);
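/*
 * Illustrative usage, not part of this file: NAT and mangle targets call
 * this after rewriting addresses, TOS or nfmark on a locally generated
 * packet, since the route cached in skb->dst may no longer match.  The
 * header rewrite below is a hypothetical example.
 *
 *      iph->daddr = new_daddr;
 *      if (ip_route_me_harder(pskb) != 0)
 *              return NF_DROP;
 */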
int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len)
{
        struct sk_buff *nskb;

        if (writable_len > (*pskb)->len)
                return 0;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        return pskb_may_pull(*pskb, writable_len);

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;
}
EXPORT_SYMBOL(skb_ip_make_writable);
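/*
 * Illustrative usage, not part of this file: a target that modifies the IP
 * header first ensures the relevant bytes are private and linear, then
 * reloads its header pointer because the copy may have replaced *pskb:
 *
 *      if (!skb_ip_make_writable(pskb, sizeof(struct iphdr)))
 *              return NF_DROP;
 *      iph = (*pskb)->nh.iph;
 */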
#endif /*CONFIG_INET*/
/* Internal logging interface, which relies on the real
   LOG target modules */

#define NF_LOG_PREFIXLEN 128

static nf_logfn *nf_logging[NPROTO]; /* = NULL */
static int reported = 0;
static DEFINE_SPINLOCK(nf_log_lock);
int nf_log_register(int pf, nf_logfn *logfn)
{
        int ret = -EBUSY;

        /* Any setup of logging members must be done before
         * substituting pointer. */
        spin_lock(&nf_log_lock);
        if (!nf_logging[pf]) {
                rcu_assign_pointer(nf_logging[pf], logfn);
                ret = 0;
        }
        spin_unlock(&nf_log_lock);
        return ret;
}
void nf_log_unregister(int pf, nf_logfn *logfn)
{
        spin_lock(&nf_log_lock);
        if (nf_logging[pf] == logfn)
                nf_logging[pf] = NULL;
        spin_unlock(&nf_log_lock);

        /* Give time to concurrent readers. */
        synchronize_net();
}
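/*
 * Illustrative sketch, not part of this file: a logging backend (the LOG
 * target is the in-tree example) provides an nf_logfn for its family.  The
 * name example_logfn is hypothetical.
 *
 *      static void example_logfn(unsigned int hooknum,
 *                                const struct sk_buff *skb,
 *                                const struct net_device *in,
 *                                const struct net_device *out,
 *                                const char *prefix)
 *      {
 *              // emit the packet plus prefix to syslog, netlink, ...
 *      }
 *
 *      nf_log_register(PF_INET, &example_logfn);
 *
 * Only the first backend per family is accepted (-EBUSY otherwise); it is
 * removed again with nf_log_unregister(PF_INET, &example_logfn).
 */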
void nf_log_packet(int pf,
                   unsigned int hooknum,
                   const struct sk_buff *skb,
                   const struct net_device *in,
                   const struct net_device *out,
                   const char *fmt, ...)
{
        va_list args;
        char prefix[NF_LOG_PREFIXLEN];
        nf_logfn *logfn;

        rcu_read_lock();
        logfn = rcu_dereference(nf_logging[pf]);
        if (logfn) {
                va_start(args, fmt);
                vsnprintf(prefix, sizeof(prefix), fmt, args);
                va_end(args);
                /* We must read logging before nf_logfn[pf] */
                logfn(hooknum, skb, in, out, prefix);
        } else if (!reported) {
                printk(KERN_WARNING "nf_log_packet: can't log yet, "
                       "no backend logging module loaded in!\n");
                reported++;
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_register);
EXPORT_SYMBOL(nf_log_unregister);
EXPORT_SYMBOL(nf_log_packet);
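/*
 * Illustrative usage, not part of this file: in-kernel callers log through
 * whatever backend is currently registered, printf-style; the prefix string
 * below is a hypothetical example.
 *
 *      nf_log_packet(PF_INET, hooknum, skb, in, out, "example: bad packet ");
 *
 * If no backend is loaded, the message is dropped and the warning above is
 * printed once.
 */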
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be in
   the hash table, and hence manufactured ICMP or RST packets will not be
   associated with it. */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);

void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
        void (*attach)(struct sk_buff *, struct sk_buff *);

        if (skb->nfct && (attach = ip_ct_attach) != NULL) {
                mb(); /* Just to be sure: must be read before executing this */
                attach(new, skb);
        }
}
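/*
 * Illustrative usage, not part of this file: code that manufactures a reply
 * from an existing packet (ipt_REJECT's TCP RST, locally generated ICMP
 * errors) attaches the new skb to the original's conntrack entry:
 *
 *      nf_ct_attach(nskb, oldskb);
 *
 * When connection tracking is not loaded, ip_ct_attach is NULL and the call
 * is a no-op.
 */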
void __init netfilter_init(void)
{
        int i, h;

        for (i = 0; i < NPROTO; i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&nf_hooks[i][h]);
        }
}
EXPORT_SYMBOL(ip_ct_attach);
EXPORT_SYMBOL(nf_ct_attach);
EXPORT_SYMBOL(nf_getsockopt);
EXPORT_SYMBOL(nf_hook_slow);
EXPORT_SYMBOL(nf_hooks);
EXPORT_SYMBOL(nf_register_hook);
EXPORT_SYMBOL(nf_register_queue_handler);
EXPORT_SYMBOL(nf_register_sockopt);
EXPORT_SYMBOL(nf_reinject);
EXPORT_SYMBOL(nf_setsockopt);
EXPORT_SYMBOL(nf_unregister_hook);
EXPORT_SYMBOL(nf_unregister_queue_handler);
EXPORT_SYMBOL(nf_unregister_sockopt);
#ifdef CONFIG_NETFILTER_DEBUG
EXPORT_SYMBOL(nf_dump_skb);