/* net/core/netfilter.c */
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 *
 * February 2000: Modified by James Morris to have 1 queue per protocol.
 * 15-Mar-2000:   Added NF_REPEAT --RR.
 * 08-May-2003:   Internal logging interface added by Jozsef Kadlecsik.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/route.h>
#include <linux/ip.h>

/* In this code, we can be waiting indefinitely for userspace to
 * service a packet if a hook returns NF_QUEUE.  We could keep a count
 * of skbuffs queued for userspace, and not deregister a hook unless
 * this is zero, but that sucks.  Now, we simply check when the
 * packets come back: if the hook is gone, the packet is discarded. */
#ifdef CONFIG_NETFILTER_DEBUG
#define NFDEBUG(format, args...)  printk(format , ## args)
#else
#define NFDEBUG(format, args...)
#endif

/* Sockopts only registered and called from user context, so
   net locking would be overkill.  Also, [gs]etsockopt calls may
   sleep. */
static DECLARE_MUTEX(nf_sockopt_mutex);

struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
static LIST_HEAD(nf_sockopts);
static spinlock_t nf_hook_lock = SPIN_LOCK_UNLOCKED;

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long-term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static struct nf_queue_handler_t {
        nf_queue_outfn_t outfn;
        void *data;
} queue_handler[NPROTO];
static rwlock_t queue_handler_lock = RW_LOCK_UNLOCKED;

int nf_register_hook(struct nf_hook_ops *reg)
{
        struct list_head *i;

        spin_lock_bh(&nf_hook_lock);
        list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
                if (reg->priority < ((struct nf_hook_ops *)i)->priority)
                        break;
        }
        list_add_rcu(&reg->list, i->prev);
        spin_unlock_bh(&nf_hook_lock);

        synchronize_net();
        return 0;
}

void nf_unregister_hook(struct nf_hook_ops *reg)
{
        spin_lock_bh(&nf_hook_lock);
        list_del_rcu(&reg->list);
        spin_unlock_bh(&nf_hook_lock);

        synchronize_net();
}
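
/*
 * Usage sketch (illustrative, not part of this file): a hook owner fills
 * in a struct nf_hook_ops and registers it; hooks on the same (pf, hooknum)
 * list are kept sorted by ascending priority.  The names my_hook and my_ops
 * are hypothetical; PF_INET, NF_IP_PRE_ROUTING and NF_IP_PRI_FIRST come
 * from the usual netfilter headers.
 *
 *      static unsigned int my_hook(unsigned int hooknum,
 *                                  struct sk_buff **pskb,
 *                                  const struct net_device *in,
 *                                  const struct net_device *out,
 *                                  int (*okfn)(struct sk_buff *))
 *      {
 *              return NF_ACCEPT;
 *      }
 *
 *      static struct nf_hook_ops my_ops = {
 *              .hook     = my_hook,
 *              .owner    = THIS_MODULE,
 *              .pf       = PF_INET,
 *              .hooknum  = NF_IP_PRE_ROUTING,
 *              .priority = NF_IP_PRI_FIRST,
 *      };
 *
 *      nf_register_hook(&my_ops);
 *      ...
 *      nf_unregister_hook(&my_ops);
 */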

/* Do exclusive ranges overlap? */
static inline int overlap(int min1, int max1, int min2, int max2)
{
        return max1 > min2 && min1 < max2;
}

/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
        struct list_head *i;
        int ret = 0;

        if (down_interruptible(&nf_sockopt_mutex) != 0)
                return -EINTR;

        list_for_each(i, &nf_sockopts) {
                struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
                if (ops->pf == reg->pf
                    && (overlap(ops->set_optmin, ops->set_optmax,
                                reg->set_optmin, reg->set_optmax)
                        || overlap(ops->get_optmin, ops->get_optmax,
                                   reg->get_optmin, reg->get_optmax))) {
                        NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
                                ops->set_optmin, ops->set_optmax,
                                ops->get_optmin, ops->get_optmax,
                                reg->set_optmin, reg->set_optmax,
                                reg->get_optmin, reg->get_optmax);
                        ret = -EBUSY;
                        goto out;
                }
        }

        list_add(&reg->list, &nf_sockopts);
out:
        up(&nf_sockopt_mutex);
        return ret;
}

void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
        /* No point being interruptible: we're probably in cleanup_module() */
 restart:
        down(&nf_sockopt_mutex);
        if (reg->use != 0) {
                /* To be woken by nf_sockopt call... */
                /* FIXME: Stuart Young's name appears gratuitously. */
                set_current_state(TASK_UNINTERRUPTIBLE);
                reg->cleanup_task = current;
                up(&nf_sockopt_mutex);
                schedule();
                goto restart;
        }
        list_del(&reg->list);
        up(&nf_sockopt_mutex);
}
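
/*
 * Usage sketch (illustrative, not part of this file): a protocol module
 * claims an exclusive [gs]etsockopt option range by registering a
 * struct nf_sockopt_ops.  The handler names and option numbers below are
 * hypothetical; the set()/get() prototypes are as declared in
 * <linux/netfilter.h>.
 *
 *      static struct nf_sockopt_ops my_sockopts = {
 *              .pf         = PF_INET,
 *              .set_optmin = 64,
 *              .set_optmax = 69,
 *              .set        = my_set,
 *              .get_optmin = 64,
 *              .get_optmax = 69,
 *              .get        = my_get,
 *      };
 *
 *      nf_register_sockopt(&my_sockopts);
 *      ...
 *      nf_unregister_sockopt(&my_sockopts);
 */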

#ifdef CONFIG_NETFILTER_DEBUG
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/netfilter_ipv4.h>

static void debug_print_hooks_ip(unsigned int nf_debug)
{
        if (nf_debug & (1 << NF_IP_PRE_ROUTING)) {
                printk("PRE_ROUTING ");
                nf_debug ^= (1 << NF_IP_PRE_ROUTING);
        }
        if (nf_debug & (1 << NF_IP_LOCAL_IN)) {
                printk("LOCAL_IN ");
                nf_debug ^= (1 << NF_IP_LOCAL_IN);
        }
        if (nf_debug & (1 << NF_IP_FORWARD)) {
                printk("FORWARD ");
                nf_debug ^= (1 << NF_IP_FORWARD);
        }
        if (nf_debug & (1 << NF_IP_LOCAL_OUT)) {
                printk("LOCAL_OUT ");
                nf_debug ^= (1 << NF_IP_LOCAL_OUT);
        }
        if (nf_debug & (1 << NF_IP_POST_ROUTING)) {
                printk("POST_ROUTING ");
                nf_debug ^= (1 << NF_IP_POST_ROUTING);
        }
        if (nf_debug)
                printk("Crap bits: 0x%04X", nf_debug);
        printk("\n");
}

void nf_dump_skb(int pf, struct sk_buff *skb)
{
        printk("skb: pf=%i %s dev=%s len=%u\n",
               pf,
               skb->sk ? "(owned)" : "(unowned)",
               skb->dev ? skb->dev->name : "(no dev)",
               skb->len);
        switch (pf) {
        case PF_INET: {
                const struct iphdr *ip = skb->nh.iph;
                __u32 *opt = (__u32 *) (ip + 1);
                int opti;
                __u16 src_port = 0, dst_port = 0;

                if (ip->protocol == IPPROTO_TCP
                    || ip->protocol == IPPROTO_UDP) {
                        struct tcphdr *tcp=(struct tcphdr *)((__u32 *)ip+ip->ihl);
                        src_port = ntohs(tcp->source);
                        dst_port = ntohs(tcp->dest);
                }

                printk("PROTO=%d %u.%u.%u.%u:%hu %u.%u.%u.%u:%hu"
                       " L=%hu S=0x%2.2hX I=%hu F=0x%4.4hX T=%hu",
                       ip->protocol, NIPQUAD(ip->saddr),
                       src_port, NIPQUAD(ip->daddr),
                       dst_port,
                       ntohs(ip->tot_len), ip->tos, ntohs(ip->id),
                       ntohs(ip->frag_off), ip->ttl);

                for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++)
                        printk(" O=0x%8.8X", *opt++);
                printk(" MARK=%lu (0x%lx)",
                       (long unsigned int)skb->nfmark,
                       (long unsigned int)skb->nfmark);
                printk("\n");
        }
        }
}

void nf_debug_ip_local_deliver(struct sk_buff *skb)
{
        /* If it's a loopback packet, it must have come through
         * NF_IP_LOCAL_OUT, NF_IP_POST_ROUTING, NF_IP_PRE_ROUTING and
         * NF_IP_LOCAL_IN.  Otherwise, it must have come through
         * NF_IP_PRE_ROUTING and NF_IP_LOCAL_IN.  */
        if (!skb->dev) {
                printk("ip_local_deliver: skb->dev is NULL.\n");
        }
        else if (strcmp(skb->dev->name, "lo") == 0) {
                if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
                                      | (1 << NF_IP_POST_ROUTING)
                                      | (1 << NF_IP_PRE_ROUTING)
                                      | (1 << NF_IP_LOCAL_IN))) {
                        printk("ip_local_deliver: bad loopback skb: ");
                        debug_print_hooks_ip(skb->nf_debug);
                        nf_dump_skb(PF_INET, skb);
                }
        }
        else {
                if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
                                      | (1<<NF_IP_LOCAL_IN))) {
                        printk("ip_local_deliver: bad non-lo skb: ");
                        debug_print_hooks_ip(skb->nf_debug);
                        nf_dump_skb(PF_INET, skb);
                }
        }
}

void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
{
        if (newskb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
                                 | (1 << NF_IP_POST_ROUTING))) {
                printk("ip_dev_loopback_xmit: bad owned skb = %p: ",
                       newskb);
                debug_print_hooks_ip(newskb->nf_debug);
                nf_dump_skb(PF_INET, newskb);
        }
        /* Clear to avoid confusing input check */
        newskb->nf_debug = 0;
}

void nf_debug_ip_finish_output2(struct sk_buff *skb)
{
        /* If it's owned, it must have gone through the
         * NF_IP_LOCAL_OUT and NF_IP_POST_ROUTING.
         * Otherwise, must have gone through
         * NF_IP_PRE_ROUTING, NF_IP_FORWARD and NF_IP_POST_ROUTING.
         */
        if (skb->sk) {
                if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
                                      | (1 << NF_IP_POST_ROUTING))) {
                        printk("ip_finish_output: bad owned skb = %p: ", skb);
                        debug_print_hooks_ip(skb->nf_debug);
                        nf_dump_skb(PF_INET, skb);
                }
        } else {
                if (skb->nf_debug != ((1 << NF_IP_PRE_ROUTING)
                                      | (1 << NF_IP_FORWARD)
                                      | (1 << NF_IP_POST_ROUTING))) {
                        /* Fragments, entunnelled packets, TCP RSTs
                           generated by ipt_REJECT will have no
                           owners, but still may be local */
                        if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
                                              | (1 << NF_IP_POST_ROUTING))){
                                printk("ip_finish_output:"
                                       " bad unowned skb = %p: ",skb);
                                debug_print_hooks_ip(skb->nf_debug);
                                nf_dump_skb(PF_INET, skb);
                        }
                }
        }
}
#endif /*CONFIG_NETFILTER_DEBUG*/

/* Call get/setsockopt() */
static int nf_sockopt(struct sock *sk, int pf, int val,
                      char __user *opt, int *len, int get)
{
        struct list_head *i;
        struct nf_sockopt_ops *ops;
        int ret;

        if (down_interruptible(&nf_sockopt_mutex) != 0)
                return -EINTR;

        list_for_each(i, &nf_sockopts) {
                ops = (struct nf_sockopt_ops *)i;
                if (ops->pf == pf) {
                        if (get) {
                                if (val >= ops->get_optmin
                                    && val < ops->get_optmax) {
                                        ops->use++;
                                        up(&nf_sockopt_mutex);
                                        ret = ops->get(sk, val, opt, len);
                                        goto out;
                                }
                        } else {
                                if (val >= ops->set_optmin
                                    && val < ops->set_optmax) {
                                        ops->use++;
                                        up(&nf_sockopt_mutex);
                                        ret = ops->set(sk, val, opt, *len);
                                        goto out;
                                }
                        }
                }
        }
        up(&nf_sockopt_mutex);
        return -ENOPROTOOPT;

 out:
        down(&nf_sockopt_mutex);
        ops->use--;
        if (ops->cleanup_task)
                wake_up_process(ops->cleanup_task);
        up(&nf_sockopt_mutex);
        return ret;
}

int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
                  int len)
{
        return nf_sockopt(sk, pf, val, opt, &len, 0);
}

int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
{
        return nf_sockopt(sk, pf, val, opt, len, 1);
}

static unsigned int nf_iterate(struct list_head *head,
                               struct sk_buff **skb,
                               int hook,
                               const struct net_device *indev,
                               const struct net_device *outdev,
                               struct list_head **i,
                               int (*okfn)(struct sk_buff *),
                               int hook_thresh)
{
        /*
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
        list_for_each_continue_rcu(*i, head) {
                struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

                if (hook_thresh > elem->priority)
                        continue;

                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
                switch (elem->hook(hook, skb, indev, outdev, okfn)) {
                case NF_QUEUE:
                        return NF_QUEUE;

                case NF_STOLEN:
                        return NF_STOLEN;

                case NF_DROP:
                        return NF_DROP;

                case NF_REPEAT:
                        *i = (*i)->prev;
                        break;

#ifdef CONFIG_NETFILTER_DEBUG
                case NF_ACCEPT:
                        break;

                default:
                        NFDEBUG("Evil return from %p(%u).\n",
                                elem->hook, hook);
#endif
                }
        }
        return NF_ACCEPT;
}

int nf_register_queue_handler(int pf, nf_queue_outfn_t outfn, void *data)
{
        int ret;

        write_lock_bh(&queue_handler_lock);
        if (queue_handler[pf].outfn)
                ret = -EBUSY;
        else {
                queue_handler[pf].outfn = outfn;
                queue_handler[pf].data = data;
                ret = 0;
        }
        write_unlock_bh(&queue_handler_lock);

        return ret;
}

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
        write_lock_bh(&queue_handler_lock);
        queue_handler[pf].outfn = NULL;
        queue_handler[pf].data = NULL;
        write_unlock_bh(&queue_handler_lock);

        return 0;
}
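
/*
 * Usage sketch (illustrative, not part of this file): a userspace-queueing
 * module such as ip_queue provides an outfn and registers it for one
 * protocol family.  The names my_outfn and my_data are hypothetical; the
 * outfn must eventually hand every packet back via nf_reinject().
 *
 *      static int my_outfn(struct sk_buff *skb, struct nf_info *info,
 *                          void *data)
 *      {
 *              ... queue skb for userspace, remember info ...
 *              return 0;
 *      }
 *
 *      nf_register_queue_handler(PF_INET, my_outfn, my_data);
 *      ...
 *      nf_unregister_queue_handler(PF_INET);
 */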

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int nf_queue(struct sk_buff *skb,
                    struct list_head *elem,
                    int pf, unsigned int hook,
                    struct net_device *indev,
                    struct net_device *outdev,
                    int (*okfn)(struct sk_buff *))
{
        int status;
        struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
        struct net_device *physindev = NULL;
        struct net_device *physoutdev = NULL;
#endif

        /* QUEUE == DROP if no one is waiting, to be safe. */
        read_lock(&queue_handler_lock);
        if (!queue_handler[pf].outfn) {
                read_unlock(&queue_handler_lock);
                kfree_skb(skb);
                return 1;
        }

        info = kmalloc(sizeof(*info), GFP_ATOMIC);
        if (!info) {
                if (net_ratelimit())
                        printk(KERN_ERR "OOM queueing packet %p\n",
                               skb);
                read_unlock(&queue_handler_lock);
                kfree_skb(skb);
                return 1;
        }

        *info = (struct nf_info) {
                (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };

        /* If it's going away, ignore hook. */
        if (!try_module_get(info->elem->owner)) {
                read_unlock(&queue_handler_lock);
                kfree(info);
                return 0;
        }

        /* Bump dev refs so they don't vanish while packet is out */
        if (indev) dev_hold(indev);
        if (outdev) dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                physindev = skb->nf_bridge->physindev;
                if (physindev) dev_hold(physindev);
                physoutdev = skb->nf_bridge->physoutdev;
                if (physoutdev) dev_hold(physoutdev);
        }
#endif

        status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
        read_unlock(&queue_handler_lock);

        if (status < 0) {
                /* Queueing failed: drop our references and the packet. */
                if (indev) dev_put(indev);
                if (outdev) dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
                if (physindev) dev_put(physindev);
                if (physoutdev) dev_put(physoutdev);
#endif
                module_put(info->elem->owner);
                kfree(info);
                kfree_skb(skb);
                return 1;
        }
        return 1;
}

int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb,
                 struct net_device *indev,
                 struct net_device *outdev,
                 int (*okfn)(struct sk_buff *),
                 int hook_thresh)
{
        struct list_head *elem;
        unsigned int verdict;
        int ret = 0;

        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();

#ifdef CONFIG_NETFILTER_DEBUG
        if (skb->nf_debug & (1 << hook)) {
                printk("nf_hook: hook %i already set.\n", hook);
                nf_dump_skb(pf, skb);
        }
        skb->nf_debug |= (1 << hook);
#endif

        elem = &nf_hooks[pf][hook];
 next_hook:
        verdict = nf_iterate(&nf_hooks[pf][hook], &skb, hook, indev,
                             outdev, &elem, okfn, hook_thresh);
        if (verdict == NF_QUEUE) {
                NFDEBUG("nf_hook: Verdict = QUEUE.\n");
                if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn))
                        goto next_hook;
        }

        switch (verdict) {
        case NF_ACCEPT:
                ret = okfn(skb);
                break;

        case NF_DROP:
                kfree_skb(skb);
                ret = -EPERM;
                break;
        }

        rcu_read_unlock();
        return ret;
}
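
/*
 * Typical entry point (illustrative): protocol stacks do not call
 * nf_hook_slow() directly but go through the NF_HOOK() macro from
 * <linux/netfilter.h>, roughly as ip_local_deliver() does:
 *
 *      return NF_HOOK(PF_INET, NF_IP_LOCAL_IN, skb, skb->dev, NULL,
 *                     ip_local_deliver_finish);
 *
 * okfn (here ip_local_deliver_finish) is the continuation that runs only
 * when traversal ends in NF_ACCEPT, possibly later via nf_reinject() for
 * queued packets.
 */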

void nf_reinject(struct sk_buff *skb, struct nf_info *info,
                 unsigned int verdict)
{
        struct list_head *elem = &info->elem->list;
        struct list_head *i;

        rcu_read_lock();

        /* Release those devices we held, or Alexey will kill me. */
        if (info->indev) dev_put(info->indev);
        if (info->outdev) dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge) {
                if (skb->nf_bridge->physindev)
                        dev_put(skb->nf_bridge->physindev);
                if (skb->nf_bridge->physoutdev)
                        dev_put(skb->nf_bridge->physoutdev);
        }
#endif

        /* Drop reference to owner of hook which queued us. */
        module_put(info->elem->owner);

        list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
                if (i == elem)
                        break;
        }

        if (elem == &nf_hooks[info->pf][info->hook]) {
                /* The module which sent it to userspace is gone. */
                NFDEBUG("%s: module disappeared, dropping packet.\n",
                        __FUNCTION__);
                verdict = NF_DROP;
        }

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT) {
                elem = elem->prev;
                verdict = NF_ACCEPT;
        }

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
                                     &skb, info->hook,
                                     info->indev, info->outdev, &elem,
                                     info->okfn, INT_MIN);
        }

        switch (verdict) {
        case NF_ACCEPT:
                info->okfn(skb);
                break;

        case NF_QUEUE:
                if (!nf_queue(skb, elem, info->pf, info->hook,
                              info->indev, info->outdev, info->okfn))
                        goto next_hook;
                break;
        }
        rcu_read_unlock();

        if (verdict == NF_DROP)
                kfree_skb(skb);

        kfree(info);
        return;
}

#ifdef CONFIG_INET
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
int ip_route_me_harder(struct sk_buff **pskb)
{
        struct iphdr *iph = (*pskb)->nh.iph;
        struct rtable *rt;
        struct flowi fl = {};
        struct dst_entry *odst;
        unsigned int hh_len;

        /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
         * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
         */
        if (inet_addr_type(iph->saddr) == RTN_LOCAL) {
                fl.nl_u.ip4_u.daddr = iph->daddr;
                fl.nl_u.ip4_u.saddr = iph->saddr;
                fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
                fl.oif = (*pskb)->sk ? (*pskb)->sk->sk_bound_dev_if : 0;
#ifdef CONFIG_IP_ROUTE_FWMARK
                fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
#endif
                fl.proto = iph->protocol;
                if (ip_route_output_key(&rt, &fl) != 0)
                        return -1;

                /* Drop old route. */
                dst_release((*pskb)->dst);
                (*pskb)->dst = &rt->u.dst;
        } else {
                /* non-local src, find valid iif to satisfy
                 * rp-filter when calling ip_route_input. */
                fl.nl_u.ip4_u.daddr = iph->saddr;
                if (ip_route_output_key(&rt, &fl) != 0)
                        return -1;

                odst = (*pskb)->dst;
                if (ip_route_input(*pskb, iph->daddr, iph->saddr,
                                   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
                        dst_release(&rt->u.dst);
                        return -1;
                }
                dst_release(&rt->u.dst);
                dst_release(odst);
        }

        if ((*pskb)->dst->error)
                return -1;

        /* Change in oif may mean change in hh_len. */
        hh_len = (*pskb)->dst->dev->hard_header_len;
        if (skb_headroom(*pskb) < hh_len) {
                struct sk_buff *nskb;

                nskb = skb_realloc_headroom(*pskb, hh_len);
                if (!nskb)
                        return -1;
                if ((*pskb)->sk)
                        skb_set_owner_w(nskb, (*pskb)->sk);
                kfree_skb(*pskb);
                *pskb = nskb;
        }

        return 0;
}
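
/*
 * Usage sketch (illustrative): NAT-style callers invoke this after rewriting
 * addresses in the IP header on LOCAL_OUT, so the skb is re-routed to match
 * its new addresses.  The mangling shown is hypothetical:
 *
 *      iph->daddr = new_daddr;
 *      ip_send_check(iph);
 *      if (ip_route_me_harder(pskb) != 0)
 *              return NF_DROP;
 */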

int skb_ip_make_writable(struct sk_buff **pskb, unsigned int writable_len)
{
        struct sk_buff *nskb;
        unsigned int iplen;

        if (writable_len > (*pskb)->len)
                return 0;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        /* Alexey says IP hdr is always modifiable and linear, so ok. */
        if (writable_len <= (*pskb)->nh.iph->ihl*4)
                return 1;

        iplen = writable_len - (*pskb)->nh.iph->ihl*4;

        /* DaveM says protocol headers are also modifiable. */
        switch ((*pskb)->nh.iph->protocol) {
        case IPPROTO_TCP: {
                struct tcphdr hdr;
                if (skb_copy_bits(*pskb, (*pskb)->nh.iph->ihl*4,
                                  &hdr, sizeof(hdr)) != 0)
                        goto copy_skb;
                if (writable_len <= (*pskb)->nh.iph->ihl*4 + hdr.doff*4)
                        goto pull_skb;
                goto copy_skb;
        }
        case IPPROTO_UDP:
                if (writable_len<=(*pskb)->nh.iph->ihl*4+sizeof(struct udphdr))
                        goto pull_skb;
                goto copy_skb;
        case IPPROTO_ICMP:
                if (writable_len
                    <= (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr))
                        goto pull_skb;
                goto copy_skb;
        /* Insert other cases here as desired */
        }

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;

pull_skb:
        return pskb_may_pull(*pskb, writable_len);
}
EXPORT_SYMBOL(skb_ip_make_writable);
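
/*
 * Usage sketch (illustrative): a caller that wants to rewrite the first
 * writable_len bytes of an IPv4 packet calls this first and only touches
 * the headers if it succeeds (nonzero return), e.g.:
 *
 *      if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4
 *                                      + sizeof(struct tcphdr)))
 *              return NF_DROP;
 *      ... (*pskb)->nh.iph and the TCP header may now be modified ...
 */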
#endif /*CONFIG_INET*/

/* Internal logging interface, which relies on the real
   LOG target modules */

#define NF_LOG_PREFIXLEN                128

static nf_logfn *nf_logging[NPROTO]; /* = NULL */
static int reported = 0;
static spinlock_t nf_log_lock = SPIN_LOCK_UNLOCKED;

int nf_log_register(int pf, nf_logfn *logfn)
{
        int ret = -EBUSY;

        /* Any setup of logging members must be done before
         * substituting pointer. */
        smp_wmb();
        spin_lock(&nf_log_lock);
        if (!nf_logging[pf]) {
                nf_logging[pf] = logfn;
                ret = 0;
        }
        spin_unlock(&nf_log_lock);
        return ret;
}

void nf_log_unregister(int pf, nf_logfn *logfn)
{
        spin_lock(&nf_log_lock);
        if (nf_logging[pf] == logfn)
                nf_logging[pf] = NULL;
        spin_unlock(&nf_log_lock);

        /* Give time to concurrent readers. */
        synchronize_net();
}
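
/*
 * Usage sketch (illustrative, not part of this file): a logging backend
 * such as the ipt_LOG target registers one nf_logfn per protocol family.
 * The name my_logfn is hypothetical; the prototype follows the call made
 * in nf_log_packet() below.
 *
 *      static void my_logfn(unsigned int hooknum, const struct sk_buff *skb,
 *                           const struct net_device *in,
 *                           const struct net_device *out,
 *                           const char *prefix)
 *      {
 *              ... emit the log line ...
 *      }
 *
 *      nf_log_register(PF_INET, my_logfn);
 *      ...
 *      nf_log_unregister(PF_INET, my_logfn);
 */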

void nf_log_packet(int pf,
                   unsigned int hooknum,
                   const struct sk_buff *skb,
                   const struct net_device *in,
                   const struct net_device *out,
                   const char *fmt, ...)
{
        va_list args;
        char prefix[NF_LOG_PREFIXLEN];
        nf_logfn *logfn;

        rcu_read_lock();
        logfn = nf_logging[pf];
        if (logfn) {
                va_start(args, fmt);
                vsnprintf(prefix, sizeof(prefix), fmt, args);
                va_end(args);
                /* We must read logging before nf_logfn[pf] */
                smp_read_barrier_depends();
                logfn(hooknum, skb, in, out, prefix);
        } else if (!reported) {
                printk(KERN_WARNING "nf_log_packet: can't log yet, "
                       "no backend logging module loaded in!\n");
                reported++;
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_register);
EXPORT_SYMBOL(nf_log_unregister);
EXPORT_SYMBOL(nf_log_packet);

/* This does not belong here, but ipt_REJECT needs it if connection
   tracking in use: without this, connection may not be in hash table,
   and hence manufactured ICMP or RST packets will not be associated
   with it. */
void (*ip_ct_attach)(struct sk_buff *, struct nf_ct_info *);

void __init netfilter_init(void)
{
        int i, h;

        for (i = 0; i < NPROTO; i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&nf_hooks[i][h]);
        }
}

EXPORT_SYMBOL(ip_ct_attach);
EXPORT_SYMBOL(ip_route_me_harder);
EXPORT_SYMBOL(nf_getsockopt);
EXPORT_SYMBOL(nf_hook_slow);
EXPORT_SYMBOL(nf_hooks);
EXPORT_SYMBOL(nf_register_hook);
EXPORT_SYMBOL(nf_register_queue_handler);
EXPORT_SYMBOL(nf_register_sockopt);
EXPORT_SYMBOL(nf_reinject);
EXPORT_SYMBOL(nf_setsockopt);
EXPORT_SYMBOL(nf_unregister_hook);
EXPORT_SYMBOL(nf_unregister_queue_handler);
EXPORT_SYMBOL(nf_unregister_sockopt);
#ifdef CONFIG_NETFILTER_DEBUG
EXPORT_SYMBOL(nf_dump_skb);
#endif