/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Version:     $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year in a coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>

/*
 *      Shall we try to damage output packets if routing dev changes?
 */

int sysctl_ip_dynaddr;
int sysctl_ip_default_ttl = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
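
/*
 * Illustrative sketch (not part of the original file): ip_fast_csum()
 * computes the RFC 791 header checksum, i.e. the 16-bit one's complement
 * of the one's-complement sum of the header words, taken with the check
 * field zeroed as above.  A plain, portable equivalent might look like
 * the hypothetical helper below.
 */
#if 0	/* exposition only */
static u16 ip_header_csum_sketch(const u16 *hdr, unsigned int ihl)
{
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 2; i++)	/* ihl counts 32-bit words */
		sum += hdr[i];
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;
}
#endif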

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

#ifdef CONFIG_NETFILTER_DEBUG
	nf_debug_ip_loopback_xmit(newskb);
#endif
	netif_rx(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_opt *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *	Add an IP header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_opt *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

#ifdef CONFIG_NETFILTER_DEBUG
	nf_debug_ip_finish_output2(skb);
#endif /*CONFIG_NETFILTER_DEBUG*/

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

int ip_finish_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
		       ip_finish_output2);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable *)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IpOutRequests);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   returned to us after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	if (skb->len > dst_pmtu(&rt->u.dst) || skb_shinfo(skb)->frag_list)
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}

int ip_output(struct sk_buff *skb)
{
	IP_INC_STATS(IpOutRequests);

	if ((skb->len > dst_pmtu(skb->dst) || skb_shinfo(skb)->frag_list) &&
	    !skb_shinfo(skb)->tso_size)
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_opt *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;
	u32 mtu;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rt = (struct rtable *)skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the transport layer's retransmit
			 * mechanism will keep trying until a route appears
			 * or the connection times itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		__sk_dst_set(sk, &rt->u.dst);
		tcp_v4_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* The transport layer sets skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	mtu = dst_pmtu(&rt->u.dst);
	if (skb->len > mtu && (sk->sk_route_caps & NETIF_F_TSO)) {
		unsigned int hlen;

		/* Hack zone: all this must be done by TCP. */
		hlen = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		skb_shinfo(skb)->tso_size = mtu - hlen;
		skb_shinfo(skb)->tso_segs =
			(skb->len - hlen + skb_shinfo(skb)->tso_size - 1)/
				skb_shinfo(skb)->tso_size - 1;
	}
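
	/*
	 * Worked example (illustrative): with a 1500 byte mtu, a 20 byte
	 * IP header and a 20 byte TCP header, hlen is 40 and tso_size is
	 * 1460.  For skb->len == 4420 the payload is 4380 bytes, so
	 * (4380 + 1459) / 1460 - 1 == 2: tso_segs appears to hold the
	 * number of segments beyond the first, and is passed to
	 * ip_select_ident_more() below so the IP ID sequence leaves room
	 * for every segment the hardware will emit.
	 */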
	ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IpOutNoRoutes);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->security = from->security;
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	to->nfcache = from->nfcache;
	/* Connection association is the same as for the pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#ifdef CONFIG_NETFILTER_DEBUG
	to->nf_debug = from->nf_debug;
#endif
#endif
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each consisting of an IP header plus a block of
 *	the original datagram's data) that will fit in a single device frame,
 *	and queue such frames for sending.
 */

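/*
 * Worked example (illustrative): a 4000 byte datagram with a 20 byte
 * header carries 3980 bytes of data.  Over a 1500 byte MTU the data
 * space per fragment is 1480 bytes, so we emit fragments carrying bytes
 * 0-1479 (offset 0, MF set), 1480-2959 (offset 185, MF set) and
 * 2960-3979 (offset 370, MF clear).  Offsets are in 8 byte units, which
 * is why every fragment but the last must carry a multiple of 8 bytes.
 */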
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	int not_last_frag;
	struct rtable *rt = (struct rtable *)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_pmtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_pmtu(&rt->u.dst) - hlen;	/* Size of data space */

	/* When a frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited.  In such a case, fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Correct socket ownership. */
			if (frag->sk == NULL && skb->sk)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off |= htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IpFragOKs);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IpFragFails);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
	/* for bridged IP traffic encapsulated inside e.g. a vlan header,
	 * we need to make room for the encapsulating header */
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev) + nf_bridge_pad(skb);
	mtu -= nf_bridge_pad(skb);
#else
	ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif
	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed), and do it ONCE on the initial
		 * skb, so that all the following fragments will inherit the
		 * fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC: if we are fragmenting a fragment that's
		 *		  not the last fragment then keep MF on
		 *		  each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		IP_INC_STATS(IpFragCreates);

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP_INC_STATS(IpFragOKs);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IpFragFails);
	return err;
}

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
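
/*
 * Note (added): with CHECKSUM_HW the device will compute the checksum,
 * so a plain copy of the user data suffices; otherwise we accumulate a
 * partial one's-complement sum while copying.  'odd' is the byte offset
 * of this chunk within the checksummed data, so csum_block_add() can
 * rotate odd-aligned partial sums before folding them in.
 */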

static inline int
skb_can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
{
	if (i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
		return page == frag->page &&
			off == frag->page_offset+frag->size;
	}
	return 0;
}

static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece is held on the socket until
 *	ip_push_pending_frames() is called.  Each piece can be page or
 *	non-page data.
 *
 *	Not only UDP but also other transport protocols (e.g. raw sockets)
 *	can potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
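
/*
 * Usage sketch (illustrative, not part of the original file): a datagram
 * send path does roughly what udp_sendmsg() does -- append the payload,
 * then push the queue out as a single datagram unless corked:
 */
#if 0	/* exposition only; 'msg', 'ulen', 'ipc' and 'rt' as in udp_sendmsg() */
	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
			     sizeof(struct udphdr), &ipc, rt,
			     msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);		/* drop what was queued */
	else if (!(msg->msg_flags & MSG_MORE))
		err = ip_push_pending_frames(sk);	/* one datagram out */
#endif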
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_opt *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
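				/* 40 bytes is the maximum IP options
				 * space: a 60 byte maximal header minus
				 * the 20 byte fixed part. */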
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		inet->sndmsg_page = NULL;
		inet->sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu-fragheaderlen) & ~7) + fragheaderlen;
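	/*
	 * Example (illustrative): with mtu 1500 and no options,
	 * fragheaderlen is 20 and maxfraglen is ((1480 & ~7) + 20) = 1500;
	 * with 12 bytes of options it is ((1468 & ~7) + 32) = 1496.  The
	 * data part of each fragment is rounded down to a multiple of 8
	 * bytes, as fragmentation requires.
	 */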

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= maxfraglen &&
	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of skbs;
	 * each segment is an IP fragment ready for sending to the network
	 * once the appropriate IP header has been added.
	 *
	 * Known mistake:
	 *
	 *    If mtu-fragheaderlen is not a multiple of 8, we generate an
	 *    additional small fragment of length (mtu-fragheaderlen)%8,
	 *    even though it is not necessary.  Not a big bug, but it needs
	 *    a fix.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		if ((copy = maxfraglen - skb->len) <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int alloclen;
			BUG_TRAP(copy == 0);

alloc_new_skb:
			datalen = maxfraglen - fragheaderlen;
			if (datalen > length)
				datalen = length;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = maxfraglen;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at the
			 * tail.  Note that with MSG_MORE we overallocate on
			 * fragments, because we have no idea which fragment
			 * will be the last.
			 */
			if (datalen == length)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			copy = datalen - transhdrlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, 0, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = inet->sndmsg_page;
			int off = inet->sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, inet->sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				inet->sndmsg_page = page;
				inet->sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			inet->sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IpOutDiscards);
	return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_opt *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu-fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;

	while (size > 0) {
		int i;
		if ((len = maxfraglen - skb->len) <= 0) {
			char *data;
			struct iphdr *iph;
			BUG_TRAP(len == 0);

			skb = sock_wmalloc(sk, fragheaderlen + hh_len + 15, 1,
					   sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IpOutDiscards);
	return err;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_opt *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	int df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to the IP header, away from any ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
#endif
	}

	/* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here.  No matter how
	 * transforms change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* The DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (!skb_shinfo(skb)->frag_list && ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	if (!df) {
		__ip_select_ident(iph, &rt->u.dst, 0);
	} else {
		iph->id = htons(inet->id++);
	}
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.opt) {
		kfree(inet->cork.opt);
		inet->cork.opt = NULL;
	}
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IpOutDiscards);
	goto out;
}
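
/*
 * Usage sketch (illustrative, not part of the original file): from user
 * space, the append/push machinery above is what makes UDP corking work;
 * several writes are merged into one datagram that goes out on uncork:
 */
#if 0	/* exposition only */
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, part1, len1, 0);	/* queued via ip_append_data() */
	send(fd, part2, len2, 0);	/* appended to the same datagram */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
					/* uncork: the datagram is pushed */
#endif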

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.opt) {
		kfree(inet->cork.opt);
		inet->cork.opt = NULL;
	}
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}


/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_opt *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable *)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.  Note that it
	   relies on the fact that this function is called with BHs locally
	   disabled, and that sk cannot already be locked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

/*
 *	IP protocol layer initialiser
 */

static struct packet_type ip_packet_type = {
	.type = __constant_htons(ETH_P_IP),
	.func = ip_rcv,
};

/*
 *	IP registers the packet type and then calls the subprotocol initialisers
 */

void __init ip_init(void)
{
	dev_add_pack(&ip_packet_type);

	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_finish_output);
EXPORT_SYMBOL(ip_fragment);
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);

#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_ip_default_ttl);
#endif