/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Version:     $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year in coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>

/*
 *      Shall we try to damage output packets if routing dev changes?
 */

int sysctl_ip_dynaddr;
int sysctl_ip_default_ttl = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

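/* For reference, a minimal userspace sketch of what ip_fast_csum()
 * computes: the RFC 1071 one's-complement checksum over the header,
 * with the check field zeroed first (as above). Everything here is
 * illustrative -- the function name is made up and this is not kernel API.
 */
#if 0   /* illustrative sketch only, never compiled */
static unsigned short example_ip_hdr_csum(const unsigned char *hdr,
                                          unsigned int ihl)
{
        unsigned long sum = 0;
        unsigned int i, words = ihl * 2;        /* 16-bit words in the header */

        for (i = 0; i < words; i++)             /* sum the header 16 bits at a time */
                sum += (hdr[2 * i] << 8) | hdr[2 * i + 1];
        while (sum >> 16)                       /* fold carries back into the low 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);
        return (unsigned short)~sum;            /* final one's complement */
}
#endif
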
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        newskb->mac.raw = newskb->data;
        __skb_pull(newskb, newskb->nh.raw - newskb->data);
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        BUG_TRAP(newskb->dst);

#ifdef CONFIG_NETFILTER_DEBUG
        nf_debug_ip_loopback_xmit(newskb);
#endif
        netif_rx(newskb);
        return 0;
}

static inline int ip_select_ttl(struct inet_opt *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}

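/* inet->uc_ttl above is what the IP_TTL socket option sets; a negative
 * value means "use the route's default". A hedged userspace sketch
 * (function name is illustrative, error handling omitted):
 */
#if 0   /* illustrative userspace sketch, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>

/* Override the default unicast TTL for one socket; ip_select_ttl()
 * then returns this value instead of the RTAX_HOPLIMIT metric. */
static int example_set_ttl(int fd)
{
        int ttl = 16;

        return setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
}
#endif
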
/*
 *              Add an IP header to an skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          u32 saddr, u32 daddr, struct ip_options *opt)
{
        struct inet_opt *inet = inet_sk(sk);
        struct rtable *rt = (struct rtable *)skb->dst;
        struct iphdr *iph;

        /* Build the IP header. */
        if (opt)
                iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
        else
                iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->u.dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        iph->tot_len  = htons(skb->len);
        ip_select_ident(iph, &rt->u.dst, sk);
        skb->nh.iph   = iph;

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }
        ip_send_check(iph);

        skb->priority = sk->sk_priority;

        /* Send it out. */
        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                       dst_output);
}

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct hh_cache *hh = dst->hh;
        struct net_device *dev = dst->dev;
        int hh_len = LL_RESERVED_SPACE(dev);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

#ifdef CONFIG_NETFILTER_DEBUG
        nf_debug_ip_finish_output2(skb);
#endif /* CONFIG_NETFILTER_DEBUG */

        if (hh) {
                int hh_alen;

                read_lock_bh(&hh->hh_lock);
                hh_alen = HH_DATA_ALIGN(hh->hh_len);
                memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
                read_unlock_bh(&hh->hh_lock);
                skb_push(skb, hh->hh_len);
                return hh->hh_output(skb);
        } else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

int ip_finish_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dst->dev;

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
                       ip_finish_output2);
}

int ip_mc_output(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct sock *sk = skb->sk;
        struct rtable *rt = (struct rtable *)skb->dst;
        struct net_device *dev = rt->u.dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that were returned after forwarding; they will be dropped
                   by ip_mr_input in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
                ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
                                        newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (skb->nh.iph->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
                                newskb->dev, ip_dev_loopback_xmit);
        }

        if (skb->len > dst_pmtu(&rt->u.dst))
                return ip_fragment(skb, ip_finish_output);
        else
                return ip_finish_output(skb);
}

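/* The mc_loop test above is driven by the IP_MULTICAST_LOOP socket
 * option. A hedged userspace sketch (function name is illustrative,
 * error handling omitted):
 */
#if 0   /* illustrative userspace sketch, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>

/* Disable loopback of this socket's own multicast datagrams; with
 * mc_loop cleared, ip_mc_output() skips the local skb_clone(). */
static int example_disable_mc_loop(int fd)
{
        unsigned char loop = 0;

        return setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP,
                          &loop, sizeof(loop));
}
#endif
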
int ip_output(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

        if (skb->len > dst_pmtu(skb->dst) && !skb_shinfo(skb)->tso_size)
                return ip_fragment(skb, ip_finish_output);
        else
                return ip_finish_output(skb);
}

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
        struct sock *sk = skb->sk;
        struct inet_opt *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rt = (struct rtable *) skb->dst;
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                u32 daddr;

                /* Use the correct destination address if we have options. */
                daddr = inet->daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .uli_u = { .ports =
                                                       { .sport = inet->sport,
                                                         .dport = inet->dport } } };

                        /* If this fails, the transport layer's retransmit
                         * mechanism will keep trying until the route appears
                         * or the connection times out.
                         */
                        if (ip_route_output_flow(&rt, &fl, sk, 0))
                                goto no_route;
                }
                __sk_dst_set(sk, &rt->u.dst);
                tcp_v4_setup_caps(sk, &rt->u.dst);
        }
        skb->dst = dst_clone(&rt->u.dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it; allocate and build the IP header. */
        iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        iph->tot_len = htons(skb->len);
        if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        skb->nh.iph   = iph;
        /* The transport layer sets skb->h.foo itself. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->daddr, rt, 0);
        }

        ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);

        /* Add an IP checksum. */
        ip_send_check(iph);

        skb->priority = sk->sk_priority;

        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                       dst_output);

no_route:
        IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        to->security = from->security;
        to->dst = dst_clone(from->dst);
        to->dev = from->dev;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
        to->nfmark = from->nfmark;
        to->nfcache = from->nfcache;
        /* Connection association is the same as for the pre-frag packet */
        nf_conntrack_put(to->nfct);
        to->nfct = from->nfct;
        nf_conntrack_get(to->nfct);
        to->nfctinfo = from->nfctinfo;
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(to->nf_bridge);
        to->nf_bridge = from->nf_bridge;
        nf_bridge_get(to->nf_bridge);
#endif
#ifdef CONFIG_NETFILTER_DEBUG
        to->nf_debug = from->nf_debug;
#endif
#endif
}

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each consisting of an IP header plus a block of
 *      the original datagram's data) such that each piece fits in a single
 *      device frame, and queue the resulting frames for sending.  A worked
 *      sketch of the fragment geometry follows the function.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
        struct iphdr *iph;
        int raw = 0;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        int not_last_frag;
        struct rtable *rt = (struct rtable *)skb->dst;
        int err = 0;

        dev = rt->u.dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = skb->nh.iph;

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(dst_pmtu(&rt->u.dst)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_pmtu(&rt->u.dst) - hlen;      /* Size of data space */

        /* When frag_list is given, use it. First, check its validity:
         * some transformers could create a wrong frag_list or break an
         * existing one; that is not prohibited. In this case fall back
         * to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first bad
         * fragment.
         */
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *frag;
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off |= htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one goes down. */
                        if (frag) {
                                frag->h.raw = frag->data;
                                frag->nh.raw = __skb_push(frag, hlen);
                                memcpy(frag->nh.raw, iph, hlen);
                                iph = frag->nh.iph;
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = raw + hlen;               /* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
        /* for bridged IP traffic encapsulated inside e.g. a vlan header,
         * we need to make room for the encapsulating header */
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
        mtu -= nf_bridge_pad(skb);
#else
        ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif
        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb2->nh.raw = skb2->data;
                skb2->h.raw = skb2->data + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                memcpy(skb2->nh.raw, skb->data, hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = skb2->nh.iph;
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and do it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC: If we are fragmenting a fragment that's not the
                 *                last fragment then keep MF set on each fragment.
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */

                IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;
        }
        kfree_skb(skb);
        IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
        return err;
}

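/* A hedged, standalone sketch of the slow-path geometry above: payload
 * space per fragment is the PMTU minus the header, every fragment but
 * the last is trimmed to a multiple of 8 bytes, and frag_off carries
 * (offset >> 3) with IP_MF (0x2000) set on all but the last piece.
 * The sizes are made-up example values; an unfragmented input datagram
 * is assumed.
 */
#if 0   /* illustrative userspace sketch, never compiled */
#include <stdio.h>

int main(void)
{
        unsigned int hlen = 20, pmtu = 1500, total = 4000; /* example sizes */
        unsigned int mtu = pmtu - hlen;         /* data space per fragment */
        unsigned int left = total, offset = 0;

        while (left > 0) {
                unsigned int len = left > mtu ? (mtu & ~7u) : left;
                int mf = left > len;            /* IP_MF on all but the last */

                printf("frag: offset=%u len=%u frag_off=0x%04x\n",
                       offset, len, (offset >> 3) | (mf ? 0x2000 : 0));
                offset += len;
                left -= len;
        }
        return 0;
}
#endif
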
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_HW) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                unsigned int csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}

static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        unsigned int csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data. Each piece is held on the socket until
 *      ip_push_pending_frames() is called. Each piece can be a page or
 *      non-page data.
 *
 *      Other transport protocols besides UDP (e.g. raw sockets) can
 *      potentially use this interface as well.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable *rt,
                   unsigned int flags)
{
        struct inet_opt *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                dst_hold(&rt->u.dst);
                inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst);
                inet->cork.rt = rt;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                if ((exthdrlen = rt->u.dst.header_len) != 0) {
                        length += exthdrlen;
                        transhdrlen += exthdrlen;
                }
        } else {
                rt = inet->cork.rt;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we want
         * it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
            !exthdrlen)
                csummode = CHECKSUM_HW;

        inet->cork.length += length;

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chain of skbs;
         * each segment is an IP fragment ready for sending to the network
         * once an appropriate IP header is added.
         */

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into the current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->u.dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /* The last fragment gets additional space at the tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea which fragment will be
                         * the last.
                         */
                        if (datalen == length)
                                alloclen += rt->u.dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb->nh.raw = data + exthdrlen;
                        data += fragheaderlen;
                        skb->h.raw = data + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                skb_trim(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL)  {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                                skb->truesize += PAGE_SIZE;
                                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        return err;
}

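/* From userspace, this corking path is what UDP_CORK exercises: while
 * the cork is on, each send() lands in ip_append_data(), and removing
 * the cork triggers ip_push_pending_frames(). A hedged sketch on a
 * connected UDP socket (function name is illustrative, error handling
 * omitted):
 */
#if 0   /* illustrative userspace sketch, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>

/* Build one UDP datagram out of two send() calls. */
static void example_corked_sends(int fd)
{
        int on = 1, off = 0;

        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
        send(fd, "hello ", 6, 0);       /* queued via ip_append_data()   */
        send(fd, "world", 5, 0);        /* appended to the same datagram */
        setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
                                        /* uncork: the datagram goes out */
}
#endif
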
ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_opt *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = inet->cork.rt;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->u.dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;

        while (size > 0) {
                int i;

                /* Check if the remaining data fits into the current packet. */
                len = mtu - skb->len;
                if (len < size)
                        len = maxfraglen - skb->len;
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        char *data;
                        struct iphdr *iph;
                        int alloclen;

                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fragheaderlen + fraggap);
                        skb->nh.iph = iph = (struct iphdr *)data;
                        data += fragheaderlen;
                        skb->h.raw = data;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                skb_trim(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        unsigned int csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        return err;
}

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push it out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_opt *inet = inet_sk(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = inet->cork.rt;
        struct iphdr *iph;
        int df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to the ip header from the ext header */
        if (skb->data < skb->nh.raw)
                __skb_pull(skb, skb->nh.raw - skb->data);
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                __sock_put(tmp_skb->sk);
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
         * allow the frame generated here to be fragmented. No matter how
         * transforms change the size of the packet, it will go out.
         */
        if (inet->pmtudisc != IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc == IP_PMTUDISC_DO ||
            (!skb_shinfo(skb)->frag_list && ip_dont_fragment(sk, &rt->u.dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->u.dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->tot_len = htons(skb->len);
        iph->frag_off = df;
        if (!df) {
                __ip_select_ident(iph, &rt->u.dst, 0);
        } else {
                iph->id = htons(inet->id++);
        }
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;
        ip_send_check(iph);

        skb->priority = sk->sk_priority;
        skb->dst = dst_clone(&rt->u.dst);

        /* Netfilter gets the whole, unfragmented skb. */
        err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
                      skb->dst->dev, dst_output);
        if (err) {
                if (err > 0)
                        err = inet->recverr ? net_xmit_errno(err) : 0;
                if (err)
                        goto error;
        }

out:
        inet->cork.flags &= ~IPCORK_OPT;
        if (inet->cork.opt) {
                kfree(inet->cork.opt);
                inet->cork.opt = NULL;
        }
        if (inet->cork.rt) {
                ip_rt_put(inet->cork.rt);
                inet->cork.rt = NULL;
        }
        return err;

error:
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

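/* The pmtudisc checks above map onto the IP_MTU_DISCOVER socket
 * option. A hedged sketch of forcing DF (IP_PMTUDISC_DO) from
 * userspace (function name is illustrative, error handling omitted):
 */
#if 0   /* illustrative userspace sketch, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>

/* Request true path MTU discovery: inet->pmtudisc becomes
 * IP_PMTUDISC_DO, so ip_push_pending_frames() sets DF and never
 * marks the skb local_df. */
static int example_force_df(int fd)
{
        int val = IP_PMTUDISC_DO;

        return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
                          &val, sizeof(val));
}
#endif
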
/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct inet_opt *inet = inet_sk(sk);
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        inet->cork.flags &= ~IPCORK_OPT;
        if (inet->cork.opt) {
                kfree(inet->cork.opt);
                inet->cork.opt = NULL;
        }
        if (inet->cork.rt) {
                ip_rt_put(inet->cork.rt);
                inet->cork.rt = NULL;
        }
}


/*
 *      Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        unsigned int csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 *      Generic function to send a packet as a reply to another packet.
 *      Used to send TCP resets so far. ICMP should use this function too.
 *
 *      Should run single threaded per socket because it uses the sock
 *      structure to pass arguments.
 *
 *      LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_opt *inet = inet_sk(sk);
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        u32 daddr;
        struct rtable *rt = (struct rtable *)skb->dst;

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
                                                .tos = RT_TOS(skb->nh.iph->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
                                               { .sport = skb->h.th->dest,
                                                 .dport = skb->h.th->source } },
                                    .proto = sk->sk_protocol };
                if (ip_route_output_key(&rt, &fl))
                        return;
        }

        /* And let IP do all the hard work.

           This chunk is not reentrant, hence the spinlock.
           Note that it relies on the fact that this function is called
           with BHs locally disabled and that sk cannot already be
           spinlocked.
         */
        bh_lock_sock(sk);
        inet->tos = skb->nh.iph->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = skb->nh.iph->protocol;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}

/*
 *      IP protocol layer initialiser
 */

static struct packet_type ip_packet_type = {
        .type = __constant_htons(ETH_P_IP),
        .func = ip_rcv,
};

/*
 *      IP registers the packet type and then calls the subprotocol initialisers
 */

void __init ip_init(void)
{
        dev_add_pack(&ip_packet_type);

        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_finish_output);
EXPORT_SYMBOL(ip_fragment);
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);

#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_ip_default_ttl);
#endif