[linux-2.6.git] net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:     $Id$
9  *
10  *              IPv4 specific functions
11  *
12  *
13  *              code split from:
14  *              linux/ipv4/tcp.c
15  *              linux/ipv4/tcp_input.c
16  *              linux/ipv4/tcp_output.c
17  *
18  *              See tcp.c for author information
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 /*
27  * Changes:
28  *              David S. Miller :       New socket lookup architecture.
29  *                                      This code is dedicated to John Dyson.
30  *              David S. Miller :       Change semantics of established hash,
31  *                                      half is devoted to TIME_WAIT sockets
32  *                                      and the rest go in the other half.
33  *              Andi Kleen :            Add support for syncookies and fixed
34  *                                      some bugs: ip options weren't passed to
35  *                                      the TCP layer, missed a check for an
36  *                                      ACK bit.
37  *              Andi Kleen :            Implemented fast path mtu discovery.
38  *                                      Fixed many serious bugs in the
39  *                                      open_request handling and moved
40  *                                      most of it into the af independent code.
41  *                                      Added tail drop and some other bugfixes.
42  *                                      Added new listen semantics.
43  *              Mike McLagan    :       Routing by source
44  *      Juan Jose Ciarlante:            ip_dynaddr bits
45  *              Andi Kleen:             various fixes.
46  *      Vitaly E. Lavrov        :       Transparent proxy revived after a
47  *                                      year-long coma.
48  *      Andi Kleen              :       Fix new listen.
49  *      Andi Kleen              :       Fix accept error reporting.
50  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
51  *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
52  *                                      a single port at the same time.
53  */
54
55 #include <linux/config.h>
56
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
65
66 #include <net/icmp.h>
67 #include <net/tcp.h>
68 #include <net/ipv6.h>
69 #include <net/inet_common.h>
70 #include <net/xfrm.h>
71
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/vserver/debug.h>
78
79 extern int sysctl_ip_dynaddr;
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
82
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
85
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
88
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
90                        struct sk_buff *skb);
91
92 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
93         .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
94         .__tcp_lhash_users      =       ATOMIC_INIT(0),
95         .__tcp_lhash_wait
96           = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
97         .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
98 };
99
100 /*
101  * This array holds the first and last local port number.
102  * For high-usage systems, use sysctl to change this to
103  * 32768-61000
104  */
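/* The range is normally exposed through the ip_local_port_range sysctl, e.g.:
 *      echo "32768 61000" > /proc/sys/net/ipv4/ip_local_port_range
 */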
105 int sysctl_local_port_range[2] = { 1024, 4999 };
106 int tcp_port_rover = 1024 - 1;
107
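/* Hash for the established table: fold the local/foreign address and
 * port pairs together and mask the result with (tcp_ehash_size - 1).
 */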
108 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
109                                  __u32 faddr, __u16 fport)
110 {
111         int h = (laddr ^ lport) ^ (faddr ^ fport);
112         h ^= h >> 16;
113         h ^= h >> 8;
114         return h & (tcp_ehash_size - 1);
115 }
116
117 static __inline__ int tcp_sk_hashfn(struct sock *sk)
118 {
119         struct inet_opt *inet = inet_sk(sk);
120         __u32 laddr = inet->rcv_saddr;
121         __u16 lport = inet->num;
122         __u32 faddr = inet->daddr;
123         __u16 fport = inet->dport;
124
125         return tcp_hashfn(laddr, lport, faddr, fport);
126 }
127
128 /* Allocate and initialize a new TCP local port bind bucket.
129  * The bindhash mutex for snum's hash chain must be held here.
130  */
131 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
132                                           unsigned short snum)
133 {
134         struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
135                                                       SLAB_ATOMIC);
136         if (tb) {
137                 tb->port = snum;
138                 tb->fastreuse = 0;
139                 INIT_HLIST_HEAD(&tb->owners);
140                 hlist_add_head(&tb->node, &head->chain);
141         }
142         return tb;
143 }
144
145 /* Caller must hold hashbucket lock for this tb with local BH disabled */
146 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
147 {
148         if (hlist_empty(&tb->owners)) {
149                 __hlist_del(&tb->node);
150                 kmem_cache_free(tcp_bucket_cachep, tb);
151         }
152 }
153
154 /* Caller must disable local BH processing. */
155 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
156 {
157         struct tcp_bind_hashbucket *head =
158                                 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
159         struct tcp_bind_bucket *tb;
160
161         spin_lock(&head->lock);
162         tb = tcp_sk(sk)->bind_hash;
163         sk_add_bind_node(child, &tb->owners);
164         tcp_sk(child)->bind_hash = tb;
165         spin_unlock(&head->lock);
166 }
167
168 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
169 {
170         local_bh_disable();
171         __tcp_inherit_port(sk, child);
172         local_bh_enable();
173 }
174
175 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
176                    unsigned short snum)
177 {
178         inet_sk(sk)->num = snum;
179         sk_add_bind_node(sk, &tb->owners);
180         tcp_sk(sk)->bind_hash = tb;
181 }
182
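/* Check whether another socket already bound to this port makes the bind
 * illegal: sockets bound to different devices never conflict and IPv6-only
 * peers are ignored; otherwise we conflict unless both sockets set
 * SO_REUSEADDR (and the other one is not listening) or the receive
 * addresses are distinct non-wildcard addresses.  Returns non-zero on
 * conflict.
 */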
183 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
184 {
185         const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
186         struct sock *sk2;
187         struct hlist_node *node;
188         int reuse = sk->sk_reuse;
189
190         sk_for_each_bound(sk2, node, &tb->owners) {
191                 if (sk != sk2 &&
192                     !tcp_v6_ipv6only(sk2) &&
193                     (!sk->sk_bound_dev_if ||
194                      !sk2->sk_bound_dev_if ||
195                      sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
196                         if (!reuse || !sk2->sk_reuse ||
197                             sk2->sk_state == TCP_LISTEN) {
198                                 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
199                                 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
200                                     sk2_rcv_saddr == sk_rcv_saddr)
201                                         break;
202                         }
203                 }
204         }
205         return node != NULL;
206 }
207
208 /* Obtain a reference to a local port for the given sock;
209  * if snum is zero it means select any available local port.
210  */
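/* With snum == 0 the port rover is advanced through the local port range
 * and the first port without a bind bucket is taken; an explicit snum is
 * looked up in the bind hash and checked against the existing owners via
 * tcp_bind_conflict() (fastreuse buckets and sk_reuse > 1 short-circuit
 * that check).
 */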
211 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
212 {
213         struct tcp_bind_hashbucket *head;
214         struct hlist_node *node;
215         struct tcp_bind_bucket *tb;
216         int ret;
217
218         local_bh_disable();
219         if (!snum) {
220                 int low = sysctl_local_port_range[0];
221                 int high = sysctl_local_port_range[1];
222                 int remaining = (high - low) + 1;
223                 int rover;
224
225                 spin_lock(&tcp_portalloc_lock);
226                 rover = tcp_port_rover;
227                 do {
228                         rover++;
229                         if (rover < low || rover > high)
230                                 rover = low;
231                         head = &tcp_bhash[tcp_bhashfn(rover)];
232                         spin_lock(&head->lock);
233                         tb_for_each(tb, node, &head->chain)
234                                 if (tb->port == rover)
235                                         goto next;
236                         break;
237                 next:
238                         spin_unlock(&head->lock);
239                 } while (--remaining > 0);
240                 tcp_port_rover = rover;
241                 spin_unlock(&tcp_portalloc_lock);
242
243                 /* Exhausted local port range during search? */
244                 ret = 1;
245                 if (remaining <= 0)
246                         goto fail;
247
248                 /* OK, here is the one we will use.  HEAD is
249          * non-NULL and we hold its mutex.
250                  */
251                 snum = rover;
252         } else {
253                 head = &tcp_bhash[tcp_bhashfn(snum)];
254                 spin_lock(&head->lock);
255                 tb_for_each(tb, node, &head->chain)
256                         if (tb->port == snum)
257                                 goto tb_found;
258         }
259         tb = NULL;
260         goto tb_not_found;
261 tb_found:
262         if (!hlist_empty(&tb->owners)) {
263                 if (sk->sk_reuse > 1)
264                         goto success;
265                 if (tb->fastreuse > 0 &&
266                     sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
267                         goto success;
268                 } else {
269                         ret = 1;
270                         if (tcp_bind_conflict(sk, tb))
271                                 goto fail_unlock;
272                 }
273         }
274 tb_not_found:
275         ret = 1;
276         if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
277                 goto fail_unlock;
278         if (hlist_empty(&tb->owners)) {
279                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
280                         tb->fastreuse = 1;
281                 else
282                         tb->fastreuse = 0;
283         } else if (tb->fastreuse &&
284                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
285                 tb->fastreuse = 0;
286 success:
287         if (!tcp_sk(sk)->bind_hash)
288                 tcp_bind_hash(sk, tb, snum);
289         BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
290         ret = 0;
291
292 fail_unlock:
293         spin_unlock(&head->lock);
294 fail:
295         local_bh_enable();
296         return ret;
297 }
298
299 /* Get rid of any references to a local port held by the
300  * given sock.
301  */
302 static void __tcp_put_port(struct sock *sk)
303 {
304         struct inet_opt *inet = inet_sk(sk);
305         struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
306         struct tcp_bind_bucket *tb;
307
308         spin_lock(&head->lock);
309         tb = tcp_sk(sk)->bind_hash;
310         __sk_del_bind_node(sk);
311         tcp_sk(sk)->bind_hash = NULL;
312         inet->num = 0;
313         tcp_bucket_destroy(tb);
314         spin_unlock(&head->lock);
315 }
316
317 void tcp_put_port(struct sock *sk)
318 {
319         local_bh_disable();
320         __tcp_put_port(sk);
321         local_bh_enable();
322 }
323
324 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
325  * Look, when several writers sleep and a reader wakes them up, all but one
326  * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
327  * this, _but_ remember, it adds useless work on UP machines (a wake up on each
328  * exclusive lock release). It really should be ifdefed.
329  */
330
331 void tcp_listen_wlock(void)
332 {
333         write_lock(&tcp_lhash_lock);
334
335         if (atomic_read(&tcp_lhash_users)) {
336                 DEFINE_WAIT(wait);
337
338                 for (;;) {
339                         prepare_to_wait_exclusive(&tcp_lhash_wait,
340                                                 &wait, TASK_UNINTERRUPTIBLE);
341                         if (!atomic_read(&tcp_lhash_users))
342                                 break;
343                         write_unlock_bh(&tcp_lhash_lock);
344                         schedule();
345                         write_lock_bh(&tcp_lhash_lock);
346                 }
347
348                 finish_wait(&tcp_lhash_wait, &wait);
349         }
350 }
351
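/* Insert an unhashed socket either into the listening hash (under the
 * listeners' write lock) or into the established hash chain selected by
 * tcp_sk_hashfn().  Callers run with local BHs disabled.
 */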
352 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
353 {
354         struct hlist_head *list;
355         rwlock_t *lock;
356
357         BUG_TRAP(sk_unhashed(sk));
358         if (listen_possible && sk->sk_state == TCP_LISTEN) {
359                 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
360                 lock = &tcp_lhash_lock;
361                 tcp_listen_wlock();
362         } else {
363                 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
364                 lock = &tcp_ehash[sk->sk_hashent].lock;
365                 write_lock(lock);
366         }
367         __sk_add_node(sk, list);
368         sock_prot_inc_use(sk->sk_prot);
369         write_unlock(lock);
370         if (listen_possible && sk->sk_state == TCP_LISTEN)
371                 wake_up(&tcp_lhash_wait);
372 }
373
374 static void tcp_v4_hash(struct sock *sk)
375 {
376         if (sk->sk_state != TCP_CLOSE) {
377                 local_bh_disable();
378                 __tcp_v4_hash(sk, 1);
379                 local_bh_enable();
380         }
381 }
382
383 void tcp_unhash(struct sock *sk)
384 {
385         rwlock_t *lock;
386
387         if (sk_unhashed(sk))
388                 goto ende;
389
390         if (sk->sk_state == TCP_LISTEN) {
391                 local_bh_disable();
392                 tcp_listen_wlock();
393                 lock = &tcp_lhash_lock;
394         } else {
395                 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
396                 lock = &head->lock;
397                 write_lock_bh(&head->lock);
398         }
399
400         if (__sk_del_node_init(sk))
401                 sock_prot_dec_use(sk->sk_prot);
402         write_unlock_bh(lock);
403
404  ende:
405         if (sk->sk_state == TCP_LISTEN)
406                 wake_up(&tcp_lhash_wait);
407 }
408
409 /* Don't inline this cruft.  There are some nice properties to
410  * exploit here.  The BSD API does not allow a listening TCP
411  * to specify the remote port nor the remote address for the
412  * connection.  So always assume those are both wildcarded
413  * during the search since they can never be otherwise.
414  */
415 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
416                                              unsigned short hnum, int dif)
417 {
418         struct sock *result = NULL, *sk;
419         struct hlist_node *node;
420         int score, hiscore;
421
422         hiscore=-1;
423         sk_for_each(sk, node, head) {
424                 struct inet_opt *inet = inet_sk(sk);
425
426                 if (inet->num == hnum && !ipv6_only_sock(sk)) {
427                         __u32 rcv_saddr = inet->rcv_saddr;
428
429                         score = (sk->sk_family == PF_INET ? 1 : 0);
430                         if (rcv_saddr) {
431                                 if (rcv_saddr != daddr)
432                                         continue;
433                                 score+=2;
434                         }
435                         if (sk->sk_bound_dev_if) {
436                                 if (sk->sk_bound_dev_if != dif)
437                                         continue;
438                                 score+=2;
439                         }
440                         if (score == 5)
441                                 return sk;
442                         if (score > hiscore) {
443                                 hiscore = score;
444                                 result = sk;
445                         }
446                 }
447         }
448         return result;
449 }
450
451 /* Optimize the common listener case. */
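/* Fast path: a chain with a single listener on the right port that is not
 * bound to a device and whose receive address is a wildcard or matches
 * daddr is returned directly, without walking and scoring the whole chain.
 */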
452 struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif)
453 {
454         struct sock *sk = NULL;
455         struct hlist_head *head;
456
457         read_lock(&tcp_lhash_lock);
458         head = &tcp_listening_hash[tcp_lhashfn(hnum)];
459         if (!hlist_empty(head)) {
460                 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
461                 if (inet->num == hnum && !sk->sk_node.next &&
462                     (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
463                     (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
464                     !sk->sk_bound_dev_if)
465                         goto sherry_cache;
466                 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
467         }
468         if (sk) {
469 sherry_cache:
470                 sock_hold(sk);
471         }
472         read_unlock(&tcp_lhash_lock);
473         return sk;
474 }
475
476 EXPORT_SYMBOL_GPL(tcp_v4_lookup_listener);
477
478 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
479  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
480  *
481  * Local BH must be disabled here.
482  */
483
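/* The established table is split in two: ordinary sockets live in the
 * first tcp_ehash_size buckets, their TIME_WAIT counterparts in the
 * mirrored buckets at head + tcp_ehash_size.
 */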
484 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
485                                                        u32 daddr, u16 hnum,
486                                                        int dif)
487 {
488         struct tcp_ehash_bucket *head;
489         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
490         __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
491         struct sock *sk;
492         struct hlist_node *node;
493         /* Optimize here for direct hit, only listening connections can
494          * have wildcards anyway.
495          */
496         int hash = tcp_hashfn(daddr, hnum, saddr, sport);
497         head = &tcp_ehash[hash];
498         read_lock(&head->lock);
499         sk_for_each(sk, node, &head->chain) {
500                 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
501                         goto hit; /* You sunk my battleship! */
502         }
503
504         /* Must check for a TIME_WAIT'er before going to listener hash. */
505         sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
506                 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
507                         goto hit;
508         }
509         sk = NULL;
510 out:
511         read_unlock(&head->lock);
512         return sk;
513 hit:
514         sock_hold(sk);
515         goto out;
516 }
517
518 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
519                                            u32 daddr, u16 hnum, int dif)
520 {
521         struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
522                                                       daddr, hnum, dif);
523
524         return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
525 }
526
527 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
528                                   u16 dport, int dif)
529 {
530         struct sock *sk;
531
532         local_bh_disable();
533         sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
534         local_bh_enable();
535
536         return sk;
537 }
538
539 EXPORT_SYMBOL_GPL(tcp_v4_lookup);
540
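/* Initial sequence number for a passive open, derived from the addresses
 * and ports of the incoming segment by the secure ISN generator.
 */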
541 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
542 {
543         return secure_tcp_sequence_number(skb->nh.iph->daddr,
544                                           skb->nh.iph->saddr,
545                                           skb->h.th->dest,
546                                           skb->h.th->source);
547 }
548
549 /* called with local bh disabled */
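/* Verify that the 4-tuple chosen for sk is unique in both halves of the
 * established hash.  On success the socket is hashed there; if twp is
 * non-NULL a reusable TIME_WAIT bucket, when found, is handed back for the
 * caller to deschedule.  Returns -EADDRNOTAVAIL if the identity is already
 * in use.
 */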
550 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
551                                       struct tcp_tw_bucket **twp)
552 {
553         struct inet_opt *inet = inet_sk(sk);
554         u32 daddr = inet->rcv_saddr;
555         u32 saddr = inet->daddr;
556         int dif = sk->sk_bound_dev_if;
557         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
558         __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
559         int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
560         struct tcp_ehash_bucket *head = &tcp_ehash[hash];
561         struct sock *sk2;
562         struct hlist_node *node;
563         struct tcp_tw_bucket *tw;
564
565         write_lock(&head->lock);
566
567         /* Check TIME-WAIT sockets first. */
568         sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
569                 tw = (struct tcp_tw_bucket *)sk2;
570
571                 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
572                         struct tcp_opt *tp = tcp_sk(sk);
573
574                         /* With PAWS, it is safe from the viewpoint
575                            of data integrity. Even without PAWS it
576                            is safe provided sequence spaces do not
577                            overlap i.e. at data rates <= 80Mbit/sec.
578
579                            Actually, the idea is close to VJ's one, only
580                            the timestamp cache is held not per host but
581                            per port pair, and the TW bucket is used as
582                            the state holder.
583
584                            If the TW bucket has already been destroyed we
585                            fall back to VJ's scheme and use the initial
586                            timestamp retrieved from the peer table.
587                          */
588                         if (tw->tw_ts_recent_stamp &&
589                             (!twp || (sysctl_tcp_tw_reuse &&
590                                       xtime.tv_sec -
591                                       tw->tw_ts_recent_stamp > 1))) {
592                                 if ((tp->write_seq =
593                                                 tw->tw_snd_nxt + 65535 + 2) == 0)
594                                         tp->write_seq = 1;
595                                 tp->ts_recent       = tw->tw_ts_recent;
596                                 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
597                                 sock_hold(sk2);
598                                 goto unique;
599                         } else
600                                 goto not_unique;
601                 }
602         }
603         tw = NULL;
604
605         /* And established part... */
606         sk_for_each(sk2, node, &head->chain) {
607                 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
608                         goto not_unique;
609         }
610
611 unique:
612         /* Must record num and sport now. Otherwise we will see
613          * a socket with a funny identity in the hash table. */
614         inet->num = lport;
615         inet->sport = htons(lport);
616         sk->sk_hashent = hash;
617         BUG_TRAP(sk_unhashed(sk));
618         __sk_add_node(sk, &head->chain);
619         sock_prot_inc_use(sk->sk_prot);
620         write_unlock(&head->lock);
621
622         if (twp) {
623                 *twp = tw;
624                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
625         } else if (tw) {
626                 /* Silly. Should hash-dance instead... */
627                 tcp_tw_deschedule(tw);
628                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
629
630                 tcp_tw_put(tw);
631         }
632
633         return 0;
634
635 not_unique:
636         write_unlock(&head->lock);
637         return -EADDRNOTAVAIL;
638 }
639
640 /*
641  * Bind a port for a connect operation and hash it.
642  */
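/* For an unbound socket the port rover is walked as in tcp_v4_get_port(),
 * but a candidate port is only accepted once __tcp_v4_check_established()
 * proves the resulting 4-tuple unique; buckets with fastreuse >= 0 are
 * skipped because their port may be shared.  An already bound socket is
 * hashed directly when it is the sole owner of its bind bucket, otherwise
 * it too goes through the established check.
 */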
643 static int tcp_v4_hash_connect(struct sock *sk)
644 {
645         unsigned short snum = inet_sk(sk)->num;
646         struct tcp_bind_hashbucket *head;
647         struct tcp_bind_bucket *tb;
648         int ret;
649
650         if (!snum) {
651                 int rover;
652                 int low = sysctl_local_port_range[0];
653                 int high = sysctl_local_port_range[1];
654                 int remaining = (high - low) + 1;
655                 struct hlist_node *node;
656                 struct tcp_tw_bucket *tw = NULL;
657
658                 local_bh_disable();
659
660                 /* TODO. Actually it is not such a bad idea to remove
661                  * tcp_portalloc_lock before next submission to Linus.
662                  * As soon as we touch this place at all it is time to think.
663                  *
664                  * Now it protects single _advisory_ variable tcp_port_rover,
665                  * hence it is mostly useless.
666                  * Code will work nicely if we just delete it, but
667                  * I am afraid that in the contended case it will work no better
668                  * or even worse: another cpu will just hit the same bucket
669                  * and spin there.
670                  * So some cpu salt could remove both contention and
671                  * memory pingpong. Any ideas how to do this in a nice way?
672                  */
673                 spin_lock(&tcp_portalloc_lock);
674                 rover = tcp_port_rover;
675
676                 do {
677                         rover++;
678                         if ((rover < low) || (rover > high))
679                                 rover = low;
680                         head = &tcp_bhash[tcp_bhashfn(rover)];
681                         spin_lock(&head->lock);
682
683                         /* Does not bother with rcv_saddr checks,
684                          * because the established check is already
685                          * unique enough.
686                          */
687                         tb_for_each(tb, node, &head->chain) {
688                                 if (tb->port == rover) {
689                                         BUG_TRAP(!hlist_empty(&tb->owners));
690                                         if (tb->fastreuse >= 0)
691                                                 goto next_port;
692                                         if (!__tcp_v4_check_established(sk,
693                                                                         rover,
694                                                                         &tw))
695                                                 goto ok;
696                                         goto next_port;
697                                 }
698                         }
699
700                         tb = tcp_bucket_create(head, rover);
701                         if (!tb) {
702                                 spin_unlock(&head->lock);
703                                 break;
704                         }
705                         tb->fastreuse = -1;
706                         goto ok;
707
708                 next_port:
709                         spin_unlock(&head->lock);
710                 } while (--remaining > 0);
711                 tcp_port_rover = rover;
712                 spin_unlock(&tcp_portalloc_lock);
713
714                 local_bh_enable();
715
716                 return -EADDRNOTAVAIL;
717
718 ok:
719                 /* All locks still held and bhs disabled */
720                 tcp_port_rover = rover;
721                 spin_unlock(&tcp_portalloc_lock);
722
723                 tcp_bind_hash(sk, tb, rover);
724                 if (sk_unhashed(sk)) {
725                         inet_sk(sk)->sport = htons(rover);
726                         __tcp_v4_hash(sk, 0);
727                 }
728                 spin_unlock(&head->lock);
729
730                 if (tw) {
731                         tcp_tw_deschedule(tw);
732                         tcp_tw_put(tw);
733                 }
734
735                 ret = 0;
736                 goto out;
737         }
738
739         head  = &tcp_bhash[tcp_bhashfn(snum)];
740         tb  = tcp_sk(sk)->bind_hash;
741         spin_lock_bh(&head->lock);
742         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
743                 __tcp_v4_hash(sk, 0);
744                 spin_unlock_bh(&head->lock);
745                 return 0;
746         } else {
747                 spin_unlock(&head->lock);
748                 /* No definite answer... Walk to established hash table */
749                 ret = __tcp_v4_check_established(sk, snum, NULL);
750 out:
751                 local_bh_enable();
752                 return ret;
753         }
754 }
755
756 /* This will initiate an outgoing connection. */
757 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
758 {
759         struct inet_opt *inet = inet_sk(sk);
760         struct tcp_opt *tp = tcp_sk(sk);
761         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
762         struct rtable *rt;
763         u32 daddr, nexthop;
764         int tmp;
765         int err;
766
767         if (addr_len < sizeof(struct sockaddr_in))
768                 return -EINVAL;
769
770         if (usin->sin_family != AF_INET)
771                 return -EAFNOSUPPORT;
772
773         nexthop = daddr = usin->sin_addr.s_addr;
774         if (inet->opt && inet->opt->srr) {
775                 if (!daddr)
776                         return -EINVAL;
777                 nexthop = inet->opt->faddr;
778         }
779
780         tmp = ip_route_connect(&rt, nexthop, inet->saddr,
781                                RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
782                                IPPROTO_TCP,
783                                inet->sport, usin->sin_port, sk);
784         if (tmp < 0)
785                 return tmp;
786
787         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
788                 ip_rt_put(rt);
789                 return -ENETUNREACH;
790         }
791
792         if (!inet->opt || !inet->opt->srr)
793                 daddr = rt->rt_dst;
794
795         if (!inet->saddr)
796                 inet->saddr = rt->rt_src;
797         inet->rcv_saddr = inet->saddr;
798
799         if (tp->ts_recent_stamp && inet->daddr != daddr) {
800                 /* Reset inherited state */
801                 tp->ts_recent       = 0;
802                 tp->ts_recent_stamp = 0;
803                 tp->write_seq       = 0;
804         }
805
806         if (sysctl_tcp_tw_recycle &&
807             !tp->ts_recent_stamp && rt->rt_dst == daddr) {
808                 struct inet_peer *peer = rt_get_peer(rt);
809
810                 /* VJ's idea. We save the last timestamp seen from
811                  * the destination in the peer table when entering TIME-WAIT state,
812                  * and initialize ts_recent from it when trying a new connection.
813                  */
814
815                 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
816                         tp->ts_recent_stamp = peer->tcp_ts_stamp;
817                         tp->ts_recent = peer->tcp_ts;
818                 }
819         }
820
821         inet->dport = usin->sin_port;
822         inet->daddr = daddr;
823
824         tp->ext_header_len = 0;
825         if (inet->opt)
826                 tp->ext_header_len = inet->opt->optlen;
827
828         tp->mss_clamp = 536;
829
830         /* Socket identity is still unknown (sport may be zero).
831          * However we set state to SYN-SENT and, without releasing the socket
832          * lock, select a source port, enter ourselves into the hash tables and
833          * complete initialization after this.
834          */
835         tcp_set_state(sk, TCP_SYN_SENT);
836         err = tcp_v4_hash_connect(sk);
837         if (err)
838                 goto failure;
839
840         err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
841         if (err)
842                 goto failure;
843
844         /* OK, now commit destination to socket.  */
845         __sk_dst_set(sk, &rt->u.dst);
846         tcp_v4_setup_caps(sk, &rt->u.dst);
847         tp->ext2_header_len = rt->u.dst.header_len;
848
849         if (!tp->write_seq)
850                 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
851                                                            inet->daddr,
852                                                            inet->sport,
853                                                            usin->sin_port);
854
855         inet->id = tp->write_seq ^ jiffies;
856
857         err = tcp_connect(sk);
858         rt = NULL;
859         if (err)
860                 goto failure;
861
862         return 0;
863
864 failure:
865         /* This unhashes the socket and releases the local port, if necessary. */
866         tcp_set_state(sk, TCP_CLOSE);
867         ip_rt_put(rt);
868         sk->sk_route_caps = 0;
869         inet->dport = 0;
870         return err;
871 }
872
873 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
874 {
875         return ((struct rtable *)skb->dst)->rt_iif;
876 }
877
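/* Hash an embryonic connection by its remote address and port, salted with
 * the listener's hash_rnd, into the TCP_SYNQ_HSIZE SYN queue buckets.
 */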
878 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
879 {
880         return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
881 }
882
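/* Find the pending open_request matching the remote port/address and local
 * address; *prevp is set to the predecessor slot so the caller can unlink
 * the request from its chain.
 */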
883 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
884                                               struct open_request ***prevp,
885                                               __u16 rport,
886                                               __u32 raddr, __u32 laddr)
887 {
888         struct tcp_listen_opt *lopt = tp->listen_opt;
889         struct open_request *req, **prev;
890
891         for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
892              (req = *prev) != NULL;
893              prev = &req->dl_next) {
894                 if (req->rmt_port == rport &&
895                     req->af.v4_req.rmt_addr == raddr &&
896                     req->af.v4_req.loc_addr == laddr &&
897                     TCP_INET_FAMILY(req->class->family)) {
898                         BUG_TRAP(!req->sk);
899                         *prevp = prev;
900                         break;
901                 }
902         }
903
904         return req;
905 }
906
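/* Link a new open_request at the head of its SYN queue chain (under
 * syn_wait_lock), arm the initial SYN-ACK retransmit deadline and account
 * for it via tcp_synq_added().
 */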
907 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
908 {
909         struct tcp_opt *tp = tcp_sk(sk);
910         struct tcp_listen_opt *lopt = tp->listen_opt;
911         u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
912
913         req->expires = jiffies + TCP_TIMEOUT_INIT;
914         req->retrans = 0;
915         req->sk = NULL;
916         req->dl_next = lopt->syn_table[h];
917
918         write_lock(&tp->syn_wait_lock);
919         lopt->syn_table[h] = req;
920         write_unlock(&tp->syn_wait_lock);
921
922         tcp_synq_added(sk);
923 }
924
925
926 /*
927  * This routine does path mtu discovery as defined in RFC1191.
928  */
929 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
930                                      u32 mtu)
931 {
932         struct dst_entry *dst;
933         struct inet_opt *inet = inet_sk(sk);
934         struct tcp_opt *tp = tcp_sk(sk);
935
936         /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
937          * sent out by Linux are always < 576 bytes so they should go through
938          * unfragmented).
939          */
940         if (sk->sk_state == TCP_LISTEN)
941                 return;
942
943         /* We don't check in the dst entry if pmtu discovery is forbidden
944          * on this route. We just assume that no packet-too-big packets
945          * are sent back when pmtu discovery is not active.
946          * There is a small race when the user changes this flag in the
947          * route, but I think that's acceptable.
948          */
949         if ((dst = __sk_dst_check(sk, 0)) == NULL)
950                 return;
951
952         dst->ops->update_pmtu(dst, mtu);
953
954         /* Something is about to go wrong... Remember the soft error
955          * in case this connection will not be able to recover.
956          */
957         if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
958                 sk->sk_err_soft = EMSGSIZE;
959
960         mtu = dst_pmtu(dst);
961
962         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
963             tp->pmtu_cookie > mtu) {
964                 tcp_sync_mss(sk, mtu);
965
966                 /* Resend the TCP packet because it's
967                  * clear that the old packet has been
968                  * dropped. This is the new "fast" path mtu
969                  * discovery.
970                  */
971                 tcp_simple_retransmit(sk);
972         } /* else let the usual retransmit timer handle it */
973 }
974
975 /*
976  * This routine is called by the ICMP module when it gets some
977  * sort of error condition.  If err < 0 then the socket should
978  * be closed and the error returned to the user.  If err > 0
979  * it's just the icmp type << 8 | icmp code.  After adjustment
980  * header points to the first 8 bytes of the tcp header.  We need
981  * to find the appropriate port.
982  *
983  * The locking strategy used here is very "optimistic". When
984  * someone else accesses the socket the ICMP is just dropped
985  * and for some paths there is no check at all.
986  * A more general error queue to queue errors for later handling
987  * is probably better.
988  *
989  */
990
991 void tcp_v4_err(struct sk_buff *skb, u32 info)
992 {
993         struct iphdr *iph = (struct iphdr *)skb->data;
994         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
995         struct tcp_opt *tp;
996         struct inet_opt *inet;
997         int type = skb->h.icmph->type;
998         int code = skb->h.icmph->code;
999         struct sock *sk;
1000         __u32 seq;
1001         int err;
1002
1003         if (skb->len < (iph->ihl << 2) + 8) {
1004                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1005                 return;
1006         }
1007
1008         sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1009                            th->source, tcp_v4_iif(skb));
1010         if (!sk) {
1011                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1012                 return;
1013         }
1014         if (sk->sk_state == TCP_TIME_WAIT) {
1015                 tcp_tw_put((struct tcp_tw_bucket *)sk);
1016                 return;
1017         }
1018
1019         bh_lock_sock(sk);
1020         /* If too many ICMPs get dropped on busy
1021          * servers this needs to be solved differently.
1022          */
1023         if (sock_owned_by_user(sk))
1024                 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1025
1026         if (sk->sk_state == TCP_CLOSE)
1027                 goto out;
1028
1029         tp = tcp_sk(sk);
1030         seq = ntohl(th->seq);
1031         if (sk->sk_state != TCP_LISTEN &&
1032             !between(seq, tp->snd_una, tp->snd_nxt)) {
1033                 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1034                 goto out;
1035         }
1036
1037         switch (type) {
1038         case ICMP_SOURCE_QUENCH:
1039                 /* Just silently ignore these. */
1040                 goto out;
1041         case ICMP_PARAMETERPROB:
1042                 err = EPROTO;
1043                 break;
1044         case ICMP_DEST_UNREACH:
1045                 if (code > NR_ICMP_UNREACH)
1046                         goto out;
1047
1048                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1049                         if (!sock_owned_by_user(sk))
1050                                 do_pmtu_discovery(sk, iph, info);
1051                         goto out;
1052                 }
1053
1054                 err = icmp_err_convert[code].errno;
1055                 break;
1056         case ICMP_TIME_EXCEEDED:
1057                 err = EHOSTUNREACH;
1058                 break;
1059         default:
1060                 goto out;
1061         }
1062
1063         switch (sk->sk_state) {
1064                 struct open_request *req, **prev;
1065         case TCP_LISTEN:
1066                 if (sock_owned_by_user(sk))
1067                         goto out;
1068
1069                 req = tcp_v4_search_req(tp, &prev, th->dest,
1070                                         iph->daddr, iph->saddr);
1071                 if (!req)
1072                         goto out;
1073
1074                 /* ICMPs are not backlogged, hence we cannot get
1075                    an established socket here.
1076                  */
1077                 BUG_TRAP(!req->sk);
1078
1079                 if (seq != req->snt_isn) {
1080                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
1081                         goto out;
1082                 }
1083
1084                 /*
1085                  * Still in SYN_RECV, just remove it silently.
1086                  * There is no good way to pass the error to the newly
1087                  * created socket, and POSIX does not want network
1088                  * errors returned from accept().
1089                  */
1090                 tcp_synq_drop(sk, req, prev);
1091                 goto out;
1092
1093         case TCP_SYN_SENT:
1094         case TCP_SYN_RECV:  /* Cannot happen.
1095                                It can happen, e.g., if SYNs crossed.
1096                              */
1097                 if (!sock_owned_by_user(sk)) {
1098                         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1099                         sk->sk_err = err;
1100
1101                         sk->sk_error_report(sk);
1102
1103                         tcp_done(sk);
1104                 } else {
1105                         sk->sk_err_soft = err;
1106                 }
1107                 goto out;
1108         }
1109
1110         /* If we've already connected we will keep trying
1111          * until we time out, or the user gives up.
1112          *
1113          * rfc1122 4.2.3.9 allows us to consider only PROTO_UNREACH and
1114          * PORT_UNREACH as hard errors (well, FRAG_FAILED too, but it is
1115          * obsoleted by pmtu discovery).
1116          *
1117          * Note that in the modern internet, where routing is unreliable
1118          * and broken firewalls sit in every dark corner sending random
1119          * errors ordered by their masters, even these two messages finally
1120          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
1121          *
1122          * Now we are in compliance with RFCs.
1123          *                                                      --ANK (980905)
1124          */
1125
1126         inet = inet_sk(sk);
1127         if (!sock_owned_by_user(sk) && inet->recverr) {
1128                 sk->sk_err = err;
1129                 sk->sk_error_report(sk);
1130         } else  { /* Only an error on timeout */
1131                 sk->sk_err_soft = err;
1132         }
1133
1134 out:
1135         bh_unlock_sock(sk);
1136         sock_put(sk);
1137 }
1138
1139 /* This routine computes an IPv4 TCP checksum. */
1140 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1141                        struct sk_buff *skb)
1142 {
1143         struct inet_opt *inet = inet_sk(sk);
1144
1145         if (skb->ip_summed == CHECKSUM_HW) {
1146                 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1147                 skb->csum = offsetof(struct tcphdr, check);
1148         } else {
1149                 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1150                                          csum_partial((char *)th,
1151                                                       th->doff << 2,
1152                                                       skb->csum));
1153         }
1154 }
1155
1156 /*
1157  *      This routine will send an RST to the other tcp.
1158  *
1159  *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
1160  *                    for reset.
1161  *      Answer: if a packet caused an RST, it is not for a socket
1162  *              existing in our system; if it is matched to a socket,
1163  *              it is just a duplicate segment or a bug in the other side's TCP.
1164  *              So we build the reply based only on the parameters
1165  *              that arrived with the segment.
1166  *      Exception: precedence violation. We do not implement it in any case.
1167  */
1168
1169 static void tcp_v4_send_reset(struct sk_buff *skb)
1170 {
1171         struct tcphdr *th = skb->h.th;
1172         struct tcphdr rth;
1173         struct ip_reply_arg arg;
1174
1175         /* Never send a reset in response to a reset. */
1176         if (th->rst)
1177                 return;
1178
1179         if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1180                 return;
1181
1182         /* Swap the send and the receive. */
1183         memset(&rth, 0, sizeof(struct tcphdr));
1184         rth.dest   = th->source;
1185         rth.source = th->dest;
1186         rth.doff   = sizeof(struct tcphdr) / 4;
1187         rth.rst    = 1;
1188
1189         if (th->ack) {
1190                 rth.seq = th->ack_seq;
1191         } else {
1192                 rth.ack = 1;
1193                 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1194                                     skb->len - (th->doff << 2));
1195         }
1196
1197         memset(&arg, 0, sizeof arg);
1198         arg.iov[0].iov_base = (unsigned char *)&rth;
1199         arg.iov[0].iov_len  = sizeof rth;
1200         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1201                                       skb->nh.iph->saddr, /*XXX*/
1202                                       sizeof(struct tcphdr), IPPROTO_TCP, 0);
1203         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1204
1205         ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1206
1207         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1208         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1209 }
1210
1211 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states outside
1212    socket context, is certainly ugly. What can I do?
1213  */
1214
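/* Build a bare ACK, optionally carrying a timestamp option that echoes ts,
 * checksum it against the addresses of the incoming segment and send it
 * through the reply path of the private tcp_socket.
 */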
1215 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1216                             u32 win, u32 ts)
1217 {
1218         struct tcphdr *th = skb->h.th;
1219         struct {
1220                 struct tcphdr th;
1221                 u32 tsopt[3];
1222         } rep;
1223         struct ip_reply_arg arg;
1224
1225         memset(&rep.th, 0, sizeof(struct tcphdr));
1226         memset(&arg, 0, sizeof arg);
1227
1228         arg.iov[0].iov_base = (unsigned char *)&rep;
1229         arg.iov[0].iov_len  = sizeof(rep.th);
1230         if (ts) {
1231                 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1232                                      (TCPOPT_TIMESTAMP << 8) |
1233                                      TCPOLEN_TIMESTAMP);
1234                 rep.tsopt[1] = htonl(tcp_time_stamp);
1235                 rep.tsopt[2] = htonl(ts);
1236                 arg.iov[0].iov_len = sizeof(rep);
1237         }
1238
1239         /* Swap the send and the receive. */
1240         rep.th.dest    = th->source;
1241         rep.th.source  = th->dest;
1242         rep.th.doff    = arg.iov[0].iov_len / 4;
1243         rep.th.seq     = htonl(seq);
1244         rep.th.ack_seq = htonl(ack);
1245         rep.th.ack     = 1;
1246         rep.th.window  = htons(win);
1247
1248         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1249                                       skb->nh.iph->saddr, /*XXX*/
1250                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
1251         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1252
1253         ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1254
1255         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1256 }
1257
1258 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1259 {
1260         struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1261
1262         tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1263                         tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1264
1265         tcp_tw_put(tw);
1266 }
1267
1268 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1269 {
1270         tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1271                         req->ts_recent);
1272 }
1273
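/* Route the reply for an open_request: the flow is built from the
 * listener's bound device and source port and the request's addresses,
 * honouring a source route saved in the request's IP options; routes that
 * would violate strict source routing are rejected.
 */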
1274 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1275                                           struct open_request *req)
1276 {
1277         struct rtable *rt;
1278         struct ip_options *opt = req->af.v4_req.opt;
1279         struct flowi fl = { .oif = sk->sk_bound_dev_if,
1280                             .nl_u = { .ip4_u =
1281                                       { .daddr = ((opt && opt->srr) ?
1282                                                   opt->faddr :
1283                                                   req->af.v4_req.rmt_addr),
1284                                         .saddr = req->af.v4_req.loc_addr,
1285                                         .tos = RT_CONN_FLAGS(sk) } },
1286                             .proto = IPPROTO_TCP,
1287                             .uli_u = { .ports =
1288                                        { .sport = inet_sk(sk)->sport,
1289                                          .dport = req->rmt_port } } };
1290
1291         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1292                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1293                 return NULL;
1294         }
1295         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1296                 ip_rt_put(rt);
1297                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1298                 return NULL;
1299         }
1300         return &rt->u.dst;
1301 }
1302
1303 /*
1304  *      Send a SYN-ACK in reply to a connection request (SYN).
1305  *      This still operates on an open_request only, not on a big
1306  *      socket.
1307  */
1308 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1309                               struct dst_entry *dst)
1310 {
1311         int err = -1;
1312         struct sk_buff * skb;
1313
1314         /* First, grab a route. */
1315         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1316                 goto out;
1317
1318         skb = tcp_make_synack(sk, dst, req);
1319
1320         if (skb) {
1321                 struct tcphdr *th = skb->h.th;
1322
1323                 th->check = tcp_v4_check(th, skb->len,
1324                                          req->af.v4_req.loc_addr,
1325                                          req->af.v4_req.rmt_addr,
1326                                          csum_partial((char *)th, skb->len,
1327                                                       skb->csum));
1328
1329                 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1330                                             req->af.v4_req.rmt_addr,
1331                                             req->af.v4_req.opt);
1332                 if (err == NET_XMIT_CN)
1333                         err = 0;
1334         }
1335
1336 out:
1337         dst_release(dst);
1338         return err;
1339 }
1340
1341 /*
1342  *      IPv4 open_request destructor.
1343  */
1344 static void tcp_v4_or_free(struct open_request *req)
1345 {
1346         if (req->af.v4_req.opt)
1347                 kfree(req->af.v4_req.opt);
1348 }
1349
1350 static inline void syn_flood_warning(struct sk_buff *skb)
1351 {
1352         static unsigned long warntime;
1353
1354         if (time_after(jiffies, (warntime + HZ * 60))) {
1355                 warntime = jiffies;
1356                 printk(KERN_INFO
1357                        "possible SYN flooding on port %d. Sending cookies.\n",
1358                        ntohs(skb->h.th->dest));
1359         }
1360 }
1361
1362 /*
1363  * Save and compile IPv4 options into the open_request if needed.
1364  */
1365 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1366                                                      struct sk_buff *skb)
1367 {
1368         struct ip_options *opt = &(IPCB(skb)->opt);
1369         struct ip_options *dopt = NULL;
1370
1371         if (opt && opt->optlen) {
1372                 int opt_size = optlength(opt);
1373                 dopt = kmalloc(opt_size, GFP_ATOMIC);
1374                 if (dopt) {
1375                         if (ip_options_echo(dopt, skb)) {
1376                                 kfree(dopt);
1377                                 dopt = NULL;
1378                         }
1379                 }
1380         }
1381         return dopt;
1382 }
1383
1384 /*
1385  * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1386  * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1387  * It would be better to replace it with a global counter for all sockets
1388  * but then some measure against one socket starving all other sockets
1389  * would be needed.
1390  *
1391  * It was 128 by default. Experiments with real servers show that
1392  * it is absolutely not enough even at 100 conn/sec. 256 cures most
1393  * of the problems. This value is adjusted to 128 for very small machines
1394  * (<= 32MB of memory) and to 1024 on normal or better ones (>= 256MB).
1395  * Increasing it further requires changing the hash table size.
1396  */
1397 int sysctl_max_syn_backlog = 256;
1398
1399 struct or_calltable or_ipv4 = {
1400         .family         =       PF_INET,
1401         .rtx_syn_ack    =       tcp_v4_send_synack,
1402         .send_ack       =       tcp_v4_or_send_ack,
1403         .destructor     =       tcp_v4_or_free,
1404         .send_reset     =       tcp_v4_send_reset,
1405 };
1406
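/* Handle an incoming SYN on a listening socket: drop SYNs addressed to
 * broadcast/multicast, fall back to SYN cookies (when configured) if the
 * SYN queue is full, refuse the request when the accept backlog is full and
 * young entries are still queued, then allocate an open_request and parse
 * the SYN's TCP options.
 */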
1407 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1408 {
1409         struct tcp_opt tp;
1410         struct open_request *req;
1411         __u32 saddr = skb->nh.iph->saddr;
1412         __u32 daddr = skb->nh.iph->daddr;
1413         __u32 isn = TCP_SKB_CB(skb)->when;
1414         struct dst_entry *dst = NULL;
1415 #ifdef CONFIG_SYN_COOKIES
1416         int want_cookie = 0;
1417 #else
1418 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1419 #endif
1420
1421         /* Never answer SYNs sent to broadcast or multicast */
1422         if (((struct rtable *)skb->dst)->rt_flags &
1423             (RTCF_BROADCAST | RTCF_MULTICAST))
1424                 goto drop;
1425
1426         /* TW buckets are converted to open requests without
1427          * limitations; they conserve resources and the peer is
1428          * evidently a real one.
1429          */
1430         if (tcp_synq_is_full(sk) && !isn) {
1431 #ifdef CONFIG_SYN_COOKIES
1432                 if (sysctl_tcp_syncookies) {
1433                         want_cookie = 1;
1434                 } else
1435 #endif
1436                 goto drop;
1437         }
1438
1439         /* Accept backlog is full. If we have already queued enough
1440          * warm entries in the syn queue, drop the request. It is better than
1441          * clogging syn queue with openreqs with exponentially increasing
1442          * timeout.
1443          */
1444         if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1445                 goto drop;
1446
1447         req = tcp_openreq_alloc();
1448         if (!req)
1449                 goto drop;
1450
1451         tcp_clear_options(&tp);
1452         tp.mss_clamp = 536;
1453         tp.user_mss  = tcp_sk(sk)->user_mss;
1454
1455         tcp_parse_options(skb, &tp, 0);
1456
1457         if (want_cookie) {
1458                 tcp_clear_options(&tp);
1459                 tp.saw_tstamp = 0;
1460         }
1461
1462         if (tp.saw_tstamp && !tp.rcv_tsval) {
1463                 /* Some OSes (unidentified ones, seen on web servers of
1464                  * interest only to Windows users) do not send their
1465                  * timestamp in the SYN. That is the easy case: we simply
1466                  * do not advertise TS support.
1467                  */
1468                 tp.saw_tstamp = 0;
1469                 tp.tstamp_ok  = 0;
1470         }
1471         tp.tstamp_ok = tp.saw_tstamp;
1472
1473         tcp_openreq_init(req, &tp, skb);
1474         req->af.v4_req.loc_addr = daddr;
1475         req->af.v4_req.rmt_addr = saddr;
1476         req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1477         req->class = &or_ipv4;
1478         if (!want_cookie)
1479                 TCP_ECN_create_request(req, skb->h.th);
1480
1481         if (want_cookie) {
1482 #ifdef CONFIG_SYN_COOKIES
1483                 syn_flood_warning(skb);
1484 #endif
1485                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1486         } else if (!isn) {
1487                 struct inet_peer *peer = NULL;
1488
1489                 /* VJ's idea. We save the last timestamp seen from
1490                  * the destination in the peer table when entering
1491                  * TIME-WAIT state, and check against it before
1492                  * accepting a new connection request.
1493                  *
1494                  * If "isn" is not zero, this request hit a live
1495                  * TIME-WAIT bucket, so all the necessary checks were
1496                  * already made by the code processing TIME-WAIT state.
1497                  */
1498                 if (tp.saw_tstamp &&
1499                     sysctl_tcp_tw_recycle &&
1500                     (dst = tcp_v4_route_req(sk, req)) != NULL &&
1501                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1502                     peer->v4daddr == saddr) {
1503                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1504                             (s32)(peer->tcp_ts - req->ts_recent) >
1505                                                         TCP_PAWS_WINDOW) {
1506                                 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1507                                 dst_release(dst);
1508                                 goto drop_and_free;
1509                         }
1510                 }
1511                 /* Kill the following clause if you dislike this approach. */
1512                 else if (!sysctl_tcp_syncookies &&
1513                          (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1514                           (sysctl_max_syn_backlog >> 2)) &&
1515                          (!peer || !peer->tcp_ts_stamp) &&
1516                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1517                         /* Without syncookies, the last quarter of the
1518                          * backlog is reserved for destinations that
1519                          * have been proven to be alive.
1520                          * This means that under a SYN flood we keep
1521                          * talking to destinations we had already
1522                          * remembered before the flood began.
1523                          */
1524                         NETDEBUG(if (net_ratelimit()) \
1525                                         printk(KERN_DEBUG "TCP: drop open "
1526                                                           "request from %u.%u."
1527                                                           "%u.%u/%u\n", \
1528                                                NIPQUAD(saddr),
1529                                                ntohs(skb->h.th->source)));
1530                         dst_release(dst);
1531                         goto drop_and_free;
1532                 }
1533
1534                 isn = tcp_v4_init_sequence(sk, skb);
1535         }
1536         req->snt_isn = isn;
1537
1538         if (tcp_v4_send_synack(sk, req, dst))
1539                 goto drop_and_free;
1540
1541         if (want_cookie) {
1542                 tcp_openreq_free(req);
1543         } else {
1544                 tcp_v4_synq_add(sk, req);
1545         }
1546         return 0;
1547
1548 drop_and_free:
1549         tcp_openreq_free(req);
1550 drop:
1551         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1552         return 0;
1553 }
1554
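/*
 * A condensed restatement of the admission policy above; the helper
 * and parameter names here are hypothetical and exist only to spell
 * out the order of the checks (not part of this file's build):
 */
#if 0
/*
 * Roughly: the SYN is dropped when the SYN queue is full and a
 * syncookie cannot be sent, or when the accept queue is full and the
 * SYN queue already holds more than one "young" (never retransmitted)
 * request.  Otherwise an open_request is allocated and queued.
 */
static int synq_admits(int synq_full, int can_cookie,
                       int acceptq_full, int young_reqs)
{
        if (synq_full && !can_cookie)
                return 0;               /* drop */
        if (acceptq_full && young_reqs > 1)
                return 0;               /* drop */
        return 1;                       /* allocate and queue an open_request */
}
#endif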
1555
1556 /*
1557  * The three-way handshake has completed - we got a valid ACK -
1558  * now create the new socket.
1559  */
1560 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1561                                   struct open_request *req,
1562                                   struct dst_entry *dst)
1563 {
1564         struct inet_opt *newinet;
1565         struct tcp_opt *newtp;
1566         struct sock *newsk;
1567
1568         if (sk_acceptq_is_full(sk))
1569                 goto exit_overflow;
1570
1571         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1572                 goto exit;
1573
1574         newsk = tcp_create_openreq_child(sk, req, skb);
1575         if (!newsk)
1576                 goto exit;
1577
1578         newsk->sk_dst_cache = dst;
1579         tcp_v4_setup_caps(newsk, dst);
1580
1581         newtp                 = tcp_sk(newsk);
1582         newinet               = inet_sk(newsk);
1583         newinet->daddr        = req->af.v4_req.rmt_addr;
1584         newinet->rcv_saddr    = req->af.v4_req.loc_addr;
1585         newinet->saddr        = req->af.v4_req.loc_addr;
1586         newinet->opt          = req->af.v4_req.opt;
1587         req->af.v4_req.opt    = NULL;
1588         newinet->mc_index     = tcp_v4_iif(skb);
1589         newinet->mc_ttl       = skb->nh.iph->ttl;
1590         newtp->ext_header_len = 0;
1591         if (newinet->opt)
1592                 newtp->ext_header_len = newinet->opt->optlen;
1593         newtp->ext2_header_len = dst->header_len;
1594         newinet->id = newtp->write_seq ^ jiffies;
1595
1596         tcp_sync_mss(newsk, dst_pmtu(dst));
1597         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1598         tcp_initialize_rcv_mss(newsk);
1599
1600         __tcp_v4_hash(newsk, 0);
1601         __tcp_inherit_port(sk, newsk);
1602
1603         return newsk;
1604
1605 exit_overflow:
1606         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1607 exit:
1608         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1609         dst_release(dst);
1610         return NULL;
1611 }
1612
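/*
 * For orientation: the child socket created above by
 * tcp_v4_syn_recv_sock() is what a user-space server eventually
 * receives from accept().  A minimal listener sketch (user space
 * only, the port number is just an example):
 */
#if 0
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in addr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080);            /* example port */

        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        listen(fd, 128);        /* backlog bounds the accept queue */

        for (;;) {
                int c = accept(fd, NULL, NULL); /* handshake already done */
                if (c >= 0)
                        close(c);
        }
}
#endif
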
1613 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1614 {
1615         struct tcphdr *th = skb->h.th;
1616         struct iphdr *iph = skb->nh.iph;
1617         struct tcp_opt *tp = tcp_sk(sk);
1618         struct sock *nsk;
1619         struct open_request **prev;
1620         /* Find possible connection requests. */
1621         struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1622                                                      iph->saddr, iph->daddr);
1623         if (req)
1624                 return tcp_check_req(sk, skb, req, prev);
1625
1626         nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1627                                           th->source,
1628                                           skb->nh.iph->daddr,
1629                                           ntohs(th->dest),
1630                                           tcp_v4_iif(skb));
1631
1632         if (nsk) {
1633                 if (nsk->sk_state != TCP_TIME_WAIT) {
1634                         bh_lock_sock(nsk);
1635                         return nsk;
1636                 }
1637                 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1638                 return NULL;
1639         }
1640
1641 #ifdef CONFIG_SYN_COOKIES
1642         if (!th->rst && !th->syn && th->ack)
1643                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1644 #endif
1645         return sk;
1646 }
1647
1648 static int tcp_v4_checksum_init(struct sk_buff *skb)
1649 {
1650         if (skb->ip_summed == CHECKSUM_HW) {
1651                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1652                 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1653                                   skb->nh.iph->daddr, skb->csum))
1654                         return 0;
1655
1656                 NETDEBUG(if (net_ratelimit())
1657                                 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1658                 skb->ip_summed = CHECKSUM_NONE;
1659         }
1660         if (skb->len <= 76) {
1661                 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1662                                  skb->nh.iph->daddr,
1663                                  skb_checksum(skb, 0, skb->len, 0)))
1664                         return -1;
1665                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666         } else {
1667                 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1668                                           skb->nh.iph->saddr,
1669                                           skb->nh.iph->daddr, 0);
1670         }
1671         return 0;
1672 }
1673
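/*
 * The checks above all boil down to the standard TCP/IPv4 checksum:
 * a 16-bit one's-complement sum over a pseudo-header (source address,
 * destination address, protocol, TCP length) followed by the TCP
 * header and payload.  A stand-alone sketch of that computation, with
 * the addresses given as plain numeric values (e.g. 127.0.0.1 is
 * 0x7f000001); not part of this file's build:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
                              const uint8_t *seg, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        /* Pseudo-header: saddr, daddr, zero + protocol, TCP length. */
        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += 6;                       /* IPPROTO_TCP */
        sum += len;

        /* TCP header + payload, 16 bits at a time, odd byte zero-padded. */
        for (i = 0; i + 1 < len; i += 2)
                sum += (seg[i] << 8) | seg[i + 1];
        if (len & 1)
                sum += seg[len - 1] << 8;

        while (sum >> 16)               /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}
#endif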
1674
1675 /* The socket must have its spinlock held when we get
1676  * here.
1677  *
1678  * We have a potential double-lock case here, so even when
1679  * doing backlog processing we use the BH locking scheme.
1680  * This is because we cannot sleep with the original spinlock
1681  * held.
1682  */
1683 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1684 {
1685         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1686                 TCP_CHECK_TIMER(sk);
1687                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1688                         goto reset;
1689                 TCP_CHECK_TIMER(sk);
1690                 return 0;
1691         }
1692
1693         if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1694                 goto csum_err;
1695
1696         if (sk->sk_state == TCP_LISTEN) {
1697                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1698                 if (!nsk)
1699                         goto discard;
1700
1701                 if (nsk != sk) {
1702                         if (tcp_child_process(sk, nsk, skb))
1703                                 goto reset;
1704                         return 0;
1705                 }
1706         }
1707
1708         TCP_CHECK_TIMER(sk);
1709         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1710                 goto reset;
1711         TCP_CHECK_TIMER(sk);
1712         return 0;
1713
1714 reset:
1715         tcp_v4_send_reset(skb);
1716 discard:
1717         kfree_skb(skb);
1718         /* Be careful here. If this function gets more complicated and
1719          * gcc suffers from register pressure on the x86, sk (in %ebx)
1720          * might be destroyed here. This current version compiles correctly,
1721          * but you have been warned.
1722          */
1723         return 0;
1724
1725 csum_err:
1726         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1727         goto discard;
1728 }
1729
1730 /*
1731  *      From tcp_input.c
1732  */
1733
1734 int tcp_v4_rcv(struct sk_buff *skb)
1735 {
1736         struct tcphdr *th;
1737         struct sock *sk;
1738         int ret;
1739
1740         if (skb->pkt_type != PACKET_HOST)
1741                 goto discard_it;
1742
1743         /* Count it even if it's bad */
1744         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1745
1746         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1747                 goto discard_it;
1748
1749         th = skb->h.th;
1750
1751         if (th->doff < sizeof(struct tcphdr) / 4)
1752                 goto bad_packet;
1753         if (!pskb_may_pull(skb, th->doff * 4))
1754                 goto discard_it;
1755
1756         /* An explanation is required here, I think.
1757          * Packet length and doff are validated by header prediction,
1758          * provided the case of th->doff==0 is eliminated.
1759          * So, we defer the checks. */
1760         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1761              tcp_v4_checksum_init(skb) < 0))
1762                 goto bad_packet;
1763
1764         th = skb->h.th;
1765         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1766         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1767                                     skb->len - th->doff * 4);
1768         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1769         TCP_SKB_CB(skb)->when    = 0;
1770         TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
1771         TCP_SKB_CB(skb)->sacked  = 0;
1772
1773         sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1774                              skb->nh.iph->daddr, ntohs(th->dest),
1775                              tcp_v4_iif(skb));
1776
1777         if (!sk)
1778                 goto no_tcp_socket;
1779
1780 process:
1781 #if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
1782         /* Silently drop if VNET is active and the context is not
1783          * entitled to read the packet.
1784          */
1785         if (vnet_active) {
1786                 /* Transfer ownership of reusable TIME_WAIT buckets to
1787                  * whomever VNET decided should own the packet.
1788                  */
1789                 if (sk->sk_state == TCP_TIME_WAIT)
1790                         sk->sk_xid = skb->xid;
1791
1792                 if ((int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
1793                         goto discard_it;
1794         }
1795 #endif
1796
1797         if (sk->sk_state == TCP_TIME_WAIT)
1798                 goto do_time_wait;
1799
1800         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1801                 goto discard_and_relse;
1802
1803         if (sk_filter(sk, skb, 0))
1804                 goto discard_and_relse;
1805
1806         skb->dev = NULL;
1807
1808         bh_lock_sock(sk);
1809         ret = 0;
1810         if (!sock_owned_by_user(sk)) {
1811                 if (!tcp_prequeue(sk, skb))
1812                         ret = tcp_v4_do_rcv(sk, skb);
1813         } else
1814                 sk_add_backlog(sk, skb);
1815         bh_unlock_sock(sk);
1816
1817         sock_put(sk);
1818
1819         return ret;
1820
1821 no_tcp_socket:
1822         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1823                 goto discard_it;
1824
1825         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1826 bad_packet:
1827                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1828 #if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
1829         } else if (vnet_active && skb->sk) {
1830                 /* VNET: Suppress RST if the port was bound to a (presumably raw) socket */
1831 #endif
1832         } else {
1833                 tcp_v4_send_reset(skb);
1834         }
1835
1836 discard_it:
1837         /* Discard frame. */
1838         kfree_skb(skb);
1839         return 0;
1840
1841 discard_and_relse:
1842         sock_put(sk);
1843         goto discard_it;
1844
1845 do_time_wait:
1846         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1847                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1848                 goto discard_it;
1849         }
1850
1851         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1852                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1853                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1854                 goto discard_it;
1855         }
1856         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1857                                            skb, th, skb->len)) {
1858         case TCP_TW_SYN: {
1859                 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1860                                                           ntohs(th->dest),
1861                                                           tcp_v4_iif(skb));
1862                 if (sk2) {
1863                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1864                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1865                         sk = sk2;
1866                         goto process;
1867                 }
1868                 /* Fall through to ACK */
1869         }
1870         case TCP_TW_ACK:
1871                 tcp_v4_timewait_ack(sk, skb);
1872                 break;
1873         case TCP_TW_RST:
1874                 goto no_tcp_socket;
1875         case TCP_TW_SUCCESS:;
1876         }
1877         goto discard_it;
1878 }
1879
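/*
 * Note how end_seq is computed above: SYN and FIN each consume one
 * unit of sequence space in addition to the payload bytes.  A tiny
 * worked restatement (hypothetical helper, not part of the build):
 */
#if 0
#include <stdint.h>

/* Sequence space a segment occupies: payload bytes, plus one for SYN
 * and one for FIN.  A bare SYN ends at seq + 1; a 100-byte segment
 * carrying FIN ends at seq + 101. */
static uint32_t tcp_end_seq(uint32_t seq, uint32_t payload, int syn, int fin)
{
        return seq + payload + (syn ? 1 : 0) + (fin ? 1 : 0);
}
#endif
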
1880 /* With per-bucket locks this operation is not atomic anyway,
1881  * so this version is no worse.
1882  */
1883 static void __tcp_v4_rehash(struct sock *sk)
1884 {
1885         sk->sk_prot->unhash(sk);
1886         sk->sk_prot->hash(sk);
1887 }
1888
1889 static int tcp_v4_reselect_saddr(struct sock *sk)
1890 {
1891         struct inet_opt *inet = inet_sk(sk);
1892         int err;
1893         struct rtable *rt;
1894         __u32 old_saddr = inet->saddr;
1895         __u32 new_saddr;
1896         __u32 daddr = inet->daddr;
1897
1898         if (inet->opt && inet->opt->srr)
1899                 daddr = inet->opt->faddr;
1900
1901         /* Query new route. */
1902         err = ip_route_connect(&rt, daddr, 0,
1903                                RT_TOS(inet->tos) | sk->sk_localroute,
1904                                sk->sk_bound_dev_if,
1905                                IPPROTO_TCP,
1906                                inet->sport, inet->dport, sk);
1907         if (err)
1908                 return err;
1909
1910         __sk_dst_set(sk, &rt->u.dst);
1911         tcp_v4_setup_caps(sk, &rt->u.dst);
1912         tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1913
1914         new_saddr = rt->rt_src;
1915
1916         if (new_saddr == old_saddr)
1917                 return 0;
1918
1919         if (sysctl_ip_dynaddr > 1) {
1920                 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1921                                  "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1922                        NIPQUAD(old_saddr),
1923                        NIPQUAD(new_saddr));
1924         }
1925
1926         inet->saddr = new_saddr;
1927         inet->rcv_saddr = new_saddr;
1928
1929         /* XXX The only ugly spot where we really need to
1930          * XXX change the socket's identity after it has
1931          * XXX entered the hashes. -DaveM
1932          *
1933          * Besides that, it does not check for connection
1934          * uniqueness. Wait for trouble.
1935          */
1936         __tcp_v4_rehash(sk);
1937         return 0;
1938 }
1939
1940 int tcp_v4_rebuild_header(struct sock *sk)
1941 {
1942         struct inet_opt *inet = inet_sk(sk);
1943         struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1944         u32 daddr;
1945         int err;
1946
1947         /* Route is OK, nothing to do. */
1948         if (rt)
1949                 return 0;
1950
1951         /* Reroute. */
1952         daddr = inet->daddr;
1953         if (inet->opt && inet->opt->srr)
1954                 daddr = inet->opt->faddr;
1955
1956         {
1957                 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1958                                     .nl_u = { .ip4_u =
1959                                               { .daddr = daddr,
1960                                                 .saddr = inet->saddr,
1961                                                 .tos = RT_CONN_FLAGS(sk) } },
1962                                     .proto = IPPROTO_TCP,
1963                                     .uli_u = { .ports =
1964                                                { .sport = inet->sport,
1965                                                  .dport = inet->dport } } };
1966                                                 
1967                 err = ip_route_output_flow(&rt, &fl, sk, 0);
1968         }
1969         if (!err) {
1970                 __sk_dst_set(sk, &rt->u.dst);
1971                 tcp_v4_setup_caps(sk, &rt->u.dst);
1972                 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1973                 return 0;
1974         }
1975
1976         /* Routing failed... */
1977         sk->sk_route_caps = 0;
1978
1979         if (!sysctl_ip_dynaddr ||
1980             sk->sk_state != TCP_SYN_SENT ||
1981             (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1982             (err = tcp_v4_reselect_saddr(sk)) != 0)
1983                 sk->sk_err_soft = -err;
1984
1985         return err;
1986 }
1987
1988 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1989 {
1990         struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
1991         struct inet_opt *inet = inet_sk(sk);
1992
1993         sin->sin_family         = AF_INET;
1994         sin->sin_addr.s_addr    = inet->daddr;
1995         sin->sin_port           = inet->dport;
1996 }
1997
1998 /* VJ's idea. Save the last timestamp seen from this destination
1999  * and hold it at least for the normal TIME-WAIT interval, to use for
2000  * duplicate segment detection in subsequent connections before they
2001  * enter the synchronized state.
2002  */
2003
2004 int tcp_v4_remember_stamp(struct sock *sk)
2005 {
2006         struct inet_opt *inet = inet_sk(sk);
2007         struct tcp_opt *tp = tcp_sk(sk);
2008         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2009         struct inet_peer *peer = NULL;
2010         int release_it = 0;
2011
2012         if (!rt || rt->rt_dst != inet->daddr) {
2013                 peer = inet_getpeer(inet->daddr, 1);
2014                 release_it = 1;
2015         } else {
2016                 if (!rt->peer)
2017                         rt_bind_peer(rt, 1);
2018                 peer = rt->peer;
2019         }
2020
2021         if (peer) {
2022                 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2023                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2024                      peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2025                         peer->tcp_ts_stamp = tp->ts_recent_stamp;
2026                         peer->tcp_ts = tp->ts_recent;
2027                 }
2028                 if (release_it)
2029                         inet_putpeer(peer);
2030                 return 1;
2031         }
2032
2033         return 0;
2034 }
2035
2036 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2037 {
2038         struct inet_peer *peer = NULL;
2039
2040         peer = inet_getpeer(tw->tw_daddr, 1);
2041
2042         if (peer) {
2043                 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2044                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2045                      peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2046                         peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2047                         peer->tcp_ts = tw->tw_ts_recent;
2048                 }
2049                 inet_putpeer(peer);
2050                 return 1;
2051         }
2052
2053         return 0;
2054 }
2055
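/*
 * Both helpers above compare 32-bit timestamps with (s32)(a - b),
 * which stays correct across counter wrap as long as the two values
 * are less than 2^31 apart.  A minimal stand-alone sketch:
 */
#if 0
#include <stdint.h>

/* Nonzero if timestamp a is not newer than b, modulo 2^32. */
static int ts_before_eq(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) <= 0;
}

/* Example: a = 0xfffffff0, b = 0x00000010.  a - b wraps to 0xffffffe0,
 * which is -32 as a signed value, so a is correctly seen as older. */
#endif
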
2056 struct tcp_func ipv4_specific = {
2057         .queue_xmit     =       ip_queue_xmit,
2058         .send_check     =       tcp_v4_send_check,
2059         .rebuild_header =       tcp_v4_rebuild_header,
2060         .conn_request   =       tcp_v4_conn_request,
2061         .syn_recv_sock  =       tcp_v4_syn_recv_sock,
2062         .remember_stamp =       tcp_v4_remember_stamp,
2063         .net_header_len =       sizeof(struct iphdr),
2064         .setsockopt     =       ip_setsockopt,
2065         .getsockopt     =       ip_getsockopt,
2066         .addr2sockaddr  =       v4_addr2sockaddr,
2067         .sockaddr_len   =       sizeof(struct sockaddr_in),
2068 };
2069
2070 /* NOTE: A lot of things are set to zero explicitly by the call to
2071  *       sk_alloc(), so they need not be done here.
2072  */
2073 static int tcp_v4_init_sock(struct sock *sk)
2074 {
2075         struct tcp_opt *tp = tcp_sk(sk);
2076
2077         skb_queue_head_init(&tp->out_of_order_queue);
2078         tcp_init_xmit_timers(sk);
2079         tcp_prequeue_init(tp);
2080
2081         tp->rto  = TCP_TIMEOUT_INIT;
2082         tp->mdev = TCP_TIMEOUT_INIT;
2083
2084         /* So many TCP implementations out there (incorrectly) count the
2085          * initial SYN frame in their delayed-ACK and congestion control
2086          * algorithms that we must have the following bandaid to talk
2087          * efficiently to them.  -DaveM
2088          */
2089         tp->snd_cwnd = 2;
2090
2091         /* See draft-stevens-tcpca-spec-01 for discussion of the
2092          * initialization of these values.
2093          */
2094         tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
2095         tp->snd_cwnd_clamp = ~0;
2096         tp->mss_cache_std = tp->mss_cache = 536;
2097
2098         tp->reordering = sysctl_tcp_reordering;
2099
2100         sk->sk_state = TCP_CLOSE;
2101
2102         sk->sk_write_space = sk_stream_write_space;
2103         sk->sk_use_write_queue = 1;
2104
2105         tp->af_specific = &ipv4_specific;
2106
2107         sk->sk_sndbuf = sysctl_tcp_wmem[1];
2108         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2109
2110         atomic_inc(&tcp_sockets_allocated);
2111
2112         return 0;
2113 }
2114
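/*
 * With snd_cwnd starting at 2 and snd_ssthresh effectively infinite,
 * a new connection begins in slow start.  A rough model of the
 * resulting growth (illustrative numbers only, not the kernel's code):
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned int cwnd = 2;                  /* segments, as set above */
        unsigned int ssthresh = 0x7fffffff;     /* "infinity" */
        int rtt;

        /* In slow start cwnd roughly doubles each round trip:
         * 2, 4, 8, 16, ... until it hits ssthresh or a loss occurs. */
        for (rtt = 0; rtt < 10 && cwnd < ssthresh; rtt++) {
                printf("rtt %d: cwnd = %u segments\n", rtt, cwnd);
                cwnd *= 2;
        }
        return 0;
}
#endif
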
2115 int tcp_v4_destroy_sock(struct sock *sk)
2116 {
2117         struct tcp_opt *tp = tcp_sk(sk);
2118
2119         tcp_clear_xmit_timers(sk);
2120
2121         /* Clean up the write buffer. */
2122         sk_stream_writequeue_purge(sk);
2123
2124         /* Cleans up our, hopefully empty, out_of_order_queue. */
2125         __skb_queue_purge(&tp->out_of_order_queue);
2126
2127         /* Clean up the prequeue; it really should be empty already. */
2128         __skb_queue_purge(&tp->ucopy.prequeue);
2129
2130         /* Clean up a referenced TCP bind bucket. */
2131         if (tp->bind_hash)
2132                 tcp_put_port(sk);
2133
2134         /*
2135          * If a cached sendmsg page exists, toss it.
2136          */
2137         if (sk->sk_sndmsg_page) {
2138                 __free_page(sk->sk_sndmsg_page);
2139                 sk->sk_sndmsg_page = NULL;
2140         }
2141
2142         atomic_dec(&tcp_sockets_allocated);
2143
2144         return 0;
2145 }
2146
2147 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2148
2149 #ifdef CONFIG_PROC_FS
2150 /* Proc filesystem TCP sock list dumping. */
2151
2152 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2153 {
2154         return hlist_empty(head) ? NULL :
2155                 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2156 }
2157
2158 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2159 {
2160         return tw->tw_node.next ?
2161                 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2162 }
2163
2164 static void *listening_get_next(struct seq_file *seq, void *cur)
2165 {
2166         struct tcp_opt *tp;
2167         struct hlist_node *node;
2168         struct sock *sk = cur;
2169         struct tcp_iter_state* st = seq->private;
2170
2171         if (!sk) {
2172                 st->bucket = 0;
2173                 sk = sk_head(&tcp_listening_hash[0]);
2174                 goto get_sk;
2175         }
2176
2177         ++st->num;
2178
2179         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2180                 struct open_request *req = cur;
2181
2182                 tp = tcp_sk(st->syn_wait_sk);
2183                 req = req->dl_next;
2184                 while (1) {
2185                         while (req) {
2186                                 vxdprintk(VXD_CBIT(net, 6),
2187                                         "sk,req: %p [#%d] (from %d)", req->sk,
2188                                         (req->sk)?req->sk->sk_xid:0, current->xid);
2189                                 if ((!req->sk ||
2190                                      vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH)) &&
2191                                     req->class->family == st->family) {
2192                                         cur = req;
2193                                         goto out;
2194                                 }
2195                                 /* always advance; 'continue' here would loop forever */
2196                                 req = req->dl_next;
2197                         }
2198                         if (++st->sbucket >= TCP_SYNQ_HSIZE)
2199                                 break;
2200 get_req:
2201                         req = tp->listen_opt->syn_table[st->sbucket];
2202                 }
2203                 sk        = sk_next(st->syn_wait_sk);
2204                 st->state = TCP_SEQ_STATE_LISTENING;
2205                 read_unlock_bh(&tp->syn_wait_lock);
2206         } else {
2207                 tp = tcp_sk(sk);
2208                 read_lock_bh(&tp->syn_wait_lock);
2209                 if (tp->listen_opt && tp->listen_opt->qlen)
2210                         goto start_req;
2211                 read_unlock_bh(&tp->syn_wait_lock);
2212                 sk = sk_next(sk);
2213         }
2214 get_sk:
2215         sk_for_each_from(sk, node) {
2216                 vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
2217                         sk, sk->sk_xid, current->xid);
2218                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2219                         continue;
2220                 if (sk->sk_family == st->family) {
2221                         cur = sk;
2222                         goto out;
2223                 }
2224                 tp = tcp_sk(sk);
2225                 read_lock_bh(&tp->syn_wait_lock);
2226                 if (tp->listen_opt && tp->listen_opt->qlen) {
2227 start_req:
2228                         st->uid         = sock_i_uid(sk);
2229                         st->syn_wait_sk = sk;
2230                         st->state       = TCP_SEQ_STATE_OPENREQ;
2231                         st->sbucket     = 0;
2232                         goto get_req;
2233                 }
2234                 read_unlock_bh(&tp->syn_wait_lock);
2235         }
2236         if (++st->bucket < TCP_LHTABLE_SIZE) {
2237                 sk = sk_head(&tcp_listening_hash[st->bucket]);
2238                 goto get_sk;
2239         }
2240         cur = NULL;
2241 out:
2242         return cur;
2243 }
2244
2245 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2246 {
2247         void *rc = listening_get_next(seq, NULL);
2248
2249         while (rc && *pos) {
2250                 rc = listening_get_next(seq, rc);
2251                 --*pos;
2252         }
2253         return rc;
2254 }
2255
2256 static void *established_get_first(struct seq_file *seq)
2257 {
2258         struct tcp_iter_state* st = seq->private;
2259         void *rc = NULL;
2260
2261         for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2262                 struct sock *sk;
2263                 struct hlist_node *node;
2264                 struct tcp_tw_bucket *tw;
2265                
2266                 read_lock(&tcp_ehash[st->bucket].lock);
2267                 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2268                         vxdprintk(VXD_CBIT(net, 6),
2269                                 "sk,egf: %p [#%d] (from %d)",
2270                                 sk, sk->sk_xid, current->xid);
2271                         if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2272                                 continue;
2273                         if (sk->sk_family != st->family)
2274                                 continue;
2275                         rc = sk;
2276                         goto out;
2277                 }
2278                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2279                 tw_for_each(tw, node,
2280                             &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2281                         vxdprintk(VXD_CBIT(net, 6),
2282                                 "tw: %p [#%d] (from %d)",
2283                                 tw, tw->tw_xid, current->xid);
2284                         if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2285                                 continue;
2286                         if (tw->tw_family != st->family)
2287                                 continue;
2288                         rc = tw;
2289                         goto out;
2290                 }
2291                 read_unlock(&tcp_ehash[st->bucket].lock);
2292                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2293         }
2294 out:
2295         return rc;
2296 }
2297
2298 static void *established_get_next(struct seq_file *seq, void *cur)
2299 {
2300         struct sock *sk = cur;
2301         struct tcp_tw_bucket *tw;
2302         struct hlist_node *node;
2303         struct tcp_iter_state* st = seq->private;
2304
2305         ++st->num;
2306
2307         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2308                 tw = cur;
2309                 tw = tw_next(tw);
2310 get_tw:
2311                 while (tw && (tw->tw_family != st->family ||
2312                         !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2313                         tw = tw_next(tw);
2314                 }
2315                 if (tw) {
2316                         cur = tw;
2317                         goto out;
2318                 }
2319                 read_unlock(&tcp_ehash[st->bucket].lock);
2320                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2321                 if (++st->bucket < tcp_ehash_size) {
2322                         read_lock(&tcp_ehash[st->bucket].lock);
2323                         sk = sk_head(&tcp_ehash[st->bucket].chain);
2324                 } else {
2325                         cur = NULL;
2326                         goto out;
2327                 }
2328         } else
2329                 sk = sk_next(sk);
2330
2331         sk_for_each_from(sk, node) {
2332                 vxdprintk(VXD_CBIT(net, 6),
2333                         "sk,egn: %p [#%d] (from %d)",
2334                         sk, sk->sk_xid, current->xid);
2335                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2336                         continue;
2337                 if (sk->sk_family == st->family)
2338                         goto found;
2339         }
2340
2341         st->state = TCP_SEQ_STATE_TIME_WAIT;
2342         tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2343         goto get_tw;
2344 found:
2345         cur = sk;
2346 out:
2347         return cur;
2348 }
2349
2350 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2351 {
2352         void *rc = established_get_first(seq);
2353
2354         while (rc && pos) {
2355                 rc = established_get_next(seq, rc);
2356                 --pos;
2357         }               
2358         return rc;
2359 }
2360
2361 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2362 {
2363         void *rc;
2364         struct tcp_iter_state* st = seq->private;
2365
2366         tcp_listen_lock();
2367         st->state = TCP_SEQ_STATE_LISTENING;
2368         rc        = listening_get_idx(seq, &pos);
2369
2370         if (!rc) {
2371                 tcp_listen_unlock();
2372                 local_bh_disable();
2373                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2374                 rc        = established_get_idx(seq, pos);
2375         }
2376
2377         return rc;
2378 }
2379
2380 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2381 {
2382         struct tcp_iter_state* st = seq->private;
2383         st->state = TCP_SEQ_STATE_LISTENING;
2384         st->num = 0;
2385         return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2386 }
2387
2388 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2389 {
2390         void *rc = NULL;
2391         struct tcp_iter_state* st;
2392
2393         if (v == SEQ_START_TOKEN) {
2394                 rc = tcp_get_idx(seq, 0);
2395                 goto out;
2396         }
2397         st = seq->private;
2398
2399         switch (st->state) {
2400         case TCP_SEQ_STATE_OPENREQ:
2401         case TCP_SEQ_STATE_LISTENING:
2402                 rc = listening_get_next(seq, v);
2403                 if (!rc) {
2404                         tcp_listen_unlock();
2405                         local_bh_disable();
2406                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2407                         rc        = established_get_first(seq);
2408                 }
2409                 break;
2410         case TCP_SEQ_STATE_ESTABLISHED:
2411         case TCP_SEQ_STATE_TIME_WAIT:
2412                 rc = established_get_next(seq, v);
2413                 break;
2414         }
2415 out:
2416         ++*pos;
2417         return rc;
2418 }
2419
2420 static void tcp_seq_stop(struct seq_file *seq, void *v)
2421 {
2422         struct tcp_iter_state* st = seq->private;
2423
2424         switch (st->state) {
2425         case TCP_SEQ_STATE_OPENREQ:
2426                 if (v) {
2427                         struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2428                         read_unlock_bh(&tp->syn_wait_lock);
2429                 }
2430         case TCP_SEQ_STATE_LISTENING:
2431                 if (v != SEQ_START_TOKEN)
2432                         tcp_listen_unlock();
2433                 break;
2434         case TCP_SEQ_STATE_TIME_WAIT:
2435         case TCP_SEQ_STATE_ESTABLISHED:
2436                 if (v)
2437                         read_unlock(&tcp_ehash[st->bucket].lock);
2438                 local_bh_enable();
2439                 break;
2440         }
2441 }
2442
2443 static int tcp_seq_open(struct inode *inode, struct file *file)
2444 {
2445         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2446         struct seq_file *seq;
2447         struct tcp_iter_state *s;
2448         int rc;
2449
2450         if (unlikely(afinfo == NULL))
2451                 return -EINVAL;
2452
2453         s = kmalloc(sizeof(*s), GFP_KERNEL);
2454         if (!s)
2455                 return -ENOMEM;
2456         memset(s, 0, sizeof(*s));
2457         s->family               = afinfo->family;
2458         s->seq_ops.start        = tcp_seq_start;
2459         s->seq_ops.next         = tcp_seq_next;
2460         s->seq_ops.show         = afinfo->seq_show;
2461         s->seq_ops.stop         = tcp_seq_stop;
2462
2463         rc = seq_open(file, &s->seq_ops);
2464         if (rc)
2465                 goto out_kfree;
2466         seq          = file->private_data;
2467         seq->private = s;
2468 out:
2469         return rc;
2470 out_kfree:
2471         kfree(s);
2472         goto out;
2473 }
2474
2475 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2476 {
2477         int rc = 0;
2478         struct proc_dir_entry *p;
2479
2480         if (!afinfo)
2481                 return -EINVAL;
2482         afinfo->seq_fops->owner         = afinfo->owner;
2483         afinfo->seq_fops->open          = tcp_seq_open;
2484         afinfo->seq_fops->read          = seq_read;
2485         afinfo->seq_fops->llseek        = seq_lseek;
2486         afinfo->seq_fops->release       = seq_release_private;
2487         
2488         p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2489         if (p)
2490                 p->data = afinfo;
2491         else
2492                 rc = -ENOMEM;
2493         return rc;
2494 }
2495
2496 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2497 {
2498         if (!afinfo)
2499                 return;
2500         proc_net_remove(afinfo->name);
2501         memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
2502 }
2503
2504 static void get_openreq4(struct sock *sk, struct open_request *req,
2505                          char *tmpbuf, int i, int uid)
2506 {
2507         int ttd = req->expires - jiffies;
2508
2509         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2510                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2511                 i,
2512                 req->af.v4_req.loc_addr,
2513                 ntohs(inet_sk(sk)->sport),
2514                 req->af.v4_req.rmt_addr,
2515                 ntohs(req->rmt_port),
2516                 TCP_SYN_RECV,
2517                 0, 0, /* could print option size, but that is af dependent. */
2518                 1,    /* timers active (only the expire timer) */
2519                 jiffies_to_clock_t(ttd),
2520                 req->retrans,
2521                 uid,
2522                 0,  /* non standard timer */
2523                 0, /* open_requests have no inode */
2524                 atomic_read(&sk->sk_refcnt),
2525                 req);
2526 }
2527
2528 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2529 {
2530         int timer_active;
2531         unsigned long timer_expires;
2532         struct tcp_opt *tp = tcp_sk(sp);
2533         struct inet_opt *inet = inet_sk(sp);
2534         unsigned int dest = inet->daddr;
2535         unsigned int src = inet->rcv_saddr;
2536         __u16 destp = ntohs(inet->dport);
2537         __u16 srcp = ntohs(inet->sport);
2538
2539         if (tp->pending == TCP_TIME_RETRANS) {
2540                 timer_active    = 1;
2541                 timer_expires   = tp->timeout;
2542         } else if (tp->pending == TCP_TIME_PROBE0) {
2543                 timer_active    = 4;
2544                 timer_expires   = tp->timeout;
2545         } else if (timer_pending(&sp->sk_timer)) {
2546                 timer_active    = 2;
2547                 timer_expires   = sp->sk_timer.expires;
2548         } else {
2549                 timer_active    = 0;
2550                 timer_expires = jiffies;
2551         }
2552
2553         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2554                         "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2555                 i, src, srcp, dest, destp, sp->sk_state,
2556                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2557                 timer_active,
2558                 jiffies_to_clock_t(timer_expires - jiffies),
2559                 tp->retransmits,
2560                 sock_i_uid(sp),
2561                 tp->probes_out,
2562                 sock_i_ino(sp),
2563                 atomic_read(&sp->sk_refcnt), sp,
2564                 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2565                 tp->snd_cwnd,
2566                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2567 }
2568
2569 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2570 {
2571         unsigned int dest, src;
2572         __u16 destp, srcp;
2573         int ttd = tw->tw_ttd - jiffies;
2574
2575         if (ttd < 0)
2576                 ttd = 0;
2577
2578         dest  = tw->tw_daddr;
2579         src   = tw->tw_rcv_saddr;
2580         destp = ntohs(tw->tw_dport);
2581         srcp  = ntohs(tw->tw_sport);
2582
2583         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2584                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2585                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2586                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2587                 atomic_read(&tw->tw_refcnt), tw);
2588 }
2589
2590 #define TMPSZ 150
2591
2592 static int tcp4_seq_show(struct seq_file *seq, void *v)
2593 {
2594         struct tcp_iter_state* st;
2595         char tmpbuf[TMPSZ + 1];
2596
2597         if (v == SEQ_START_TOKEN) {
2598                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2599                            "  sl  local_address rem_address   st tx_queue "
2600                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2601                            "inode");
2602                 goto out;
2603         }
2604         st = seq->private;
2605
2606         switch (st->state) {
2607         case TCP_SEQ_STATE_LISTENING:
2608         case TCP_SEQ_STATE_ESTABLISHED:
2609                 get_tcp4_sock(v, tmpbuf, st->num);
2610                 break;
2611         case TCP_SEQ_STATE_OPENREQ:
2612                 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2613                 break;
2614         case TCP_SEQ_STATE_TIME_WAIT:
2615                 get_timewait4_sock(v, tmpbuf, st->num);
2616                 break;
2617         }
2618         seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2619 out:
2620         return 0;
2621 }
2622
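/*
 * The rows formatted above end up in /proc/net/tcp.  A user-space
 * sketch that reads the first few columns back; the addresses are the
 * raw 32-bit values as stored by the kernel (on x86, 127.0.0.1 shows
 * up as 0100007F) while the ports are already in host order.  Not part
 * of this file's build:
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/net/tcp", "r");

        if (!f)
                return 1;
        fgets(line, sizeof(line), f);           /* skip the header row */
        while (fgets(line, sizeof(line), f)) {
                unsigned int laddr, lport, raddr, rport, state;
                struct in_addr a;

                if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
                           &laddr, &lport, &raddr, &rport, &state) != 5)
                        continue;
                a.s_addr = laddr;       /* same byte order the kernel stored */
                printf("%s:%u state %02X\n", inet_ntoa(a), lport, state);
        }
        fclose(f);
        return 0;
}
#endif
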
2623 static struct file_operations tcp4_seq_fops;
2624 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2625         .owner          = THIS_MODULE,
2626         .name           = "tcp",
2627         .family         = AF_INET,
2628         .seq_show       = tcp4_seq_show,
2629         .seq_fops       = &tcp4_seq_fops,
2630 };
2631
2632 int __init tcp4_proc_init(void)
2633 {
2634         return tcp_proc_register(&tcp4_seq_afinfo);
2635 }
2636
2637 void tcp4_proc_exit(void)
2638 {
2639         tcp_proc_unregister(&tcp4_seq_afinfo);
2640 }
2641 #endif /* CONFIG_PROC_FS */
2642
2643 struct proto tcp_prot = {
2644         .name                   = "TCP",
2645         .owner                  = THIS_MODULE,
2646         .close                  = tcp_close,
2647         .connect                = tcp_v4_connect,
2648         .disconnect             = tcp_disconnect,
2649         .accept                 = tcp_accept,
2650         .ioctl                  = tcp_ioctl,
2651         .init                   = tcp_v4_init_sock,
2652         .destroy                = tcp_v4_destroy_sock,
2653         .shutdown               = tcp_shutdown,
2654         .setsockopt             = tcp_setsockopt,
2655         .getsockopt             = tcp_getsockopt,
2656         .sendmsg                = tcp_sendmsg,
2657         .recvmsg                = tcp_recvmsg,
2658         .backlog_rcv            = tcp_v4_do_rcv,
2659         .hash                   = tcp_v4_hash,
2660         .unhash                 = tcp_unhash,
2661         .get_port               = tcp_v4_get_port,
2662         .enter_memory_pressure  = tcp_enter_memory_pressure,
2663         .sockets_allocated      = &tcp_sockets_allocated,
2664         .memory_allocated       = &tcp_memory_allocated,
2665         .memory_pressure        = &tcp_memory_pressure,
2666         .sysctl_mem             = sysctl_tcp_mem,
2667         .sysctl_wmem            = sysctl_tcp_wmem,
2668         .sysctl_rmem            = sysctl_tcp_rmem,
2669         .max_header             = MAX_TCP_HEADER,
2670         .slab_obj_size          = sizeof(struct tcp_sock),
2671 };
2672
2673
2674
2675 void __init tcp_v4_init(struct net_proto_family *ops)
2676 {
2677         int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2678         if (err < 0)
2679                 panic("Failed to create the TCP control socket.\n");
2680         tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
2681         inet_sk(tcp_socket->sk)->uc_ttl = -1;
2682
2683         /* Unhash it so that IP input processing does not even
2684          * see it; we do not wish this socket to see incoming
2685          * packets.
2686          */
2687         tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2688 }
2689
2690 EXPORT_SYMBOL(ipv4_specific);
2691 EXPORT_SYMBOL(tcp_bind_hash);
2692 EXPORT_SYMBOL(tcp_bucket_create);
2693 EXPORT_SYMBOL(tcp_hashinfo);
2694 EXPORT_SYMBOL(tcp_inherit_port);
2695 EXPORT_SYMBOL(tcp_listen_wlock);
2696 EXPORT_SYMBOL(tcp_port_rover);
2697 EXPORT_SYMBOL(tcp_prot);
2698 EXPORT_SYMBOL(tcp_put_port);
2699 EXPORT_SYMBOL(tcp_unhash);
2700 EXPORT_SYMBOL(tcp_v4_conn_request);
2701 EXPORT_SYMBOL(tcp_v4_connect);
2702 EXPORT_SYMBOL(tcp_v4_do_rcv);
2703 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2704 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2705 EXPORT_SYMBOL(tcp_v4_send_check);
2706 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2707
2708 #ifdef CONFIG_PROC_FS
2709 EXPORT_SYMBOL(tcp_proc_register);
2710 EXPORT_SYMBOL(tcp_proc_unregister);
2711 #endif
2712 EXPORT_SYMBOL(sysctl_local_port_range);
2713 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2714 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2715