net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:     $Id$
9  *
10  *              IPv4 specific functions
11  *
12  *
13  *              code split from:
14  *              linux/ipv4/tcp.c
15  *              linux/ipv4/tcp_input.c
16  *              linux/ipv4/tcp_output.c
17  *
18  *              See tcp.c for author information
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 /*
27  * Changes:
28  *              David S. Miller :       New socket lookup architecture.
29  *                                      This code is dedicated to John Dyson.
30  *              David S. Miller :       Change semantics of established hash,
31  *                                      half is devoted to TIME_WAIT sockets
32  *                                      and the rest go in the other half.
33  *              Andi Kleen :            Add support for syncookies and fixed
34  *                                      some bugs: ip options weren't passed to
35  *                                      the TCP layer, missed a check for an
36  *                                      ACK bit.
37  *              Andi Kleen :            Implemented fast path mtu discovery.
38  *                                      Fixed many serious bugs in the
39  *                                      open_request handling and moved
40  *                                      most of it into the af independent code.
41  *                                      Added tail drop and some other bugfixes.
42  *                                      Added new listen semantics.
43  *              Mike McLagan    :       Routing by source
44  *      Juan Jose Ciarlante:            ip_dynaddr bits
45  *              Andi Kleen:             various fixes.
46  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
47  *                                      coma.
48  *      Andi Kleen              :       Fix new listen.
49  *      Andi Kleen              :       Fix accept error reporting.
50  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
51  *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
52  *                                      a single port at the same time.
53  */
54
55 #include <linux/config.h>
56
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
65
66 #include <net/icmp.h>
67 #include <net/tcp.h>
68 #include <net/ipv6.h>
69 #include <net/inet_common.h>
70 #include <net/xfrm.h>
71
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/vserver/debug.h>
78
79 extern int sysctl_ip_dynaddr;
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
82
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
85
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
88
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
90                        struct sk_buff *skb);
91
92 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
93         .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
94         .__tcp_lhash_users      =       ATOMIC_INIT(0),
95         .__tcp_lhash_wait
96           = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
97         .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
98 };
99
100 /*
101  * This array holds the first and last local port number.
102  * For high-usage systems, use sysctl to change this to
103  * 32768-61000
104  */
105 int sysctl_local_port_range[2] = { 1024, 4999 };
106 int tcp_port_rover = 1024 - 1;
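/*
 * For illustration only: on kernels of this family the range above is
 * normally tuned at run time through the ip_local_port_range sysctl rather
 * than by editing this array, e.g.
 *
 *	echo "32768 61000" > /proc/sys/net/ipv4/ip_local_port_range
 *
 * (shown as a usage sketch; the sysctl wiring itself lives elsewhere).
 */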
107
108 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
109                                  __u32 faddr, __u16 fport)
110 {
111         int h = (laddr ^ lport) ^ (faddr ^ fport);
112         h ^= h >> 16;
113         h ^= h >> 8;
114         return h & (tcp_ehash_size - 1);
115 }
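/*
 * Rough worked example of the fold above, with values picked purely for
 * illustration: laddr 10.0.0.1 (0x0a000001), lport 8080 (0x1f90),
 * faddr 10.0.0.2 (0x0a000002), fport 80 (0x0050):
 *
 *	h  = (0x0a000001 ^ 0x1f90) ^ (0x0a000002 ^ 0x0050) = 0x00001fc3
 *	h ^= h >> 16;	-> 0x00001fc3
 *	h ^= h >> 8;	-> 0x00001fdc
 *
 * which is then masked with (tcp_ehash_size - 1), tcp_ehash_size being a
 * power of two.
 */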
116
117 static __inline__ int tcp_sk_hashfn(struct sock *sk)
118 {
119         struct inet_opt *inet = inet_sk(sk);
120         __u32 laddr = inet->rcv_saddr;
121         __u16 lport = inet->num;
122         __u32 faddr = inet->daddr;
123         __u16 fport = inet->dport;
124
125         return tcp_hashfn(laddr, lport, faddr, fport);
126 }
127
128 /* Allocate and initialize a new TCP local port bind bucket.
129  * The bindhash mutex for snum's hash chain must be held here.
130  */
131 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
132                                           unsigned short snum)
133 {
134         struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
135                                                       SLAB_ATOMIC);
136         if (tb) {
137                 tb->port = snum;
138                 tb->fastreuse = 0;
139                 INIT_HLIST_HEAD(&tb->owners);
140                 hlist_add_head(&tb->node, &head->chain);
141         }
142         return tb;
143 }
144
145 /* Caller must hold hashbucket lock for this tb with local BH disabled */
146 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
147 {
148         if (hlist_empty(&tb->owners)) {
149                 __hlist_del(&tb->node);
150                 kmem_cache_free(tcp_bucket_cachep, tb);
151         }
152 }
153
154 /* Caller must disable local BH processing. */
155 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
156 {
157         struct tcp_bind_hashbucket *head =
158                                 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
159         struct tcp_bind_bucket *tb;
160
161         spin_lock(&head->lock);
162         tb = tcp_sk(sk)->bind_hash;
163         sk_add_bind_node(child, &tb->owners);
164         tcp_sk(child)->bind_hash = tb;
165         spin_unlock(&head->lock);
166 }
167
168 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
169 {
170         local_bh_disable();
171         __tcp_inherit_port(sk, child);
172         local_bh_enable();
173 }
174
175 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
176                    unsigned short snum)
177 {
178         inet_sk(sk)->num = snum;
179         sk_add_bind_node(sk, &tb->owners);
180         tcp_sk(sk)->bind_hash = tb;
181 }
182
183 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
184 {
185         const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
186         struct sock *sk2;
187         struct hlist_node *node;
188         int reuse = sk->sk_reuse;
189
190         sk_for_each_bound(sk2, node, &tb->owners) {
191                 if (sk != sk2 &&
192                     !tcp_v6_ipv6only(sk2) &&
193                     (!sk->sk_bound_dev_if ||
194                      !sk2->sk_bound_dev_if ||
195                      sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
196                         if (!reuse || !sk2->sk_reuse ||
197                             sk2->sk_state == TCP_LISTEN) {
198                                 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
199                                 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
200                                     sk2_rcv_saddr == sk_rcv_saddr)
201                                         break;
202                         }
203                 }
204         }
205         return node != NULL;
206 }
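/*
 * In words (a summary of the loop above, not extra policy): a second socket
 * on the same port does not conflict when the two are bound to different
 * specific devices, or to different specific local addresses, or when both
 * have SO_REUSEADDR set and the already-bound one is not listening.
 */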
207
208 /* Obtain a reference to a local port for the given sock,
209  * if snum is zero it means select any available local port.
210  */
211 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
212 {
213         struct tcp_bind_hashbucket *head;
214         struct hlist_node *node;
215         struct tcp_bind_bucket *tb;
216         int ret;
217
218         local_bh_disable();
219         if (!snum) {
220                 int low = sysctl_local_port_range[0];
221                 int high = sysctl_local_port_range[1];
222                 int remaining = (high - low) + 1;
223                 int rover;
224
225                 spin_lock(&tcp_portalloc_lock);
226                 rover = tcp_port_rover;
227                 do {
228                         rover++;
229                         if (rover < low || rover > high)
230                                 rover = low;
231                         head = &tcp_bhash[tcp_bhashfn(rover)];
232                         spin_lock(&head->lock);
233                         tb_for_each(tb, node, &head->chain)
234                                 if (tb->port == rover)
235                                         goto next;
236                         break;
237                 next:
238                         spin_unlock(&head->lock);
239                 } while (--remaining > 0);
240                 tcp_port_rover = rover;
241                 spin_unlock(&tcp_portalloc_lock);
242
243                 /* Exhausted local port range during search? */
244                 ret = 1;
245                 if (remaining <= 0)
246                         goto fail;
247
248                 /* OK, here is the one we will use.  HEAD is
249                  * non-NULL and we hold its mutex.
250                  */
251                 snum = rover;
252         } else {
253                 head = &tcp_bhash[tcp_bhashfn(snum)];
254                 spin_lock(&head->lock);
255                 tb_for_each(tb, node, &head->chain)
256                         if (tb->port == snum)
257                                 goto tb_found;
258         }
259         tb = NULL;
260         goto tb_not_found;
261 tb_found:
262         if (!hlist_empty(&tb->owners)) {
263                 if (sk->sk_reuse > 1)
264                         goto success;
265                 if (tb->fastreuse > 0 &&
266                     sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
267                         goto success;
268                 } else {
269                         ret = 1;
270                         if (tcp_bind_conflict(sk, tb))
271                                 goto fail_unlock;
272                 }
273         }
274 tb_not_found:
275         ret = 1;
276         if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
277                 goto fail_unlock;
278         if (hlist_empty(&tb->owners)) {
279                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
280                         tb->fastreuse = 1;
281                 else
282                         tb->fastreuse = 0;
283         } else if (tb->fastreuse &&
284                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
285                 tb->fastreuse = 0;
286 success:
287         if (!tcp_sk(sk)->bind_hash)
288                 tcp_bind_hash(sk, tb, snum);
289         BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
290         ret = 0;
291
292 fail_unlock:
293         spin_unlock(&head->lock);
294 fail:
295         local_bh_enable();
296         return ret;
297 }
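/*
 * Informal note on tb->fastreuse as used above: 1 means every current owner
 * of the bucket set SO_REUSEADDR and none of them is listening, so a similar
 * socket may share the port without walking the owner list; 0 forces the
 * full tcp_bind_conflict() check; -1, set by the connect-time allocator
 * below, marks a bucket grabbed for an outgoing connection, for which this
 * fast path is never taken.
 */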
298
299 /* Get rid of any references to a local port held by the
300  * given sock.
301  */
302 static void __tcp_put_port(struct sock *sk)
303 {
304         struct inet_opt *inet = inet_sk(sk);
305         struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
306         struct tcp_bind_bucket *tb;
307
308         spin_lock(&head->lock);
309         tb = tcp_sk(sk)->bind_hash;
310         __sk_del_bind_node(sk);
311         tcp_sk(sk)->bind_hash = NULL;
312         inet->num = 0;
313         tcp_bucket_destroy(tb);
314         spin_unlock(&head->lock);
315 }
316
317 void tcp_put_port(struct sock *sk)
318 {
319         local_bh_disable();
320         __tcp_put_port(sk);
321         local_bh_enable();
322 }
323
324 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP but it can be very bad on SMP.
325  * Look, when several writers sleep and reader wakes them up, all but one
326  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
327  * this, _but_ remember, it adds useless work on UP machines (wake up each
328  * exclusive lock release). It should be ifdefed really.
329  */
330
331 void tcp_listen_wlock(void)
332 {
333         write_lock(&tcp_lhash_lock);
334
335         if (atomic_read(&tcp_lhash_users)) {
336                 DEFINE_WAIT(wait);
337
338                 for (;;) {
339                         prepare_to_wait_exclusive(&tcp_lhash_wait,
340                                                 &wait, TASK_UNINTERRUPTIBLE);
341                         if (!atomic_read(&tcp_lhash_users))
342                                 break;
343                         write_unlock_bh(&tcp_lhash_lock);
344                         schedule();
345                         write_lock_bh(&tcp_lhash_lock);
346                 }
347
348                 finish_wait(&tcp_lhash_wait, &wait);
349         }
350 }
351
352 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
353 {
354         struct hlist_head *list;
355         rwlock_t *lock;
356
357         BUG_TRAP(sk_unhashed(sk));
358         if (listen_possible && sk->sk_state == TCP_LISTEN) {
359                 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
360                 lock = &tcp_lhash_lock;
361                 tcp_listen_wlock();
362         } else {
363                 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
364                 lock = &tcp_ehash[sk->sk_hashent].lock;
365                 write_lock(lock);
366         }
367         __sk_add_node(sk, list);
368         sock_prot_inc_use(sk->sk_prot);
369         write_unlock(lock);
370         if (listen_possible && sk->sk_state == TCP_LISTEN)
371                 wake_up(&tcp_lhash_wait);
372 }
373
374 static void tcp_v4_hash(struct sock *sk)
375 {
376         if (sk->sk_state != TCP_CLOSE) {
377                 local_bh_disable();
378                 __tcp_v4_hash(sk, 1);
379                 local_bh_enable();
380         }
381 }
382
383 void tcp_unhash(struct sock *sk)
384 {
385         rwlock_t *lock;
386
387         if (sk_unhashed(sk))
388                 goto ende;
389
390         if (sk->sk_state == TCP_LISTEN) {
391                 local_bh_disable();
392                 tcp_listen_wlock();
393                 lock = &tcp_lhash_lock;
394         } else {
395                 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
396                 lock = &head->lock;
397                 write_lock_bh(&head->lock);
398         }
399
400         if (__sk_del_node_init(sk))
401                 sock_prot_dec_use(sk->sk_prot);
402         write_unlock_bh(lock);
403
404  ende:
405         if (sk->sk_state == TCP_LISTEN)
406                 wake_up(&tcp_lhash_wait);
407 }
408
409 /* Don't inline this cruft.  There are some nice properties to
410  * exploit here.  The BSD API does not allow a listening TCP
411  * to specify the remote port nor the remote address for the
412  * connection.  So always assume those are both wildcarded
413  * during the search since they can never be otherwise.
414  */
415 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
416                                              unsigned short hnum, int dif)
417 {
418         struct sock *result = NULL, *sk;
419         struct hlist_node *node;
420         int score, hiscore;
421
422         hiscore=-1;
423         sk_for_each(sk, node, head) {
424                 struct inet_opt *inet = inet_sk(sk);
425
426                 if (inet->num == hnum && !ipv6_only_sock(sk)) {
427                         __u32 rcv_saddr = inet->rcv_saddr;
428
429                         score = (sk->sk_family == PF_INET ? 1 : 0);
430                         if (rcv_saddr) {
431                                 if (rcv_saddr != daddr)
432                                         continue;
433                                 score+=2;
434                         }
435                         if (sk->sk_bound_dev_if) {
436                                 if (sk->sk_bound_dev_if != dif)
437                                         continue;
438                                 score+=2;
439                         }
440                         if (score == 5)
441                                 return sk;
442                         if (score > hiscore) {
443                                 hiscore = score;
444                                 result = sk;
445                         }
446                 }
447         }
448         return result;
449 }
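/*
 * Scoring used above, spelled out: an AF_INET socket starts at 1, a match on
 * a specific local address adds 2, and a match on a bound device adds 2, so
 * a score of 5 is a fully specific match and ends the search early, while
 * wildcard listeners stay eligible with a lower score.
 */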
450
451 /* Optimize the common listener case. */
452 inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
453                                            int dif)
454 {
455         struct sock *sk = NULL;
456         struct hlist_head *head;
457
458         read_lock(&tcp_lhash_lock);
459         head = &tcp_listening_hash[tcp_lhashfn(hnum)];
460         if (!hlist_empty(head)) {
461                 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
462                 if (inet->num == hnum && !sk->sk_node.next &&
463                     (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
464                     (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
465                     !sk->sk_bound_dev_if)
466                         goto sherry_cache;
467                 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
468         }
469         if (sk) {
470 sherry_cache:
471                 sock_hold(sk);
472         }
473         read_unlock(&tcp_lhash_lock);
474         return sk;
475 }
476
477 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
478  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
479  *
480  * Local BH must be disabled here.
481  */
482
483 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
484                                                        u32 daddr, u16 hnum,
485                                                        int dif)
486 {
487         struct tcp_ehash_bucket *head;
488         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
489         __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
490         struct sock *sk;
491         struct hlist_node *node;
492         /* Optimize here for direct hit, only listening connections can
493          * have wildcards anyways.
494          */
495         int hash = tcp_hashfn(daddr, hnum, saddr, sport);
496         head = &tcp_ehash[hash];
497         read_lock(&head->lock);
498         sk_for_each(sk, node, &head->chain) {
499                 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
500                         goto hit; /* You sunk my battleship! */
501         }
502
503         /* Must check for a TIME_WAIT'er before going to listener hash. */
504         sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
505                 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
506                         goto hit;
507         }
508         sk = NULL;
509 out:
510         read_unlock(&head->lock);
511         return sk;
512 hit:
513         sock_hold(sk);
514         goto out;
515 }
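/*
 * Layout relied upon by the TIME-WAIT walk above: the established table is
 * sized at 2 * tcp_ehash_size buckets, bucket i holding established sockets
 * and bucket i + tcp_ehash_size the TIME-WAIT sockets hashing to the same
 * slot, hence the "head + tcp_ehash_size" chain.
 */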
516
517 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
518                                            u32 daddr, u16 hnum, int dif)
519 {
520         struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
521                                                       daddr, hnum, dif);
522
523         return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
524 }
525
526 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
527                                   u16 dport, int dif)
528 {
529         struct sock *sk;
530
531         local_bh_disable();
532         sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
533         local_bh_enable();
534
535         return sk;
536 }
537
538 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
539 {
540         return secure_tcp_sequence_number(skb->nh.iph->daddr,
541                                           skb->nh.iph->saddr,
542                                           skb->h.th->dest,
543                                           skb->h.th->source);
544 }
545
546 /* called with local bh disabled */
547 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
548                                       struct tcp_tw_bucket **twp)
549 {
550         struct inet_opt *inet = inet_sk(sk);
551         u32 daddr = inet->rcv_saddr;
552         u32 saddr = inet->daddr;
553         int dif = sk->sk_bound_dev_if;
554         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
555         __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
556         int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
557         struct tcp_ehash_bucket *head = &tcp_ehash[hash];
558         struct sock *sk2;
559         struct hlist_node *node;
560         struct tcp_tw_bucket *tw;
561
562         write_lock(&head->lock);
563
564         /* Check TIME-WAIT sockets first. */
565         sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
566                 tw = (struct tcp_tw_bucket *)sk2;
567
568                 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
569                         struct tcp_opt *tp = tcp_sk(sk);
570
571                         /* With PAWS, it is safe from the viewpoint
572                            of data integrity. Even without PAWS it
573                            is safe provided sequence spaces do not
574                            overlap i.e. at data rates <= 80Mbit/sec.
575
576                            Actually, the idea is close to VJ's one,
577                            only the timestamp cache is held not per host,
578                            but per port pair, and the TW bucket is used
579                            as the state holder.
580
581                            If the TW bucket has already been destroyed we
582                            fall back to VJ's scheme and use the initial
583                            timestamp retrieved from the peer table.
584                          */
585                         if (tw->tw_ts_recent_stamp &&
586                             (!twp || (sysctl_tcp_tw_reuse &&
587                                       xtime.tv_sec -
588                                       tw->tw_ts_recent_stamp > 1))) {
589                                 if ((tp->write_seq =
590                                                 tw->tw_snd_nxt + 65535 + 2) == 0)
591                                         tp->write_seq = 1;
592                                 tp->ts_recent       = tw->tw_ts_recent;
593                                 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
594                                 sock_hold(sk2);
595                                 goto unique;
596                         } else
597                                 goto not_unique;
598                 }
599         }
600         tw = NULL;
601
602         /* And established part... */
603         sk_for_each(sk2, node, &head->chain) {
604                 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
605                         goto not_unique;
606         }
607
608 unique:
609         /* Must record num and sport now. Otherwise we will see
610          * a socket with a funny identity in the hash table. */
611         inet->num = lport;
612         inet->sport = htons(lport);
613         sk->sk_hashent = hash;
614         BUG_TRAP(sk_unhashed(sk));
615         __sk_add_node(sk, &head->chain);
616         sock_prot_inc_use(sk->sk_prot);
617         write_unlock(&head->lock);
618
619         if (twp) {
620                 *twp = tw;
621                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
622         } else if (tw) {
623                 /* Silly. Should hash-dance instead... */
624                 tcp_tw_deschedule(tw);
625                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
626
627                 tcp_tw_put(tw);
628         }
629
630         return 0;
631
632 not_unique:
633         write_unlock(&head->lock);
634         return -EADDRNOTAVAIL;
635 }
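/*
 * The TIME-WAIT takeover rule above, in words: an old TIME-WAIT bucket that
 * recorded a timestamp may be reused either on the explicit-bind path
 * (twp == NULL) or, with sysctl_tcp_tw_reuse set, once more than a second
 * has passed since that timestamp was seen.  The new write_seq starts at
 * tw_snd_nxt + 65535 + 2 so it lies safely beyond the old connection's
 * sequence space.
 */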
636
637 /*
638  * Bind a port for a connect operation and hash it.
639  */
640 static int tcp_v4_hash_connect(struct sock *sk)
641 {
642         unsigned short snum = inet_sk(sk)->num;
643         struct tcp_bind_hashbucket *head;
644         struct tcp_bind_bucket *tb;
645         int ret;
646
647         if (!snum) {
648                 int rover;
649                 int low = sysctl_local_port_range[0];
650                 int high = sysctl_local_port_range[1];
651                 int remaining = (high - low) + 1;
652                 struct hlist_node *node;
653                 struct tcp_tw_bucket *tw = NULL;
654
655                 local_bh_disable();
656
657                 /* TODO. Actually it is not such a bad idea to remove
658                  * tcp_portalloc_lock before next submission to Linus.
659                  * As soon as we touch this place at all it is time to think.
660                  *
661                  * Now it protects a single _advisory_ variable tcp_port_rover,
662                  * hence it is mostly useless.
663                  * The code will work nicely if we just delete it, but
664                  * I am afraid that in the contended case it will work no better or
665                  * even worse: another cpu will just hit the same bucket
666                  * and spin there.
667                  * So some cpu salt could remove both contention and
668                  * memory pingpong. Any ideas how to do this in a nice way?
669                  */
670                 spin_lock(&tcp_portalloc_lock);
671                 rover = tcp_port_rover;
672
673                 do {
674                         rover++;
675                         if ((rover < low) || (rover > high))
676                                 rover = low;
677                         head = &tcp_bhash[tcp_bhashfn(rover)];
678                         spin_lock(&head->lock);
679
680                         /* Does not bother with rcv_saddr checks,
681                          * because the established check is already
682                          * unique enough.
683                          */
684                         tb_for_each(tb, node, &head->chain) {
685                                 if (tb->port == rover) {
686                                         BUG_TRAP(!hlist_empty(&tb->owners));
687                                         if (tb->fastreuse >= 0)
688                                                 goto next_port;
689                                         if (!__tcp_v4_check_established(sk,
690                                                                         rover,
691                                                                         &tw))
692                                                 goto ok;
693                                         goto next_port;
694                                 }
695                         }
696
697                         tb = tcp_bucket_create(head, rover);
698                         if (!tb) {
699                                 spin_unlock(&head->lock);
700                                 break;
701                         }
702                         tb->fastreuse = -1;
703                         goto ok;
704
705                 next_port:
706                         spin_unlock(&head->lock);
707                 } while (--remaining > 0);
708                 tcp_port_rover = rover;
709                 spin_unlock(&tcp_portalloc_lock);
710
711                 local_bh_enable();
712
713                 return -EADDRNOTAVAIL;
714
715 ok:
716                 /* All locks still held and bhs disabled */
717                 tcp_port_rover = rover;
718                 spin_unlock(&tcp_portalloc_lock);
719
720                 tcp_bind_hash(sk, tb, rover);
721                 if (sk_unhashed(sk)) {
722                         inet_sk(sk)->sport = htons(rover);
723                         __tcp_v4_hash(sk, 0);
724                 }
725                 spin_unlock(&head->lock);
726
727                 if (tw) {
728                         tcp_tw_deschedule(tw);
729                         tcp_tw_put(tw);
730                 }
731
732                 ret = 0;
733                 goto out;
734         }
735
736         head  = &tcp_bhash[tcp_bhashfn(snum)];
737         tb  = tcp_sk(sk)->bind_hash;
738         spin_lock_bh(&head->lock);
739         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
740                 __tcp_v4_hash(sk, 0);
741                 spin_unlock_bh(&head->lock);
742                 return 0;
743         } else {
744                 spin_unlock(&head->lock);
745                 /* No definite answer... Walk to established hash table */
746                 ret = __tcp_v4_check_established(sk, snum, NULL);
747 out:
748                 local_bh_enable();
749                 return ret;
750         }
751 }
752
753 /* This will initiate an outgoing connection. */
754 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
755 {
756         struct inet_opt *inet = inet_sk(sk);
757         struct tcp_opt *tp = tcp_sk(sk);
758         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
759         struct rtable *rt;
760         u32 daddr, nexthop;
761         int tmp;
762         int err;
763
764         if (addr_len < sizeof(struct sockaddr_in))
765                 return -EINVAL;
766
767         if (usin->sin_family != AF_INET)
768                 return -EAFNOSUPPORT;
769
770         nexthop = daddr = usin->sin_addr.s_addr;
771         if (inet->opt && inet->opt->srr) {
772                 if (!daddr)
773                         return -EINVAL;
774                 nexthop = inet->opt->faddr;
775         }
776
777         tmp = ip_route_connect(&rt, nexthop, inet->saddr,
778                                RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
779                                IPPROTO_TCP,
780                                inet->sport, usin->sin_port, sk);
781         if (tmp < 0)
782                 return tmp;
783
784         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
785                 ip_rt_put(rt);
786                 return -ENETUNREACH;
787         }
788
789         if (!inet->opt || !inet->opt->srr)
790                 daddr = rt->rt_dst;
791
792         if (!inet->saddr)
793                 inet->saddr = rt->rt_src;
794         inet->rcv_saddr = inet->saddr;
795
796         if (tp->ts_recent_stamp && inet->daddr != daddr) {
797                 /* Reset inherited state */
798                 tp->ts_recent       = 0;
799                 tp->ts_recent_stamp = 0;
800                 tp->write_seq       = 0;
801         }
802
803         if (sysctl_tcp_tw_recycle &&
804             !tp->ts_recent_stamp && rt->rt_dst == daddr) {
805                 struct inet_peer *peer = rt_get_peer(rt);
806
807                 /* VJ's idea. We save last timestamp seen from
808                  * the destination in peer table, when entering state TIME-WAIT
809                  * and initialize ts_recent from it, when trying new connection.
810                  */
811
812                 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
813                         tp->ts_recent_stamp = peer->tcp_ts_stamp;
814                         tp->ts_recent = peer->tcp_ts;
815                 }
816         }
817
818         inet->dport = usin->sin_port;
819         inet->daddr = daddr;
820
821         tp->ext_header_len = 0;
822         if (inet->opt)
823                 tp->ext_header_len = inet->opt->optlen;
824
825         tp->mss_clamp = 536;
826
827         /* Socket identity is still unknown (sport may be zero).
828          * However we set state to SYN-SENT and, without releasing the socket
829          * lock, select a source port, enter ourselves into the hash tables and
830          * complete initialization after this.
831          */
832         tcp_set_state(sk, TCP_SYN_SENT);
833         err = tcp_v4_hash_connect(sk);
834         if (err)
835                 goto failure;
836
837         err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
838         if (err)
839                 goto failure;
840
841         /* OK, now commit destination to socket.  */
842         __sk_dst_set(sk, &rt->u.dst);
843         tcp_v4_setup_caps(sk, &rt->u.dst);
844         tp->ext2_header_len = rt->u.dst.header_len;
845
846         if (!tp->write_seq)
847                 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
848                                                            inet->daddr,
849                                                            inet->sport,
850                                                            usin->sin_port);
851
852         inet->id = tp->write_seq ^ jiffies;
853
854         err = tcp_connect(sk);
855         rt = NULL;
856         if (err)
857                 goto failure;
858
859         return 0;
860
861 failure:
862         /* This unhashes the socket and releases the local port, if necessary. */
863         tcp_set_state(sk, TCP_CLOSE);
864         ip_rt_put(rt);
865         sk->sk_route_caps = 0;
866         inet->dport = 0;
867         return err;
868 }
869
870 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
871 {
872         return ((struct rtable *)skb->dst)->rt_iif;
873 }
874
875 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
876 {
877         return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
878 }
879
880 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
881                                               struct open_request ***prevp,
882                                               __u16 rport,
883                                               __u32 raddr, __u32 laddr)
884 {
885         struct tcp_listen_opt *lopt = tp->listen_opt;
886         struct open_request *req, **prev;
887
888         for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
889              (req = *prev) != NULL;
890              prev = &req->dl_next) {
891                 if (req->rmt_port == rport &&
892                     req->af.v4_req.rmt_addr == raddr &&
893                     req->af.v4_req.loc_addr == laddr &&
894                     TCP_INET_FAMILY(req->class->family)) {
895                         BUG_TRAP(!req->sk);
896                         *prevp = prev;
897                         break;
898                 }
899         }
900
901         return req;
902 }
903
904 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
905 {
906         struct tcp_opt *tp = tcp_sk(sk);
907         struct tcp_listen_opt *lopt = tp->listen_opt;
908         u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
909
910         req->expires = jiffies + TCP_TIMEOUT_INIT;
911         req->retrans = 0;
912         req->sk = NULL;
913         req->dl_next = lopt->syn_table[h];
914
915         write_lock(&tp->syn_wait_lock);
916         lopt->syn_table[h] = req;
917         write_unlock(&tp->syn_wait_lock);
918
919 #ifdef CONFIG_ACCEPT_QUEUES
920         tcp_synq_added(sk, req);
921 #else
922         tcp_synq_added(sk);
923 #endif
924 }
925
926
927 /*
928  * This routine does path mtu discovery as defined in RFC1191.
929  */
930 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
931                                      u32 mtu)
932 {
933         struct dst_entry *dst;
934         struct inet_opt *inet = inet_sk(sk);
935         struct tcp_opt *tp = tcp_sk(sk);
936
937         /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
938          * sent out by Linux are always <576 bytes so they should go through
939          * unfragmented).
940          */
941         if (sk->sk_state == TCP_LISTEN)
942                 return;
943
944         /* We don't check in the dst entry if pmtu discovery is forbidden
945          * on this route. We just assume that no packet-too-big packets
946          * are sent back when pmtu discovery is not active.
947          * There is a small race when the user changes this flag in the
948          * route, but I think that's acceptable.
949          */
950         if ((dst = __sk_dst_check(sk, 0)) == NULL)
951                 return;
952
953         dst->ops->update_pmtu(dst, mtu);
954
955         /* Something is about to go wrong... Remember the soft error
956          * in case this connection is not able to recover.
957          */
958         if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
959                 sk->sk_err_soft = EMSGSIZE;
960
961         mtu = dst_pmtu(dst);
962
963         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
964             tp->pmtu_cookie > mtu) {
965                 tcp_sync_mss(sk, mtu);
966
967                 /* Resend the TCP packet because it's
968                  * clear that the old packet has been
969                  * dropped. This is the new "fast" path mtu
970                  * discovery.
971                  */
972                 tcp_simple_retransmit(sk);
973         } /* else let the usual retransmit timer handle it */
974 }
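/*
 * Effect in round numbers, for illustration: if the ICMP reports an MTU of
 * 1400, tcp_sync_mss() ends up with an MSS of roughly 1400 - 20 (IP) - 20
 * (TCP) = 1360 bytes before options, and the dropped segment is resent at
 * that size right away instead of waiting for the retransmit timer.
 */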
975
976 /*
977  * This routine is called by the ICMP module when it gets some
978  * sort of error condition.  If err < 0 then the socket should
979  * be closed and the error returned to the user.  If err > 0
980  * it's just the icmp type << 8 | icmp code.  After adjustment
981  * header points to the first 8 bytes of the tcp header.  We need
982  * to find the appropriate port.
983  *
984  * The locking strategy used here is very "optimistic". When
985  * someone else accesses the socket the ICMP is just dropped
986  * and for some paths there is no check at all.
987  * A more general error queue to queue errors for later handling
988  * is probably better.
989  *
990  */
991
992 void tcp_v4_err(struct sk_buff *skb, u32 info)
993 {
994         struct iphdr *iph = (struct iphdr *)skb->data;
995         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
996         struct tcp_opt *tp;
997         struct inet_opt *inet;
998         int type = skb->h.icmph->type;
999         int code = skb->h.icmph->code;
1000         struct sock *sk;
1001         __u32 seq;
1002         int err;
1003
1004         if (skb->len < (iph->ihl << 2) + 8) {
1005                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1006                 return;
1007         }
1008
1009         sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1010                            th->source, tcp_v4_iif(skb));
1011         if (!sk) {
1012                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1013                 return;
1014         }
1015         if (sk->sk_state == TCP_TIME_WAIT) {
1016                 tcp_tw_put((struct tcp_tw_bucket *)sk);
1017                 return;
1018         }
1019
1020         bh_lock_sock(sk);
1021         /* If too many ICMPs get dropped on busy
1022          * servers this needs to be solved differently.
1023          */
1024         if (sock_owned_by_user(sk))
1025                 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1026
1027         if (sk->sk_state == TCP_CLOSE)
1028                 goto out;
1029
1030         tp = tcp_sk(sk);
1031         seq = ntohl(th->seq);
1032         if (sk->sk_state != TCP_LISTEN &&
1033             !between(seq, tp->snd_una, tp->snd_nxt)) {
1034                 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1035                 goto out;
1036         }
1037
1038         switch (type) {
1039         case ICMP_SOURCE_QUENCH:
1040                 /* Just silently ignore these. */
1041                 goto out;
1042         case ICMP_PARAMETERPROB:
1043                 err = EPROTO;
1044                 break;
1045         case ICMP_DEST_UNREACH:
1046                 if (code > NR_ICMP_UNREACH)
1047                         goto out;
1048
1049                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1050                         if (!sock_owned_by_user(sk))
1051                                 do_pmtu_discovery(sk, iph, info);
1052                         goto out;
1053                 }
1054
1055                 err = icmp_err_convert[code].errno;
1056                 break;
1057         case ICMP_TIME_EXCEEDED:
1058                 err = EHOSTUNREACH;
1059                 break;
1060         default:
1061                 goto out;
1062         }
1063
1064         switch (sk->sk_state) {
1065                 struct open_request *req, **prev;
1066         case TCP_LISTEN:
1067                 if (sock_owned_by_user(sk))
1068                         goto out;
1069
1070                 req = tcp_v4_search_req(tp, &prev, th->dest,
1071                                         iph->daddr, iph->saddr);
1072                 if (!req)
1073                         goto out;
1074
1075                 /* ICMPs are not backlogged, hence we cannot get
1076                    an established socket here.
1077                  */
1078                 BUG_TRAP(!req->sk);
1079
1080                 if (seq != req->snt_isn) {
1081                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
1082                         goto out;
1083                 }
1084
1085                 /*
1086                  * Still in SYN_RECV, just remove it silently.
1087                  * There is no good way to pass the error to the newly
1088                  * created socket, and POSIX does not want network
1089                  * errors returned from accept().
1090                  */
1091                 tcp_synq_drop(sk, req, prev);
1092                 goto out;
1093
1094         case TCP_SYN_SENT:
1095         case TCP_SYN_RECV:  /* Cannot happen.
1096                                It can, however, e.g. if SYNs crossed.
1097                              */
1098                 if (!sock_owned_by_user(sk)) {
1099                         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1100                         sk->sk_err = err;
1101
1102                         sk->sk_error_report(sk);
1103
1104                         tcp_done(sk);
1105                 } else {
1106                         sk->sk_err_soft = err;
1107                 }
1108                 goto out;
1109         }
1110
1111         /* If we've already connected we will keep trying
1112          * until we time out, or the user gives up.
1113          *
1114          * rfc1122 4.2.3.9 allows us to consider as hard errors
1115          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
1116          * but it is obsoleted by pmtu discovery).
1117          *
1118          * Note that in the modern internet, where routing is unreliable
1119          * and broken firewalls sit in every dark corner sending random
1120          * errors as ordered by their masters, even these two messages finally
1121          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
1122          *
1123          * Now we are in compliance with RFCs.
1124          *                                                      --ANK (980905)
1125          */
1126
1127         inet = inet_sk(sk);
1128         if (!sock_owned_by_user(sk) && inet->recverr) {
1129                 sk->sk_err = err;
1130                 sk->sk_error_report(sk);
1131         } else  { /* Only an error on timeout */
1132                 sk->sk_err_soft = err;
1133         }
1134
1135 out:
1136         bh_unlock_sock(sk);
1137         sock_put(sk);
1138 }
1139
1140 /* This routine computes an IPv4 TCP checksum. */
1141 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1142                        struct sk_buff *skb)
1143 {
1144         struct inet_opt *inet = inet_sk(sk);
1145
1146         if (skb->ip_summed == CHECKSUM_HW) {
1147                 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1148                 skb->csum = offsetof(struct tcphdr, check);
1149         } else {
1150                 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1151                                          csum_partial((char *)th,
1152                                                       th->doff << 2,
1153                                                       skb->csum));
1154         }
1155 }
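/*
 * In the CHECKSUM_HW branch above only the pseudo-header sum is written
 * (note the ~), and skb->csum records the offset of the 16-bit check field,
 * so the NIC can fold in the payload and finish the checksum; the else
 * branch computes the whole thing in software.
 */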
1156
1157 /*
1158  *      This routine will send an RST to the other tcp.
1159  *
1160  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
1161  *                    for reset.
1162  *      Answer: if a packet caused RST, it is not for a socket
1163  *              existing in our system; if it is matched to a socket,
1164  *              it is just a duplicate segment or a bug in the other side's TCP.
1165  *              So we build the reply based only on the parameters
1166  *              that arrived with the segment.
1167  *      Exception: precedence violation. We do not implement it in any case.
1168  */
1169
1170 static void tcp_v4_send_reset(struct sk_buff *skb)
1171 {
1172         struct tcphdr *th = skb->h.th;
1173         struct tcphdr rth;
1174         struct ip_reply_arg arg;
1175
1176         /* Never send a reset in response to a reset. */
1177         if (th->rst)
1178                 return;
1179
1180         if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1181                 return;
1182
1183         /* Swap the send and the receive. */
1184         memset(&rth, 0, sizeof(struct tcphdr));
1185         rth.dest   = th->source;
1186         rth.source = th->dest;
1187         rth.doff   = sizeof(struct tcphdr) / 4;
1188         rth.rst    = 1;
1189
1190         if (th->ack) {
1191                 rth.seq = th->ack_seq;
1192         } else {
1193                 rth.ack = 1;
1194                 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1195                                     skb->len - (th->doff << 2));
1196         }
1197
1198         memset(&arg, 0, sizeof arg);
1199         arg.iov[0].iov_base = (unsigned char *)&rth;
1200         arg.iov[0].iov_len  = sizeof rth;
1201         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1202                                       skb->nh.iph->saddr, /*XXX*/
1203                                       sizeof(struct tcphdr), IPPROTO_TCP, 0);
1204         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1205
1206         ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1207
1208         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1209         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1210 }
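/*
 * The sequence number choice above follows RFC 793 reset generation: if the
 * offending segment carried an ACK, the RST reuses that ack_seq as its own
 * sequence number; otherwise the RST is sent with seq 0 and an ACK covering
 * everything the segment occupied (SYN and FIN each counting as one).
 */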
1211
1212 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1213    outside of socket context, is ugly, certainly. What can I do?
1214  */
1215
1216 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1217                             u32 win, u32 ts)
1218 {
1219         struct tcphdr *th = skb->h.th;
1220         struct {
1221                 struct tcphdr th;
1222                 u32 tsopt[3];
1223         } rep;
1224         struct ip_reply_arg arg;
1225
1226         memset(&rep.th, 0, sizeof(struct tcphdr));
1227         memset(&arg, 0, sizeof arg);
1228
1229         arg.iov[0].iov_base = (unsigned char *)&rep;
1230         arg.iov[0].iov_len  = sizeof(rep.th);
1231         if (ts) {
1232                 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1233                                      (TCPOPT_TIMESTAMP << 8) |
1234                                      TCPOLEN_TIMESTAMP);
1235                 rep.tsopt[1] = htonl(tcp_time_stamp);
1236                 rep.tsopt[2] = htonl(ts);
1237                 arg.iov[0].iov_len = sizeof(rep);
1238         }
1239
1240         /* Swap the send and the receive. */
1241         rep.th.dest    = th->source;
1242         rep.th.source  = th->dest;
1243         rep.th.doff    = arg.iov[0].iov_len / 4;
1244         rep.th.seq     = htonl(seq);
1245         rep.th.ack_seq = htonl(ack);
1246         rep.th.ack     = 1;
1247         rep.th.window  = htons(win);
1248
1249         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1250                                       skb->nh.iph->saddr, /*XXX*/
1251                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
1252         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1253
1254         ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1255
1256         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1257 }
1258
1259 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1260 {
1261         struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1262
1263         tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1264                         tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1265
1266         tcp_tw_put(tw);
1267 }
1268
1269 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1270 {
1271         tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1272                         req->ts_recent);
1273 }
1274
1275 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1276                                           struct open_request *req)
1277 {
1278         struct rtable *rt;
1279         struct ip_options *opt = req->af.v4_req.opt;
1280         struct flowi fl = { .oif = sk->sk_bound_dev_if,
1281                             .nl_u = { .ip4_u =
1282                                       { .daddr = ((opt && opt->srr) ?
1283                                                   opt->faddr :
1284                                                   req->af.v4_req.rmt_addr),
1285                                         .saddr = req->af.v4_req.loc_addr,
1286                                         .tos = RT_CONN_FLAGS(sk) } },
1287                             .proto = IPPROTO_TCP,
1288                             .uli_u = { .ports =
1289                                        { .sport = inet_sk(sk)->sport,
1290                                          .dport = req->rmt_port } } };
1291
1292         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1293                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1294                 return NULL;
1295         }
1296         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1297                 ip_rt_put(rt);
1298                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1299                 return NULL;
1300         }
1301         return &rt->u.dst;
1302 }
1303
1304 /*
1305  *      Send a SYN-ACK after having received an ACK.
1306  *      This still operates on a open_request only, not on a big
1307  *      socket.
1308  */
1309 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1310                               struct dst_entry *dst)
1311 {
1312         int err = -1;
1313         struct sk_buff * skb;
1314
1315         /* First, grab a route. */
1316         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1317                 goto out;
1318
1319         skb = tcp_make_synack(sk, dst, req);
1320
1321         if (skb) {
1322                 struct tcphdr *th = skb->h.th;
1323
1324                 th->check = tcp_v4_check(th, skb->len,
1325                                          req->af.v4_req.loc_addr,
1326                                          req->af.v4_req.rmt_addr,
1327                                          csum_partial((char *)th, skb->len,
1328                                                       skb->csum));
1329
1330                 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1331                                             req->af.v4_req.rmt_addr,
1332                                             req->af.v4_req.opt);
1333                 if (err == NET_XMIT_CN)
1334                         err = 0;
1335         }
1336
1337 out:
1338         dst_release(dst);
1339         return err;
1340 }
1341
1342 /*
1343  *      IPv4 open_request destructor.
1344  */
1345 static void tcp_v4_or_free(struct open_request *req)
1346 {
1347         if (req->af.v4_req.opt)
1348                 kfree(req->af.v4_req.opt);
1349 }
1350
1351 static inline void syn_flood_warning(struct sk_buff *skb)
1352 {
1353         static unsigned long warntime;
1354
1355         if (time_after(jiffies, (warntime + HZ * 60))) {
1356                 warntime = jiffies;
1357                 printk(KERN_INFO
1358                        "possible SYN flooding on port %d. Sending cookies.\n",
1359                        ntohs(skb->h.th->dest));
1360         }
1361 }
1362
1363 /*
1364  * Save and compile IPv4 options into the open_request if needed.
1365  */
1366 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1367                                                      struct sk_buff *skb)
1368 {
1369         struct ip_options *opt = &(IPCB(skb)->opt);
1370         struct ip_options *dopt = NULL;
1371
1372         if (opt && opt->optlen) {
1373                 int opt_size = optlength(opt);
1374                 dopt = kmalloc(opt_size, GFP_ATOMIC);
1375                 if (dopt) {
1376                         if (ip_options_echo(dopt, skb)) {
1377                                 kfree(dopt);
1378                                 dopt = NULL;
1379                         }
1380                 }
1381         }
1382         return dopt;
1383 }
1384
1385 /*
1386  * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1387  * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1388  * It would be better to replace it with a global counter for all sockets
1389  * but then some measure against one socket starving all other sockets
1390  * would be needed.
1391  *
1392  * It was 128 by default. Experiments with real servers show that
1393  * it is absolutely not enough even at 100 conn/sec. 256 cures most
1394  * of the problems. This value is adjusted to 128 for very small machines
1395  * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
1396  * Increasing it further requires changing the hash table size.
1397  */
1398 int sysctl_max_syn_backlog = 256;
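/*
 * Runtime tuning sketch: on kernels of this vintage the value is typically
 * exposed as net.ipv4.tcp_max_syn_backlog, e.g.
 *
 *	sysctl -w net.ipv4.tcp_max_syn_backlog=1024
 *
 * (usage illustration only; the sysctl table entry lives elsewhere).
 */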
1399
1400 struct or_calltable or_ipv4 = {
1401         .family         =       PF_INET,
1402         .rtx_syn_ack    =       tcp_v4_send_synack,
1403         .send_ack       =       tcp_v4_or_send_ack,
1404         .destructor     =       tcp_v4_or_free,
1405         .send_reset     =       tcp_v4_send_reset,
1406 };
1407
1408 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1409 {
1410         struct tcp_opt tp;
1411         struct open_request *req;
1412         __u32 saddr = skb->nh.iph->saddr;
1413         __u32 daddr = skb->nh.iph->daddr;
1414         __u32 isn = TCP_SKB_CB(skb)->when;
1415         struct dst_entry *dst = NULL;
1416 #ifdef CONFIG_ACCEPT_QUEUES
1417         int class = 0;
1418 #endif
1419 #ifdef CONFIG_SYN_COOKIES
1420         int want_cookie = 0;
1421 #else
1422 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1423 #endif
1424
1425         /* Never answer SYNs sent to broadcast or multicast */
1426         if (((struct rtable *)skb->dst)->rt_flags &
1427             (RTCF_BROADCAST | RTCF_MULTICAST))
1428                 goto drop;
1429
1430         /* TW buckets are converted to open requests without
1431          * limitation; they conserve resources and the peer is
1432          * evidently a real one.
1433          */
1434         if (tcp_synq_is_full(sk) && !isn) {
1435 #ifdef CONFIG_SYN_COOKIES
1436                 if (sysctl_tcp_syncookies) {
1437                         want_cookie = 1;
1438                 } else
1439 #endif
1440                 goto drop;
1441         }
1442
1443 #ifdef CONFIG_ACCEPT_QUEUES
1444         class = (skb->nfmark <= 0) ? 0 :
1445                 ((skb->nfmark >= NUM_ACCEPT_QUEUES) ? 0: skb->nfmark);
1446         /*
1447          * Accept only if the class has shares set or if the default class
1448          * i.e. class 0 has shares
1449          */
1450         if (!(tcp_sk(sk)->acceptq[class].aq_ratio)) {
1451                 if (tcp_sk(sk)->acceptq[0].aq_ratio) 
1452                         class = 0;
1453                 else
1454                         goto drop;
1455         }
1456 #endif
1457
1458         /* The accept backlog is full. If we have already queued enough
1459          * warm entries in the syn queue, drop the request. That is better
1460          * than clogging the syn queue with openreqs whose timeouts grow
1461          * exponentially.
1462          */
1463 #ifdef CONFIG_ACCEPT_QUEUES
1464         if (sk_acceptq_is_full(sk, class) && tcp_synq_young(sk, class) > 1)
1465 #else
1466         if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1467 #endif
1468                 goto drop;
1469
1470         req = tcp_openreq_alloc();
1471         if (!req)
1472                 goto drop;
1473
1474         tcp_clear_options(&tp);
1475         tp.mss_clamp = 536;
1476         tp.user_mss  = tcp_sk(sk)->user_mss;
1477
1478         tcp_parse_options(skb, &tp, 0);
1479
1480         if (want_cookie) {
1481                 tcp_clear_options(&tp);
1482                 tp.saw_tstamp = 0;
1483         }
1484
1485         if (tp.saw_tstamp && !tp.rcv_tsval) {
1486                 /* Some OSes (unknown ones, but I see them on a web server
1487                  * carrying information of interest only to Windows users)
1488                  * do not send their timestamp in the SYN. It is the easy
1489                  * case: we simply do not advertise TS support.
1490                  */
1491                 tp.saw_tstamp = 0;
1492                 tp.tstamp_ok  = 0;
1493         }
1494         tp.tstamp_ok = tp.saw_tstamp;
1495
1496         tcp_openreq_init(req, &tp, skb);
1497 #ifdef CONFIG_ACCEPT_QUEUES
1498         req->acceptq_class = class;
1499         req->acceptq_time_stamp = jiffies;
1500 #endif
1501         req->af.v4_req.loc_addr = daddr;
1502         req->af.v4_req.rmt_addr = saddr;
1503         req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1504         req->class = &or_ipv4;
1505         if (!want_cookie)
1506                 TCP_ECN_create_request(req, skb->h.th);
1507
1508         if (want_cookie) {
1509 #ifdef CONFIG_SYN_COOKIES
1510                 syn_flood_warning(skb);
1511 #endif
1512                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1513         } else if (!isn) {
1514                 struct inet_peer *peer = NULL;
1515
1516                 /* VJ's idea. We save the last timestamp seen from
1517                  * the destination in the peer table when entering
1518                  * TIME-WAIT state, and check against it before
1519                  * accepting a new connection request.
1520                  *
1521                  * If "isn" is not zero, this request hit a live
1522                  * timewait bucket, so all the necessary checks are
1523                  * made in the function processing the timewait state.
1524                  */
1525                 if (tp.saw_tstamp &&
1526                     sysctl_tcp_tw_recycle &&
1527                     (dst = tcp_v4_route_req(sk, req)) != NULL &&
1528                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1529                     peer->v4daddr == saddr) {
1530                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1531                             (s32)(peer->tcp_ts - req->ts_recent) >
1532                                                         TCP_PAWS_WINDOW) {
1533                                 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1534                                 dst_release(dst);
1535                                 goto drop_and_free;
1536                         }
1537                 }
1538                 /* Kill the following clause, if you dislike this way. */
1539                 else if (!sysctl_tcp_syncookies &&
1540                          (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1541                           (sysctl_max_syn_backlog >> 2)) &&
1542                          (!peer || !peer->tcp_ts_stamp) &&
1543                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1544                         /* Without syncookies, the last quarter of the
1545                          * backlog is reserved for destinations proven
1546                          * to be alive (we get here once fewer than a
1547                          * quarter of the slots remain free). That way we
1548                          * keep talking to destinations we had already
1549                          * remembered before the synflood started.
1550                          */
1551                         NETDEBUG(if (net_ratelimit()) \
1552                                         printk(KERN_DEBUG "TCP: drop open "
1553                                                           "request from %u.%u."
1554                                                           "%u.%u/%u\n", \
1555                                                NIPQUAD(saddr),
1556                                                ntohs(skb->h.th->source)));
1557                         dst_release(dst);
1558                         goto drop_and_free;
1559                 }
1560
1561                 isn = tcp_v4_init_sequence(sk, skb);
1562         }
1563         req->snt_isn = isn;
1564
1565         if (tcp_v4_send_synack(sk, req, dst))
1566                 goto drop_and_free;
1567
1568         if (want_cookie) {
1569                 tcp_openreq_free(req);
1570         } else {
1571                 tcp_v4_synq_add(sk, req);
1572         }
1573         return 0;
1574
1575 drop_and_free:
1576         tcp_openreq_free(req);
1577 drop:
1578         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1579         return 0;
1580 }
1581
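/* Illustrative sketch only, not used anywhere in the kernel: the PAWS-style
 * rejection test applied in tcp_v4_conn_request() when tcp_tw_recycle is
 * enabled, pulled out as a stand-alone predicate.  The helper name is made
 * up for this example; the fields and constants are exactly the ones the
 * function above uses.
 */
static inline int tcp_v4_paws_reject_sketch(const struct inet_peer *peer,
					    __u32 req_ts_recent)
{
	/* Reject when this peer was seen within the last TCP_PAWS_MSL seconds
	 * and the timestamp it sends now is older than the one we remembered
	 * by more than TCP_PAWS_WINDOW, i.e. its clock appears to have gone
	 * backwards, which a genuine new incarnation of the host would not do.
	 */
	return xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
	       (s32)(peer->tcp_ts - req_ts_recent) > TCP_PAWS_WINDOW;
}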
1582
1583 /*
1584  * The three way handshake has completed - we received the final ACK
1585  * for our SYN-ACK - now create the new socket.
1586  */
1587 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1588                                   struct open_request *req,
1589                                   struct dst_entry *dst)
1590 {
1591         struct inet_opt *newinet;
1592         struct tcp_opt *newtp;
1593         struct sock *newsk;
1594
1595 #ifdef CONFIG_ACCEPT_QUEUES
1596         if (sk_acceptq_is_full(sk, req->acceptq_class))
1597 #else
1598         if (sk_acceptq_is_full(sk))
1599 #endif
1600                 goto exit_overflow;
1601
1602         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1603                 goto exit;
1604
1605         newsk = tcp_create_openreq_child(sk, req, skb);
1606         if (!newsk)
1607                 goto exit;
1608
1609         newsk->sk_dst_cache = dst;
1610         tcp_v4_setup_caps(newsk, dst);
1611
1612         newtp                 = tcp_sk(newsk);
1613         newinet               = inet_sk(newsk);
1614         newinet->daddr        = req->af.v4_req.rmt_addr;
1615         newinet->rcv_saddr    = req->af.v4_req.loc_addr;
1616         newinet->saddr        = req->af.v4_req.loc_addr;
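        /* Hand the saved IP options over to the child socket and clear the
         * request's pointer so they are not freed a second time when the
         * open_request itself is released.
         */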
1617         newinet->opt          = req->af.v4_req.opt;
1618         req->af.v4_req.opt    = NULL;
1619         newinet->mc_index     = tcp_v4_iif(skb);
1620         newinet->mc_ttl       = skb->nh.iph->ttl;
1621         newtp->ext_header_len = 0;
1622         if (newinet->opt)
1623                 newtp->ext_header_len = newinet->opt->optlen;
1624         newtp->ext2_header_len = dst->header_len;
1625         newinet->id = newtp->write_seq ^ jiffies;
1626
1627         tcp_sync_mss(newsk, dst_pmtu(dst));
1628         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1629         tcp_initialize_rcv_mss(newsk);
1630
1631         __tcp_v4_hash(newsk, 0);
1632         __tcp_inherit_port(sk, newsk);
1633
1634         return newsk;
1635
1636 exit_overflow:
1637         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1638 exit:
1639         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1640         dst_release(dst);
1641         return NULL;
1642 }
1643
1644 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1645 {
1646         struct tcphdr *th = skb->h.th;
1647         struct iphdr *iph = skb->nh.iph;
1648         struct tcp_opt *tp = tcp_sk(sk);
1649         struct sock *nsk;
1650         struct open_request **prev;
1651         /* Find possible connection requests. */
1652         struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1653                                                      iph->saddr, iph->daddr);
1654         if (req)
1655                 return tcp_check_req(sk, skb, req, prev);
1656
1657         nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1658                                           th->source,
1659                                           skb->nh.iph->daddr,
1660                                           ntohs(th->dest),
1661                                           tcp_v4_iif(skb));
1662
1663         if (nsk) {
1664                 if (nsk->sk_state != TCP_TIME_WAIT) {
1665                         bh_lock_sock(nsk);
1666                         return nsk;
1667                 }
1668                 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1669                 return NULL;
1670         }
1671
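        /* No pending open_request matched and any established/TIME_WAIT hit
         * has already been handled above.  A bare ACK may still complete a
         * syncookie-generated connection, so give the cookie code a look
         * before falling back to the listening socket itself.
         */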
1672 #ifdef CONFIG_SYN_COOKIES
1673         if (!th->rst && !th->syn && th->ack)
1674                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1675 #endif
1676         return sk;
1677 }
1678
1679 static int tcp_v4_checksum_init(struct sk_buff *skb)
1680 {
1681         if (skb->ip_summed == CHECKSUM_HW) {
1682                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1683                 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1684                                   skb->nh.iph->daddr, skb->csum))
1685                         return 0;
1686
1687                 NETDEBUG(if (net_ratelimit())
1688                                 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1689                 skb->ip_summed = CHECKSUM_NONE;
1690         }
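        /* For short segments it is cheapest to verify the checksum in full
         * right away; for anything larger only the pseudo-header sum is
         * seeded here and full verification is deferred, e.g. to the
         * copy-and-checksum path or a later tcp_checksum_complete().
         */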
1691         if (skb->len <= 76) {
1692                 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1693                                  skb->nh.iph->daddr,
1694                                  skb_checksum(skb, 0, skb->len, 0)))
1695                         return -1;
1696                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1697         } else {
1698                 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1699                                           skb->nh.iph->saddr,
1700                                           skb->nh.iph->daddr, 0);
1701         }
1702         return 0;
1703 }
1704
1705
1706 /* The socket must have its spinlock held when we get
1707  * here.
1708  *
1709  * We have a potential double-lock case here, so even when
1710  * doing backlog processing we use the BH locking scheme.
1711  * This is because we cannot sleep with the original spinlock
1712  * held.
1713  */
1714 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1715 {
1716         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1717                 TCP_CHECK_TIMER(sk);
1718                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1719                         goto reset;
1720                 TCP_CHECK_TIMER(sk);
1721                 return 0;
1722         }
1723
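        /* Not the fast path: sanity-check the header length and finish any
         * checksum verification that tcp_v4_checksum_init() deferred before
         * running the state machine.
         */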
1724         if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1725                 goto csum_err;
1726
1727         if (sk->sk_state == TCP_LISTEN) {
1728                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1729                 if (!nsk)
1730                         goto discard;
1731
1732                 if (nsk != sk) {
1733                         if (tcp_child_process(sk, nsk, skb))
1734                                 goto reset;
1735                         return 0;
1736                 }
1737         }
1738
1739         TCP_CHECK_TIMER(sk);
1740         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1741                 goto reset;
1742         TCP_CHECK_TIMER(sk);
1743         return 0;
1744
1745 reset:
1746         tcp_v4_send_reset(skb);
1747 discard:
1748         kfree_skb(skb);
1749         /* Be careful here. If this function gets more complicated and
1750          * gcc suffers from register pressure on the x86, sk (in %ebx)
1751          * might be destroyed here. This current version compiles correctly,
1752          * but you have been warned.
1753          */
1754         return 0;
1755
1756 csum_err:
1757         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1758         goto discard;
1759 }
1760
1761 /*
1762  *      From tcp_input.c
1763  */
1764
1765 int tcp_v4_rcv(struct sk_buff *skb)
1766 {
1767         struct tcphdr *th;
1768         struct sock *sk;
1769         int ret;
1770
1771         if (skb->pkt_type != PACKET_HOST)
1772                 goto discard_it;
1773
1774         /* Count it even if it's bad */
1775         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1776
1777         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1778                 goto discard_it;
1779
1780         th = skb->h.th;
1781
1782         if (th->doff < sizeof(struct tcphdr) / 4)
1783                 goto bad_packet;
1784         if (!pskb_may_pull(skb, th->doff * 4))
1785                 goto discard_it;
1786
1787         /* An explanation is required here, I think.
1788          * Packet length and doff are validated by header prediction,
1789          * provided the case of th->doff==0 is eliminated.
1790          * So, we defer the checks. */
1791         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1792              tcp_v4_checksum_init(skb) < 0))
1793                 goto bad_packet;
1794
1795         th = skb->h.th;
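        /* Fill in the control block: end_seq accounts for one sequence
         * number for each of the SYN and FIN flags plus the payload length
         * (total length minus the TCP header).
         */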
1796         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1797         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1798                                     skb->len - th->doff * 4);
1799         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1800         TCP_SKB_CB(skb)->when    = 0;
1801         TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
1802         TCP_SKB_CB(skb)->sacked  = 0;
1803
1804         sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1805                              skb->nh.iph->daddr, ntohs(th->dest),
1806                              tcp_v4_iif(skb));
1807
1808         if (!sk)
1809                 goto no_tcp_socket;
1810
1811 process:
1812 #if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
1813         /* Silently drop if VNET is active and the context is not
1814          * entitled to read the packet.
1815          */
1816         if (vnet_active) {
1817                 /* Transfer ownership of reusable TIME_WAIT buckets to
1818                  * whoever VNET decided should own the packet.
1819                  */
1820                 if (sk->sk_state == TCP_TIME_WAIT)
1821                         sk->sk_xid = skb->xid;
1822
1823                 if ((int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
1824                         goto discard_it;
1825         }
1826 #endif
1827
1828         if (sk->sk_state == TCP_TIME_WAIT)
1829                 goto do_time_wait;
1830
1831         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1832                 goto discard_and_relse;
1833
1834         if (sk_filter(sk, skb, 0))
1835                 goto discard_and_relse;
1836
1837         skb->dev = NULL;
1838
1839         bh_lock_sock(sk);
1840         ret = 0;
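        /* If no process context owns the socket, try to hand the segment to
         * a waiting reader via the prequeue and otherwise process it here;
         * if the owner holds the socket, park it on the backlog to be
         * processed when the socket is released.
         */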
1841         if (!sock_owned_by_user(sk)) {
1842                 if (!tcp_prequeue(sk, skb))
1843                         ret = tcp_v4_do_rcv(sk, skb);
1844         } else
1845                 sk_add_backlog(sk, skb);
1846         bh_unlock_sock(sk);
1847
1848         sock_put(sk);
1849
1850         return ret;
1851
1852 no_tcp_socket:
1853         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1854                 goto discard_it;
1855
1856         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1857 bad_packet:
1858                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1859 #if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
1860         } else if (vnet_active && skb->sk) {
1861                 /* VNET: Suppress RST if the port was bound to a (presumably raw) socket */
1862 #endif
1863         } else {
1864                 tcp_v4_send_reset(skb);
1865         }
1866
1867 discard_it:
1868         /* Discard frame. */
1869         kfree_skb(skb);
1870         return 0;
1871
1872 discard_and_relse:
1873         sock_put(sk);
1874         goto discard_it;
1875
1876 do_time_wait:
1877         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1878                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1879                 goto discard_it;
1880         }
1881
1882         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1883                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1884                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1885                 goto discard_it;
1886         }
1887         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1888                                            skb, th, skb->len)) {
1889         case TCP_TW_SYN: {
1890                 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1891                                                           ntohs(th->dest),
1892                                                           tcp_v4_iif(skb));
1893                 if (sk2) {
1894                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1895                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1896                         sk = sk2;
1897                         goto process;
1898                 }
1899                 /* Fall through to ACK */
1900         }
1901         case TCP_TW_ACK:
1902                 tcp_v4_timewait_ack(sk, skb);
1903                 break;
1904         case TCP_TW_RST:
1905                 goto no_tcp_socket;
1906         case TCP_TW_SUCCESS:;
1907         }
1908         goto discard_it;
1909 }
1910
1911 /* With per-bucket locks this operation is not atomic, so
1912  * this version is no worse.
1913  */
1914 static void __tcp_v4_rehash(struct sock *sk)
1915 {
1916         sk->sk_prot->unhash(sk);
1917         sk->sk_prot->hash(sk);
1918 }
1919
1920 static int tcp_v4_reselect_saddr(struct sock *sk)
1921 {
1922         struct inet_opt *inet = inet_sk(sk);
1923         int err;
1924         struct rtable *rt;
1925         __u32 old_saddr = inet->saddr;
1926         __u32 new_saddr;
1927         __u32 daddr = inet->daddr;
1928
1929         if (inet->opt && inet->opt->srr)
1930                 daddr = inet->opt->faddr;
1931
1932         /* Query new route. */
1933         err = ip_route_connect(&rt, daddr, 0,
1934                                RT_TOS(inet->tos) | sk->sk_localroute,
1935                                sk->sk_bound_dev_if,
1936                                IPPROTO_TCP,
1937                                inet->sport, inet->dport, sk);
1938         if (err)
1939                 return err;
1940
1941         __sk_dst_set(sk, &rt->u.dst);
1942         tcp_v4_setup_caps(sk, &rt->u.dst);
1943         tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1944
1945         new_saddr = rt->rt_src;
1946
1947         if (new_saddr == old_saddr)
1948                 return 0;
1949
1950         if (sysctl_ip_dynaddr > 1) {
1951                 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1952                                  "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1953                        NIPQUAD(old_saddr),
1954                        NIPQUAD(new_saddr));
1955         }
1956
1957         inet->saddr = new_saddr;
1958         inet->rcv_saddr = new_saddr;
1959
1960         /* XXX The only ugly spot where we really need to
1961          * XXX change the socket's identity after it has
1962          * XXX entered the hashes. -DaveM
1963          *
1964          * Besides that, it does not check for connection
1965          * uniqueness. Wait for trouble.
1966          */
1967         __tcp_v4_rehash(sk);
1968         return 0;
1969 }
1970
1971 int tcp_v4_rebuild_header(struct sock *sk)
1972 {
1973         struct inet_opt *inet = inet_sk(sk);
1974         struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1975         u32 daddr;
1976         int err;
1977
1978         /* Route is OK, nothing to do. */
1979         if (rt)
1980                 return 0;
1981
1982         /* Reroute. */
1983         daddr = inet->daddr;
1984         if (inet->opt && inet->opt->srr)
1985                 daddr = inet->opt->faddr;
1986
1987         {
1988                 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1989                                     .nl_u = { .ip4_u =
1990                                               { .daddr = daddr,
1991                                                 .saddr = inet->saddr,
1992                                                 .tos = RT_CONN_FLAGS(sk) } },
1993                                     .proto = IPPROTO_TCP,
1994                                     .uli_u = { .ports =
1995                                                { .sport = inet->sport,
1996                                                  .dport = inet->dport } } };
1997                                                 
1998                 err = ip_route_output_flow(&rt, &fl, sk, 0);
1999         }
2000         if (!err) {
2001                 __sk_dst_set(sk, &rt->u.dst);
2002                 tcp_v4_setup_caps(sk, &rt->u.dst);
2003                 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
2004                 return 0;
2005         }
2006
2007         /* Routing failed... */
2008         sk->sk_route_caps = 0;
2009
2010         if (!sysctl_ip_dynaddr ||
2011             sk->sk_state != TCP_SYN_SENT ||
2012             (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
2013             (err = tcp_v4_reselect_saddr(sk)) != 0)
2014                 sk->sk_err_soft = -err;
2015
2016         return err;
2017 }
2018
2019 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2020 {
2021         struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2022         struct inet_opt *inet = inet_sk(sk);
2023
2024         sin->sin_family         = AF_INET;
2025         sin->sin_addr.s_addr    = inet->daddr;
2026         sin->sin_port           = inet->dport;
2027 }
2028
2029 /* VJ's idea. Save the last timestamp seen from this destination
2030  * and hold it for at least the normal timewait interval, to use for
2031  * duplicate segment detection in subsequent connections before they
2032  * enter the synchronized state.
2033  */
2034
2035 int tcp_v4_remember_stamp(struct sock *sk)
2036 {
2037         struct inet_opt *inet = inet_sk(sk);
2038         struct tcp_opt *tp = tcp_sk(sk);
2039         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2040         struct inet_peer *peer = NULL;
2041         int release_it = 0;
2042
2043         if (!rt || rt->rt_dst != inet->daddr) {
2044                 peer = inet_getpeer(inet->daddr, 1);
2045                 release_it = 1;
2046         } else {
2047                 if (!rt->peer)
2048                         rt_bind_peer(rt, 1);
2049                 peer = rt->peer;
2050         }
2051
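        /* Only let the cached value move forward: update the peer entry when
         * our timestamp is at least as new as the remembered one, or when
         * the remembered entry is stale (older than TCP_PAWS_MSL seconds and
         * not newer than our own sample).
         */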
2052         if (peer) {
2053                 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2054                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2055                      peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2056                         peer->tcp_ts_stamp = tp->ts_recent_stamp;
2057                         peer->tcp_ts = tp->ts_recent;
2058                 }
2059                 if (release_it)
2060                         inet_putpeer(peer);
2061                 return 1;
2062         }
2063
2064         return 0;
2065 }
2066
2067 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2068 {
2069         struct inet_peer *peer = NULL;
2070
2071         peer = inet_getpeer(tw->tw_daddr, 1);
2072
2073         if (peer) {
2074                 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2075                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2076                      peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2077                         peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2078                         peer->tcp_ts = tw->tw_ts_recent;
2079                 }
2080                 inet_putpeer(peer);
2081                 return 1;
2082         }
2083
2084         return 0;
2085 }
2086
2087 struct tcp_func ipv4_specific = {
2088         .queue_xmit     =       ip_queue_xmit,
2089         .send_check     =       tcp_v4_send_check,
2090         .rebuild_header =       tcp_v4_rebuild_header,
2091         .conn_request   =       tcp_v4_conn_request,
2092         .syn_recv_sock  =       tcp_v4_syn_recv_sock,
2093         .remember_stamp =       tcp_v4_remember_stamp,
2094         .net_header_len =       sizeof(struct iphdr),
2095         .setsockopt     =       ip_setsockopt,
2096         .getsockopt     =       ip_getsockopt,
2097         .addr2sockaddr  =       v4_addr2sockaddr,
2098         .sockaddr_len   =       sizeof(struct sockaddr_in),
2099 };
2100
2101 /* NOTE: A lot of things are set to zero explicitly by the call to
2102  *       sk_alloc(), so they need not be done here.
2103  */
2104 static int tcp_v4_init_sock(struct sock *sk)
2105 {
2106         struct tcp_opt *tp = tcp_sk(sk);
2107
2108         skb_queue_head_init(&tp->out_of_order_queue);
2109         tcp_init_xmit_timers(sk);
2110         tcp_prequeue_init(tp);
2111
2112         tp->rto  = TCP_TIMEOUT_INIT;
2113         tp->mdev = TCP_TIMEOUT_INIT;
2114
2115         /* So many TCP implementations out there (incorrectly) count the
2116          * initial SYN frame in their delayed-ACK and congestion control
2117          * algorithms that we must have the following bandaid to talk
2118          * efficiently to them.  -DaveM
2119          */
2120         tp->snd_cwnd = 2;
2121
2122         /* See draft-stevens-tcpca-spec-01 for discussion of the
2123          * initialization of these values.
2124          */
2125         tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
2126         tp->snd_cwnd_clamp = ~0;
2127         tp->mss_cache_std = tp->mss_cache = 536;
2128
2129         tp->reordering = sysctl_tcp_reordering;
2130
2131         sk->sk_state = TCP_CLOSE;
2132
2133         sk->sk_write_space = sk_stream_write_space;
2134         sk->sk_use_write_queue = 1;
2135
2136         tp->af_specific = &ipv4_specific;
2137
2138         sk->sk_sndbuf = sysctl_tcp_wmem[1];
2139         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2140
2141         atomic_inc(&tcp_sockets_allocated);
2142
2143         return 0;
2144 }
2145
2146 int tcp_v4_destroy_sock(struct sock *sk)
2147 {
2148         struct tcp_opt *tp = tcp_sk(sk);
2149
2150         tcp_clear_xmit_timers(sk);
2151
2152         /* Clean up the write buffer. */
2153         sk_stream_writequeue_purge(sk);
2154
2155         /* Clean up our, hopefully empty, out_of_order_queue. */
2156         __skb_queue_purge(&tp->out_of_order_queue);
2157
2158         /* Clean up the prequeue; it really should be empty by now. */
2159         __skb_queue_purge(&tp->ucopy.prequeue);
2160
2161         /* Clean up a referenced TCP bind bucket. */
2162         if (tp->bind_hash)
2163                 tcp_put_port(sk);
2164
2165         /*
2166          * If sendmsg cached page exists, toss it.
2167          */
2168         if (sk->sk_sndmsg_page) {
2169                 __free_page(sk->sk_sndmsg_page);
2170                 sk->sk_sndmsg_page = NULL;
2171         }
2172
2173         atomic_dec(&tcp_sockets_allocated);
2174
2175         return 0;
2176 }
2177
2178 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2179
2180 #ifdef CONFIG_PROC_FS
2181 /* Proc filesystem TCP sock list dumping. */
2182
2183 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2184 {
2185         return hlist_empty(head) ? NULL :
2186                 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2187 }
2188
2189 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2190 {
2191         return tw->tw_node.next ?
2192                 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2193 }
2194
2195 static void *listening_get_next(struct seq_file *seq, void *cur)
2196 {
2197         struct tcp_opt *tp;
2198         struct hlist_node *node;
2199         struct sock *sk = cur;
2200         struct tcp_iter_state* st = seq->private;
2201
2202         if (!sk) {
2203                 st->bucket = 0;
2204                 sk = sk_head(&tcp_listening_hash[0]);
2205                 goto get_sk;
2206         }
2207
2208         ++st->num;
2209
2210         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2211                 struct open_request *req = cur;
2212
2213                 tp = tcp_sk(st->syn_wait_sk);
2214                 req = req->dl_next;
2215                 while (1) {
2216                         while (req) {
2217                                 vxdprintk(VXD_CBIT(net, 6),
2218                                         "sk,req: %p [#%d] (from %d)", req->sk,
2219                                         (req->sk)?req->sk->sk_xid:0, current->xid);
2220                                 /* skip entries this context may not see;
2221                                  * always advance to avoid looping forever */
2222                                 if ((!req->sk ||
2223                                      vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH)) &&
2224                                     req->class->family == st->family) {
2225                                         cur = req;
2226                                         goto out;
2227                                 }
2228                                 req = req->dl_next;
2229                         if (++st->sbucket >= TCP_SYNQ_HSIZE)
2230                                 break;
2231 get_req:
2232                         req = tp->listen_opt->syn_table[st->sbucket];
2233                 }
2234                 sk        = sk_next(st->syn_wait_sk);
2235                 st->state = TCP_SEQ_STATE_LISTENING;
2236                 read_unlock_bh(&tp->syn_wait_lock);
2237         } else {
2238                 tp = tcp_sk(sk);
2239                 read_lock_bh(&tp->syn_wait_lock);
2240                 if (tp->listen_opt && tp->listen_opt->qlen)
2241                         goto start_req;
2242                 read_unlock_bh(&tp->syn_wait_lock);
2243                 sk = sk_next(sk);
2244         }
2245 get_sk:
2246         sk_for_each_from(sk, node) {
2247                 vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
2248                         sk, sk->sk_xid, current->xid);
2249                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2250                         continue;
2251                 if (sk->sk_family == st->family) {
2252                         cur = sk;
2253                         goto out;
2254                 }
2255                 tp = tcp_sk(sk);
2256                 read_lock_bh(&tp->syn_wait_lock);
2257                 if (tp->listen_opt && tp->listen_opt->qlen) {
2258 start_req:
2259                         st->uid         = sock_i_uid(sk);
2260                         st->syn_wait_sk = sk;
2261                         st->state       = TCP_SEQ_STATE_OPENREQ;
2262                         st->sbucket     = 0;
2263                         goto get_req;
2264                 }
2265                 read_unlock_bh(&tp->syn_wait_lock);
2266         }
2267         if (++st->bucket < TCP_LHTABLE_SIZE) {
2268                 sk = sk_head(&tcp_listening_hash[st->bucket]);
2269                 goto get_sk;
2270         }
2271         cur = NULL;
2272 out:
2273         return cur;
2274 }
2275
2276 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2277 {
2278         void *rc = listening_get_next(seq, NULL);
2279
2280         while (rc && *pos) {
2281                 rc = listening_get_next(seq, rc);
2282                 --*pos;
2283         }
2284         return rc;
2285 }
2286
2287 static void *established_get_first(struct seq_file *seq)
2288 {
2289         struct tcp_iter_state* st = seq->private;
2290         void *rc = NULL;
2291
2292         for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2293                 struct sock *sk;
2294                 struct hlist_node *node;
2295                 struct tcp_tw_bucket *tw;
2296                
2297                 read_lock(&tcp_ehash[st->bucket].lock);
2298                 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2299                         vxdprintk(VXD_CBIT(net, 6),
2300                                 "sk,egf: %p [#%d] (from %d)",
2301                                 sk, sk->sk_xid, current->xid);
2302                         if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2303                                 continue;
2304                         if (sk->sk_family != st->family)
2305                                 continue;
2306                         rc = sk;
2307                         goto out;
2308                 }
2309                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2310                 tw_for_each(tw, node,
2311                             &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2312                         vxdprintk(VXD_CBIT(net, 6),
2313                                 "tw: %p [#%d] (from %d)",
2314                                 tw, tw->tw_xid, current->xid);
2315                         if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2316                                 continue;
2317                         if (tw->tw_family != st->family)
2318                                 continue;
2319                         rc = tw;
2320                         goto out;
2321                 }
2322                 read_unlock(&tcp_ehash[st->bucket].lock);
2323                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2324         }
2325 out:
2326         return rc;
2327 }
2328
2329 static void *established_get_next(struct seq_file *seq, void *cur)
2330 {
2331         struct sock *sk = cur;
2332         struct tcp_tw_bucket *tw;
2333         struct hlist_node *node;
2334         struct tcp_iter_state* st = seq->private;
2335
2336         ++st->num;
2337
2338         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2339                 tw = cur;
2340                 tw = tw_next(tw);
2341 get_tw:
2342                 while (tw && (tw->tw_family != st->family ||
2343                         !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2344                         tw = tw_next(tw);
2345                 }
2346                 if (tw) {
2347                         cur = tw;
2348                         goto out;
2349                 }
2350                 read_unlock(&tcp_ehash[st->bucket].lock);
2351                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2352                 if (++st->bucket < tcp_ehash_size) {
2353                         read_lock(&tcp_ehash[st->bucket].lock);
2354                         sk = sk_head(&tcp_ehash[st->bucket].chain);
2355                 } else {
2356                         cur = NULL;
2357                         goto out;
2358                 }
2359         } else
2360                 sk = sk_next(sk);
2361
2362         sk_for_each_from(sk, node) {
2363                 vxdprintk(VXD_CBIT(net, 6),
2364                         "sk,egn: %p [#%d] (from %d)",
2365                         sk, sk->sk_xid, current->xid);
2366                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2367                         continue;
2368                 if (sk->sk_family == st->family)
2369                         goto found;
2370         }
2371
2372         st->state = TCP_SEQ_STATE_TIME_WAIT;
2373         tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2374         goto get_tw;
2375 found:
2376         cur = sk;
2377 out:
2378         return cur;
2379 }
2380
2381 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2382 {
2383         void *rc = established_get_first(seq);
2384
2385         while (rc && pos) {
2386                 rc = established_get_next(seq, rc);
2387                 --pos;
2388         }               
2389         return rc;
2390 }
2391
2392 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2393 {
2394         void *rc;
2395         struct tcp_iter_state* st = seq->private;
2396
2397         tcp_listen_lock();
2398         st->state = TCP_SEQ_STATE_LISTENING;
2399         rc        = listening_get_idx(seq, &pos);
2400
2401         if (!rc) {
2402                 tcp_listen_unlock();
2403                 local_bh_disable();
2404                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2405                 rc        = established_get_idx(seq, pos);
2406         }
2407
2408         return rc;
2409 }
2410
2411 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2412 {
2413         struct tcp_iter_state* st = seq->private;
2414         st->state = TCP_SEQ_STATE_LISTENING;
2415         st->num = 0;
2416         return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2417 }
2418
2419 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2420 {
2421         void *rc = NULL;
2422         struct tcp_iter_state* st;
2423
2424         if (v == SEQ_START_TOKEN) {
2425                 rc = tcp_get_idx(seq, 0);
2426                 goto out;
2427         }
2428         st = seq->private;
2429
2430         switch (st->state) {
2431         case TCP_SEQ_STATE_OPENREQ:
2432         case TCP_SEQ_STATE_LISTENING:
2433                 rc = listening_get_next(seq, v);
2434                 if (!rc) {
2435                         tcp_listen_unlock();
2436                         local_bh_disable();
2437                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2438                         rc        = established_get_first(seq);
2439                 }
2440                 break;
2441         case TCP_SEQ_STATE_ESTABLISHED:
2442         case TCP_SEQ_STATE_TIME_WAIT:
2443                 rc = established_get_next(seq, v);
2444                 break;
2445         }
2446 out:
2447         ++*pos;
2448         return rc;
2449 }
2450
2451 static void tcp_seq_stop(struct seq_file *seq, void *v)
2452 {
2453         struct tcp_iter_state* st = seq->private;
2454
2455         switch (st->state) {
2456         case TCP_SEQ_STATE_OPENREQ:
2457                 if (v) {
2458                         struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2459                         read_unlock_bh(&tp->syn_wait_lock);
2460                 }
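                /* fall through: the listen lock must be dropped as well */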
2461         case TCP_SEQ_STATE_LISTENING:
2462                 if (v != SEQ_START_TOKEN)
2463                         tcp_listen_unlock();
2464                 break;
2465         case TCP_SEQ_STATE_TIME_WAIT:
2466         case TCP_SEQ_STATE_ESTABLISHED:
2467                 if (v)
2468                         read_unlock(&tcp_ehash[st->bucket].lock);
2469                 local_bh_enable();
2470                 break;
2471         }
2472 }
2473
2474 static int tcp_seq_open(struct inode *inode, struct file *file)
2475 {
2476         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2477         struct seq_file *seq;
2478         struct tcp_iter_state *s;
2479         int rc;
2480
2481         if (unlikely(afinfo == NULL))
2482                 return -EINVAL;
2483
2484         s = kmalloc(sizeof(*s), GFP_KERNEL);
2485         if (!s)
2486                 return -ENOMEM;
2487         memset(s, 0, sizeof(*s));
2488         s->family               = afinfo->family;
2489         s->seq_ops.start        = tcp_seq_start;
2490         s->seq_ops.next         = tcp_seq_next;
2491         s->seq_ops.show         = afinfo->seq_show;
2492         s->seq_ops.stop         = tcp_seq_stop;
2493
2494         rc = seq_open(file, &s->seq_ops);
2495         if (rc)
2496                 goto out_kfree;
2497         seq          = file->private_data;
2498         seq->private = s;
2499 out:
2500         return rc;
2501 out_kfree:
2502         kfree(s);
2503         goto out;
2504 }
2505
2506 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2507 {
2508         int rc = 0;
2509         struct proc_dir_entry *p;
2510
2511         if (!afinfo)
2512                 return -EINVAL;
2513         afinfo->seq_fops->owner         = afinfo->owner;
2514         afinfo->seq_fops->open          = tcp_seq_open;
2515         afinfo->seq_fops->read          = seq_read;
2516         afinfo->seq_fops->llseek        = seq_lseek;
2517         afinfo->seq_fops->release       = seq_release_private;
2518         
2519         p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2520         if (p)
2521                 p->data = afinfo;
2522         else
2523                 rc = -ENOMEM;
2524         return rc;
2525 }
2526
2527 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2528 {
2529         if (!afinfo)
2530                 return;
2531         proc_net_remove(afinfo->name);
2532         memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
2533 }
2534
2535 static void get_openreq4(struct sock *sk, struct open_request *req,
2536                          char *tmpbuf, int i, int uid)
2537 {
2538         int ttd = req->expires - jiffies;
2539
2540         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2541                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2542                 i,
2543                 req->af.v4_req.loc_addr,
2544                 ntohs(inet_sk(sk)->sport),
2545                 req->af.v4_req.rmt_addr,
2546                 ntohs(req->rmt_port),
2547                 TCP_SYN_RECV,
2548                 0, 0, /* could print option size, but that is af dependent. */
2549                 1,    /* timers active (only the expire timer) */
2550                 jiffies_to_clock_t(ttd),
2551                 req->retrans,
2552                 uid,
2553                 0,  /* non standard timer */
2554                 0, /* open_requests have no inode */
2555                 atomic_read(&sk->sk_refcnt),
2556                 req);
2557 }
2558
2559 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2560 {
2561         int timer_active;
2562         unsigned long timer_expires;
2563         struct tcp_opt *tp = tcp_sk(sp);
2564         struct inet_opt *inet = inet_sk(sp);
2565         unsigned int dest = inet->daddr;
2566         unsigned int src = inet->rcv_saddr;
2567         __u16 destp = ntohs(inet->dport);
2568         __u16 srcp = ntohs(inet->sport);
2569
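        /* Timer codes reported in /proc/net/tcp: 1 = retransmit timer,
         * 2 = another timer such as keepalive (sk_timer), 4 = zero-window
         * probe; the TIME_WAIT formatter below reports 3, and 0 means
         * nothing is pending.
         */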
2570         if (tp->pending == TCP_TIME_RETRANS) {
2571                 timer_active    = 1;
2572                 timer_expires   = tp->timeout;
2573         } else if (tp->pending == TCP_TIME_PROBE0) {
2574                 timer_active    = 4;
2575                 timer_expires   = tp->timeout;
2576         } else if (timer_pending(&sp->sk_timer)) {
2577                 timer_active    = 2;
2578                 timer_expires   = sp->sk_timer.expires;
2579         } else {
2580                 timer_active    = 0;
2581                 timer_expires = jiffies;
2582         }
2583
2584         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2585                         "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2586                 i, src, srcp, dest, destp, sp->sk_state,
2587                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2588                 timer_active,
2589                 jiffies_to_clock_t(timer_expires - jiffies),
2590                 tp->retransmits,
2591                 sock_i_uid(sp),
2592                 tp->probes_out,
2593                 sock_i_ino(sp),
2594                 atomic_read(&sp->sk_refcnt), sp,
2595                 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2596                 tp->snd_cwnd,
2597                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2598 }
2599
2600 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2601 {
2602         unsigned int dest, src;
2603         __u16 destp, srcp;
2604         int ttd = tw->tw_ttd - jiffies;
2605
2606         if (ttd < 0)
2607                 ttd = 0;
2608
2609         dest  = tw->tw_daddr;
2610         src   = tw->tw_rcv_saddr;
2611         destp = ntohs(tw->tw_dport);
2612         srcp  = ntohs(tw->tw_sport);
2613
2614         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2615                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2616                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2617                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2618                 atomic_read(&tw->tw_refcnt), tw);
2619 }
2620
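/* Fixed width of one formatted /proc entry: seq_printf() below pads each
 * line to TMPSZ - 1 characters before the trailing newline.
 */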
2621 #define TMPSZ 150
2622
2623 static int tcp4_seq_show(struct seq_file *seq, void *v)
2624 {
2625         struct tcp_iter_state* st;
2626         char tmpbuf[TMPSZ + 1];
2627
2628         if (v == SEQ_START_TOKEN) {
2629                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2630                            "  sl  local_address rem_address   st tx_queue "
2631                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2632                            "inode");
2633                 goto out;
2634         }
2635         st = seq->private;
2636
2637         switch (st->state) {
2638         case TCP_SEQ_STATE_LISTENING:
2639         case TCP_SEQ_STATE_ESTABLISHED:
2640                 get_tcp4_sock(v, tmpbuf, st->num);
2641                 break;
2642         case TCP_SEQ_STATE_OPENREQ:
2643                 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2644                 break;
2645         case TCP_SEQ_STATE_TIME_WAIT:
2646                 get_timewait4_sock(v, tmpbuf, st->num);
2647                 break;
2648         }
2649         seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2650 out:
2651         return 0;
2652 }
2653
2654 static struct file_operations tcp4_seq_fops;
2655 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2656         .owner          = THIS_MODULE,
2657         .name           = "tcp",
2658         .family         = AF_INET,
2659         .seq_show       = tcp4_seq_show,
2660         .seq_fops       = &tcp4_seq_fops,
2661 };
2662
2663 int __init tcp4_proc_init(void)
2664 {
2665         return tcp_proc_register(&tcp4_seq_afinfo);
2666 }
2667
2668 void tcp4_proc_exit(void)
2669 {
2670         tcp_proc_unregister(&tcp4_seq_afinfo);
2671 }
2672 #endif /* CONFIG_PROC_FS */
2673
2674 struct proto tcp_prot = {
2675         .name                   = "TCP",
2676         .close                  = tcp_close,
2677         .connect                = tcp_v4_connect,
2678         .disconnect             = tcp_disconnect,
2679         .accept                 = tcp_accept,
2680         .ioctl                  = tcp_ioctl,
2681         .init                   = tcp_v4_init_sock,
2682         .destroy                = tcp_v4_destroy_sock,
2683         .shutdown               = tcp_shutdown,
2684         .setsockopt             = tcp_setsockopt,
2685         .getsockopt             = tcp_getsockopt,
2686         .sendmsg                = tcp_sendmsg,
2687         .recvmsg                = tcp_recvmsg,
2688         .backlog_rcv            = tcp_v4_do_rcv,
2689         .hash                   = tcp_v4_hash,
2690         .unhash                 = tcp_unhash,
2691         .get_port               = tcp_v4_get_port,
2692         .enter_memory_pressure  = tcp_enter_memory_pressure,
2693         .sockets_allocated      = &tcp_sockets_allocated,
2694         .memory_allocated       = &tcp_memory_allocated,
2695         .memory_pressure        = &tcp_memory_pressure,
2696         .sysctl_mem             = sysctl_tcp_mem,
2697         .sysctl_wmem            = sysctl_tcp_wmem,
2698         .sysctl_rmem            = sysctl_tcp_rmem,
2699         .max_header             = MAX_TCP_HEADER,
2700         .slab_obj_size          = sizeof(struct tcp_sock),
2701 };
2702
2703
2704
2705 void __init tcp_v4_init(struct net_proto_family *ops)
2706 {
2707         int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2708         if (err < 0)
2709                 panic("Failed to create the TCP control socket.\n");
2710         tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
2711         inet_sk(tcp_socket->sk)->uc_ttl = -1;
2712
2713         /* Unhash it so that IP input processing does not even
2714          * see it, we do not wish this socket to see incoming
2715          * packets.
2716          */
2717         tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2718 }
2719
2720 EXPORT_SYMBOL(ipv4_specific);
2721 EXPORT_SYMBOL(tcp_bind_hash);
2722 EXPORT_SYMBOL(tcp_bucket_create);
2723 EXPORT_SYMBOL(tcp_hashinfo);
2724 EXPORT_SYMBOL(tcp_inherit_port);
2725 EXPORT_SYMBOL(tcp_listen_wlock);
2726 EXPORT_SYMBOL(tcp_port_rover);
2727 EXPORT_SYMBOL(tcp_prot);
2728 EXPORT_SYMBOL(tcp_put_port);
2729 EXPORT_SYMBOL(tcp_unhash);
2730 EXPORT_SYMBOL(tcp_v4_conn_request);
2731 EXPORT_SYMBOL(tcp_v4_connect);
2732 EXPORT_SYMBOL(tcp_v4_do_rcv);
2733 EXPORT_SYMBOL_GPL(tcp_v4_lookup_listener);
2734 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2735 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2736 EXPORT_SYMBOL(tcp_v4_send_check);
2737 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2738
2739 #ifdef CONFIG_PROC_FS
2740 EXPORT_SYMBOL(tcp_proc_register);
2741 EXPORT_SYMBOL(tcp_proc_unregister);
2742 #endif
2743 #ifdef CONFIG_SYSCTL
2744 EXPORT_SYMBOL(sysctl_local_port_range);
2745 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2746 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2747 #endif