1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:     $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
9  *
10  *              IPv4 specific functions
11  *
12  *
13  *              code split from:
14  *              linux/ipv4/tcp.c
15  *              linux/ipv4/tcp_input.c
16  *              linux/ipv4/tcp_output.c
17  *
18  *              See tcp.c for author information
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 /*
27  * Changes:
28  *              David S. Miller :       New socket lookup architecture.
29  *                                      This code is dedicated to John Dyson.
30  *              David S. Miller :       Change semantics of established hash,
31  *                                      half is devoted to TIME_WAIT sockets
32  *                                      and the rest go in the other half.
33  *              Andi Kleen :            Add support for syncookies and fixed
34  *                                      some bugs: ip options weren't passed to
35  *                                      the TCP layer, missed a check for an
36  *                                      ACK bit.
37  *              Andi Kleen :            Implemented fast path mtu discovery.
38  *                                      Fixed many serious bugs in the
39  *                                      open_request handling and moved
40  *                                      most of it into the af independent code.
41  *                                      Added tail drop and some other bugfixes.
42  *                                      Added new listen semantics.
43  *              Mike McLagan    :       Routing by source
44  *      Juan Jose Ciarlante:            ip_dynaddr bits
45  *              Andi Kleen:             various fixes.
46  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
47  *                                      coma.
48  *      Andi Kleen              :       Fix new listen.
49  *      Andi Kleen              :       Fix accept error reporting.
50  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
51  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
52  *                                      a single port at the same time.
53  */
54
55 #include <linux/config.h>
56
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
65
66 #include <net/icmp.h>
67 #include <net/tcp.h>
68 #include <net/ipv6.h>
69 #include <net/inet_common.h>
70 #include <net/xfrm.h>
71
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77
78 extern int sysctl_ip_dynaddr;
79 int sysctl_tcp_tw_reuse;
80 int sysctl_tcp_low_latency;
81
82 /* Check TCP sequence numbers in ICMP packets. */
83 #define ICMP_MIN_LENGTH 8
84
85 /* Socket used for sending RSTs */
86 static struct socket *tcp_socket;
87
88 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
89                        struct sk_buff *skb);
90
91 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
92         .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
93         .__tcp_lhash_users      =       ATOMIC_INIT(0),
94         .__tcp_lhash_wait
95           = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
96         .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
97 };
98
99 /*
100  * This array holds the first and last local port number.
101  * For high-usage systems, use sysctl to change this to
102  * 32768-61000
103  */
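/* For example, the range can be widened at run time through the usual
 * sysctl interface (assuming the standard net.ipv4.ip_local_port_range
 * entry):
 *   echo "32768 61000" > /proc/sys/net/ipv4/ip_local_port_range
 */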
104 int sysctl_local_port_range[2] = { 1024, 4999 };
105 int tcp_port_rover = 1024 - 1;
106
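/* Fold the connection 4-tuple down to an index into the established
 * hash table; tcp_ehash_size is a power of two, so the mask below
 * selects a bucket.
 */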
107 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
108                                  __u32 faddr, __u16 fport)
109 {
110         int h = (laddr ^ lport) ^ (faddr ^ fport);
111         h ^= h >> 16;
112         h ^= h >> 8;
113         return h & (tcp_ehash_size - 1);
114 }
115
116 static __inline__ int tcp_sk_hashfn(struct sock *sk)
117 {
118         struct inet_opt *inet = inet_sk(sk);
119         __u32 laddr = inet->rcv_saddr;
120         __u16 lport = inet->num;
121         __u32 faddr = inet->daddr;
122         __u16 fport = inet->dport;
123
124         return tcp_hashfn(laddr, lport, faddr, fport);
125 }
126
127 /* Allocate and initialize a new TCP local port bind bucket.
128  * The bindhash mutex for snum's hash chain must be held here.
129  */
130 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
131                                           unsigned short snum)
132 {
133         struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
134                                                       SLAB_ATOMIC);
135         if (tb) {
136                 tb->port = snum;
137                 tb->fastreuse = 0;
138                 INIT_HLIST_HEAD(&tb->owners);
139                 hlist_add_head(&tb->node, &head->chain);
140         }
141         return tb;
142 }
143
144 /* Caller must hold hashbucket lock for this tb with local BH disabled */
145 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
146 {
147         if (hlist_empty(&tb->owners)) {
148                 __hlist_del(&tb->node);
149                 kmem_cache_free(tcp_bucket_cachep, tb);
150         }
151 }
152
153 /* Caller must disable local BH processing. */
154 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
155 {
156         struct tcp_bind_hashbucket *head =
157                                 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
158         struct tcp_bind_bucket *tb;
159
160         spin_lock(&head->lock);
161         tb = tcp_sk(sk)->bind_hash;
162         sk_add_bind_node(child, &tb->owners);
163         tcp_sk(child)->bind_hash = tb;
164         spin_unlock(&head->lock);
165 }
166
167 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
168 {
169         local_bh_disable();
170         __tcp_inherit_port(sk, child);
171         local_bh_enable();
172 }
173
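/* Bind sk to an already-reserved local port: record the port number and
 * link the socket onto the bind bucket's owner list.
 */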
174 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
175                    unsigned short snum)
176 {
177         inet_sk(sk)->num = snum;
178         sk_add_bind_node(sk, &tb->owners);
179         tcp_sk(sk)->bind_hash = tb;
180 }
181
182 /*
183         Return 1 if addr matches the socket's IP list
184         or the socket is bound to INADDR_ANY
185 */
186 static inline int tcp_in_list(struct sock *sk, u32 addr)
187 {
188         struct nx_info *nxi = sk->sk_nx_info;
189
190         vxdprintk("tcp_in_list(%p) %p,%p;%lx\n",
191                 sk, nxi, sk->sk_socket,
192                 (sk->sk_socket?sk->sk_socket->flags:0));
193
194         if (nxi) {
195                 int n = nxi->nbipv4;
196                 int i;
197
198                 for (i=0; i<n; i++)
199                         if (nxi->ipv4[i] == addr)
200                                 return 1;
201         }
202         else if (!tcp_v4_rcv_saddr(sk) || tcp_v4_rcv_saddr(sk) == addr)
203                 return 1;
204         return 0;
205 }
206         
207 /*
208         Check if the addresses in sk1 conflict with those in sk2
209 */
210 int tcp_ipv4_addr_conflict(struct sock *sk1, struct sock *sk2)
211 {
212         if (sk1 && sk2)
213         nxdprintk("inet_bind(%p,%p) %p,%p;%lx %p,%p;%lx\n",
214                 sk1, sk2,
215                 sk1->sk_nx_info, sk1->sk_socket,
216                 (sk1->sk_socket?sk1->sk_socket->flags:0),
217                 sk2->sk_nx_info, sk2->sk_socket,
218                 (sk2->sk_socket?sk2->sk_socket->flags:0));
219
220         if (tcp_v4_rcv_saddr(sk1)) {
221                 /* Bind to one address only */
222                 return tcp_in_list (sk2, tcp_v4_rcv_saddr(sk1));
223         } else if (sk1->sk_nx_info) {
224                 /* A restricted bind(any) */
225                 struct nx_info *nxi = sk1->sk_nx_info;
226                 int n = nxi->nbipv4;
227                 int i;
228
229                 for (i=0; i<n; i++)
230                         if (tcp_in_list (sk2, nxi->ipv4[i]))
231                                 return 1;
232         } else  /* A bind(any) does not allow another bind on the same port */
233                 return 1;
234         return 0;
235 }
236
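/* Walk the bind bucket's owners and decide whether sk may share the port.
 * A conflict needs overlapping bound devices and addresses, and is excused
 * only when both sockets set SO_REUSEADDR and the existing one is not
 * listening.
 */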
237 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
238 {
239         struct sock *sk2;
240         struct hlist_node *node;
241         int reuse = sk->sk_reuse;
242
243         sk_for_each_bound(sk2, node, &tb->owners) {
244                 if (sk != sk2 &&
245                     !tcp_v6_ipv6only(sk2) &&
246                     (!sk->sk_bound_dev_if ||
247                      !sk2->sk_bound_dev_if ||
248                      sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
249                         if (!reuse || !sk2->sk_reuse ||
250                             sk2->sk_state == TCP_LISTEN) {
251                                 if (tcp_ipv4_addr_conflict(sk, sk2))
252                                         break;
253                         }
254                 }
255         }
256         return node != NULL;
257 }
258
259 /* Obtain a reference to a local port for the given sock,
260  * if snum is zero it means select any available local port.
261  */
262 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
263 {
264         struct tcp_bind_hashbucket *head;
265         struct hlist_node *node;
266         struct tcp_bind_bucket *tb;
267         int ret;
268
269         local_bh_disable();
270         if (!snum) {
271                 int low = sysctl_local_port_range[0];
272                 int high = sysctl_local_port_range[1];
273                 int remaining = (high - low) + 1;
274                 int rover;
275
276                 spin_lock(&tcp_portalloc_lock);
277                 rover = tcp_port_rover;
278                 do {
279                         rover++;
280                         if (rover < low || rover > high)
281                                 rover = low;
282                         head = &tcp_bhash[tcp_bhashfn(rover)];
283                         spin_lock(&head->lock);
284                         tb_for_each(tb, node, &head->chain)
285                                 if (tb->port == rover)
286                                         goto next;
287                         break;
288                 next:
289                         spin_unlock(&head->lock);
290                 } while (--remaining > 0);
291                 tcp_port_rover = rover;
292                 spin_unlock(&tcp_portalloc_lock);
293
294                 /* Exhausted local port range during search? */
295                 ret = 1;
296                 if (remaining <= 0)
297                         goto fail;
298
299                 /* OK, here is the one we will use.  HEAD is
300                  * non-NULL and we hold its mutex.
301                  */
302                 snum = rover;
303         } else {
304                 head = &tcp_bhash[tcp_bhashfn(snum)];
305                 spin_lock(&head->lock);
306                 tb_for_each(tb, node, &head->chain)
307                         if (tb->port == snum)
308                                 goto tb_found;
309         }
310         tb = NULL;
311         goto tb_not_found;
312 tb_found:
313         if (!hlist_empty(&tb->owners)) {
314                 if (sk->sk_reuse > 1)
315                         goto success;
316                 if (tb->fastreuse > 0 &&
317                     sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
318                         goto success;
319                 } else {
320                         ret = 1;
321                         if (tcp_bind_conflict(sk, tb))
322                                 goto fail_unlock;
323                 }
324         }
325 tb_not_found:
326         ret = 1;
327         if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
328                 goto fail_unlock;
329         if (hlist_empty(&tb->owners)) {
330                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
331                         tb->fastreuse = 1;
332                 else
333                         tb->fastreuse = 0;
334         } else if (tb->fastreuse &&
335                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
336                 tb->fastreuse = 0;
337 success:
338         if (!tcp_sk(sk)->bind_hash)
339                 tcp_bind_hash(sk, tb, snum);
340         BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
341         ret = 0;
342
343 fail_unlock:
344         spin_unlock(&head->lock);
345 fail:
346         local_bh_enable();
347         return ret;
348 }
349
350 /* Get rid of any references to a local port held by the
351  * given sock.
352  */
353 static void __tcp_put_port(struct sock *sk)
354 {
355         struct inet_opt *inet = inet_sk(sk);
356         struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
357         struct tcp_bind_bucket *tb;
358
359         spin_lock(&head->lock);
360         tb = tcp_sk(sk)->bind_hash;
361         __sk_del_bind_node(sk);
362         tcp_sk(sk)->bind_hash = NULL;
363         inet->num = 0;
364         tcp_bucket_destroy(tb);
365         spin_unlock(&head->lock);
366 }
367
368 void tcp_put_port(struct sock *sk)
369 {
370         local_bh_disable();
371         __tcp_put_port(sk);
372         local_bh_enable();
373 }
374
375 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
376  * Look, when several writers sleep and a reader wakes them up, all but one
377  * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
378  * this, _but_ remember, it adds useless work on UP machines (a wake up on each
379  * exclusive lock release). It should really be ifdefed.
380  */
381
382 void tcp_listen_wlock(void)
383 {
384         write_lock(&tcp_lhash_lock);
385
386         if (atomic_read(&tcp_lhash_users)) {
387                 DEFINE_WAIT(wait);
388
389                 for (;;) {
390                         prepare_to_wait_exclusive(&tcp_lhash_wait,
391                                                 &wait, TASK_UNINTERRUPTIBLE);
392                         if (!atomic_read(&tcp_lhash_users))
393                                 break;
394                         write_unlock_bh(&tcp_lhash_lock);
395                         schedule();
396                         write_lock_bh(&tcp_lhash_lock);
397                 }
398
399                 finish_wait(&tcp_lhash_wait, &wait);
400         }
401 }
402
403 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
404 {
405         struct hlist_head *list;
406         rwlock_t *lock;
407
408         BUG_TRAP(sk_unhashed(sk));
409         if (listen_possible && sk->sk_state == TCP_LISTEN) {
410                 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
411                 lock = &tcp_lhash_lock;
412                 tcp_listen_wlock();
413         } else {
414                 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
415                 lock = &tcp_ehash[sk->sk_hashent].lock;
416                 write_lock(lock);
417         }
418         __sk_add_node(sk, list);
419         sock_prot_inc_use(sk->sk_prot);
420         write_unlock(lock);
421         if (listen_possible && sk->sk_state == TCP_LISTEN)
422                 wake_up(&tcp_lhash_wait);
423 }
424
425 static void tcp_v4_hash(struct sock *sk)
426 {
427         if (sk->sk_state != TCP_CLOSE) {
428                 local_bh_disable();
429                 __tcp_v4_hash(sk, 1);
430                 local_bh_enable();
431         }
432 }
433
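/* Remove sk from whichever hash (listening or established) it occupies,
 * taking the corresponding lock, and wake tcp_lhash_wait waiters when a
 * listener goes away.
 */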
434 void tcp_unhash(struct sock *sk)
435 {
436         rwlock_t *lock;
437
438         if (sk_unhashed(sk))
439                 goto ende;
440
441         if (sk->sk_state == TCP_LISTEN) {
442                 local_bh_disable();
443                 tcp_listen_wlock();
444                 lock = &tcp_lhash_lock;
445         } else {
446                 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
447                 lock = &head->lock;
448                 write_lock_bh(&head->lock);
449         }
450
451         if (__sk_del_node_init(sk))
452                 sock_prot_dec_use(sk->sk_prot);
453         write_unlock_bh(lock);
454
455  ende:
456         if (sk->sk_state == TCP_LISTEN)
457                 wake_up(&tcp_lhash_wait);
458 }
459
460 /*
461         Check if an address is in the list
462 */
463 static inline int tcp_addr_in_list(
464         u32 rcv_saddr,
465         u32 daddr,
466         struct nx_info *nx_info)
467 {
468         if (rcv_saddr == daddr)
469                 return 1;
470         else if (rcv_saddr == 0) {
471                 /* Accept any address or check the list */
472                 if (!nx_info)
473                         return 1;
474                 else {
475                         int n = nx_info->nbipv4;
476                         int i;
477
478                         for (i=0; i<n; i++)
479                                 if (nx_info->ipv4[i] == daddr)
480                                         return 1;
481                 }
482         }
483         return 0;
484 }
485
486
487
488 /* Don't inline this cruft.  There are some nice properties to
489  * exploit here.  The BSD API does not allow a listening TCP
490  * to specify the remote port nor the remote address for the
491  * connection.  So always assume those are both wildcarded
492  * during the search since they can never be otherwise.
493  */
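/* Scoring: +1 for an AF_INET socket, +2 for a matching local address,
 * +2 for a matching bound device; a score of 5 is an exact match and
 * ends the scan early.
 */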
494 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
495                                              unsigned short hnum, int dif)
496 {
497         struct sock *result = NULL, *sk;
498         struct hlist_node *node;
499         int score, hiscore;
500
501         hiscore=-1;
502         sk_for_each(sk, node, head) {
503                 struct inet_opt *inet = inet_sk(sk);
504
505                 if (inet->num == hnum && !ipv6_only_sock(sk)) {
506                         __u32 rcv_saddr = inet->rcv_saddr;
507
508                         score = (sk->sk_family == PF_INET ? 1 : 0);
509                         if (tcp_addr_in_list(rcv_saddr, daddr, sk->sk_nx_info))
510                                 score+=2;
511                         else
512                                 continue;
513                         if (sk->sk_bound_dev_if) {
514                                 if (sk->sk_bound_dev_if != dif)
515                                         continue;
516                                 score+=2;
517                         }
518                         if (score == 5)
519                                 return sk;
520                         if (score > hiscore) {
521                                 hiscore = score;
522                                 result = sk;
523                         }
524                 }
525         }
526         return result;
527 }
528
529 /* Optimize the common listener case. */
530 inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
531                                            int dif)
532 {
533         struct sock *sk = NULL;
534         struct hlist_head *head;
535
536         read_lock(&tcp_lhash_lock);
537         head = &tcp_listening_hash[tcp_lhashfn(hnum)];
538         if (!hlist_empty(head)) {
539                 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
540
541                 if (inet->num == hnum && !sk->sk_node.next &&
542                     (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
543                     tcp_addr_in_list(inet->rcv_saddr, daddr, sk->sk_nx_info) &&
544                     !sk->sk_bound_dev_if)
545                         goto sherry_cache;
546                 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
547         }
548         if (sk) {
549 sherry_cache:
550                 sock_hold(sk);
551         }
552         read_unlock(&tcp_lhash_lock);
553         return sk;
554 }
555
556 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
557  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
558  *
559  * Local BH must be disabled here.
560  */
561
562 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
563                                                        u32 daddr, u16 hnum,
564                                                        int dif)
565 {
566         struct tcp_ehash_bucket *head;
567         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
568         __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
569         struct sock *sk;
570         struct hlist_node *node;
571         /* Optimize here for direct hit, only listening connections can
572          * have wildcards anyway.
573          */
574         int hash = tcp_hashfn(daddr, hnum, saddr, sport);
575         head = &tcp_ehash[hash];
576         read_lock(&head->lock);
577         sk_for_each(sk, node, &head->chain) {
578                 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
579                         goto hit; /* You sunk my battleship! */
580         }
581
582         /* Must check for a TIME_WAIT'er before going to listener hash. */
583         sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
584                 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
585                         goto hit;
586         }
587         sk = NULL;
588 out:
589         read_unlock(&head->lock);
590         return sk;
591 hit:
592         sock_hold(sk);
593         goto out;
594 }
595
596 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
597                                            u32 daddr, u16 hnum, int dif)
598 {
599         struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
600                                                       daddr, hnum, dif);
601
602         return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
603 }
604
605 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
606                                   u16 dport, int dif)
607 {
608         struct sock *sk;
609
610         local_bh_disable();
611         sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
612         local_bh_enable();
613
614         return sk;
615 }
616
617 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
618 {
619         return secure_tcp_sequence_number(skb->nh.iph->daddr,
620                                           skb->nh.iph->saddr,
621                                           skb->h.th->dest,
622                                           skb->h.th->source);
623 }
624
625 /* called with local bh disabled */
626 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
627                                       struct tcp_tw_bucket **twp)
628 {
629         struct inet_opt *inet = inet_sk(sk);
630         u32 daddr = inet->rcv_saddr;
631         u32 saddr = inet->daddr;
632         int dif = sk->sk_bound_dev_if;
633         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
634         __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
635         int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
636         struct tcp_ehash_bucket *head = &tcp_ehash[hash];
637         struct sock *sk2;
638         struct hlist_node *node;
639         struct tcp_tw_bucket *tw;
640
641         write_lock(&head->lock);
642
643         /* Check TIME-WAIT sockets first. */
644         sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
645                 tw = (struct tcp_tw_bucket *)sk2;
646
647                 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
648                         struct tcp_opt *tp = tcp_sk(sk);
649
650                         /* With PAWS, it is safe from the viewpoint
651                            of data integrity. Even without PAWS it
652                            is safe provided sequence spaces do not
653                            overlap i.e. at data rates <= 80Mbit/sec.
654
655                            Actually, the idea is close to VJ's one,
656                            only timestamp cache is held not per host,
657                            but per port pair and TW bucket is used
658                            as state holder.
659
660                            If TW bucket has been already destroyed we
661                            fall back to VJ's scheme and use initial
662                            timestamp retrieved from peer table.
663                          */
664                         if (tw->tw_ts_recent_stamp &&
665                             (!twp || (sysctl_tcp_tw_reuse &&
666                                       xtime.tv_sec -
667                                       tw->tw_ts_recent_stamp > 1))) {
668                                 if ((tp->write_seq =
669                                                 tw->tw_snd_nxt + 65535 + 2) == 0)
670                                         tp->write_seq = 1;
671                                 tp->ts_recent       = tw->tw_ts_recent;
672                                 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
673                                 sock_hold(sk2);
674                                 goto unique;
675                         } else
676                                 goto not_unique;
677                 }
678         }
679         tw = NULL;
680
681         /* And established part... */
682         sk_for_each(sk2, node, &head->chain) {
683                 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
684                         goto not_unique;
685         }
686
687 unique:
688         /* Must record num and sport now. Otherwise we will see
689          * a socket with a funny identity in the hash table. */
690         inet->num = lport;
691         inet->sport = htons(lport);
692         sk->sk_hashent = hash;
693         BUG_TRAP(sk_unhashed(sk));
694         __sk_add_node(sk, &head->chain);
695         sock_prot_inc_use(sk->sk_prot);
696         write_unlock(&head->lock);
697
698         if (twp) {
699                 *twp = tw;
700                 NET_INC_STATS_BH(TimeWaitRecycled);
701         } else if (tw) {
702                 /* Silly. Should hash-dance instead... */
703                 tcp_tw_deschedule(tw);
704                 NET_INC_STATS_BH(TimeWaitRecycled);
705
706                 tcp_tw_put(tw);
707         }
708
709         return 0;
710
711 not_unique:
712         write_unlock(&head->lock);
713         return -EADDRNOTAVAIL;
714 }
715
716 /*
717  * Bind a port for a connect operation and hash it.
718  */
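/* When no local port is bound yet, walk the rover much like
 * tcp_v4_get_port(), but skip buckets already used by ordinary bind()
 * (fastreuse >= 0) and let __tcp_v4_check_established() decide
 * uniqueness, so a TIME-WAIT four-tuple may be reused.
 */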
719 static int tcp_v4_hash_connect(struct sock *sk)
720 {
721         unsigned short snum = inet_sk(sk)->num;
722         struct tcp_bind_hashbucket *head;
723         struct tcp_bind_bucket *tb;
724         int ret;
725
726         if (!snum) {
727                 int rover;
728                 int low = sysctl_local_port_range[0];
729                 int high = sysctl_local_port_range[1];
730                 int remaining = (high - low) + 1;
731                 struct hlist_node *node;
732                 struct tcp_tw_bucket *tw = NULL;
733
734                 local_bh_disable();
735
736                 /* TODO. Actually it is not such a bad idea to remove
737                  * tcp_portalloc_lock before next submission to Linus.
738                  * As soon as we touch this place at all it is time to think.
739                  *
740                  * Now it protects single _advisory_ variable tcp_port_rover,
741                  * hence it is mostly useless.
742                  * Code will work nicely if we just delete it, but
743                  * I am afraid that in the contended case it will work no better or
744                  * even worse: another cpu will just hit the same bucket
745                  * and spin there.
746                  * So some cpu salt could remove both contention and
747                  * memory pingpong. Any ideas how to do this in a nice way?
748                  */
749                 spin_lock(&tcp_portalloc_lock);
750                 rover = tcp_port_rover;
751
752                 do {
753                         rover++;
754                         if ((rover < low) || (rover > high))
755                                 rover = low;
756                         head = &tcp_bhash[tcp_bhashfn(rover)];
757                         spin_lock(&head->lock);
758
759                         /* Does not bother with rcv_saddr checks,
760                          * because the established check is already
761                          * unique enough.
762                          */
763                         tb_for_each(tb, node, &head->chain) {
764                                 if (tb->port == rover) {
765                                         BUG_TRAP(!hlist_empty(&tb->owners));
766                                         if (tb->fastreuse >= 0)
767                                                 goto next_port;
768                                         if (!__tcp_v4_check_established(sk,
769                                                                         rover,
770                                                                         &tw))
771                                                 goto ok;
772                                         goto next_port;
773                                 }
774                         }
775
776                         tb = tcp_bucket_create(head, rover);
777                         if (!tb) {
778                                 spin_unlock(&head->lock);
779                                 break;
780                         }
781                         tb->fastreuse = -1;
782                         goto ok;
783
784                 next_port:
785                         spin_unlock(&head->lock);
786                 } while (--remaining > 0);
787                 tcp_port_rover = rover;
788                 spin_unlock(&tcp_portalloc_lock);
789
790                 local_bh_enable();
791
792                 return -EADDRNOTAVAIL;
793
794 ok:
795                 /* All locks still held and bhs disabled */
796                 tcp_port_rover = rover;
797                 spin_unlock(&tcp_portalloc_lock);
798
799                 tcp_bind_hash(sk, tb, rover);
800                 if (sk_unhashed(sk)) {
801                         inet_sk(sk)->sport = htons(rover);
802                         __tcp_v4_hash(sk, 0);
803                 }
804                 spin_unlock(&head->lock);
805
806                 if (tw) {
807                         tcp_tw_deschedule(tw);
808                         tcp_tw_put(tw);
809                 }
810
811                 ret = 0;
812                 goto out;
813         }
814
815         head  = &tcp_bhash[tcp_bhashfn(snum)];
816         tb  = tcp_sk(sk)->bind_hash;
817         spin_lock_bh(&head->lock);
818         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
819                 __tcp_v4_hash(sk, 0);
820                 spin_unlock_bh(&head->lock);
821                 return 0;
822         } else {
823                 spin_unlock(&head->lock);
824                 /* No definite answer... Walk to established hash table */
825                 ret = __tcp_v4_check_established(sk, snum, NULL);
826 out:
827                 local_bh_enable();
828                 return ret;
829         }
830 }
831
832 /* This will initiate an outgoing connection. */
833 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
834 {
835         struct inet_opt *inet = inet_sk(sk);
836         struct tcp_opt *tp = tcp_sk(sk);
837         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
838         struct rtable *rt;
839         u32 daddr, nexthop;
840         int tmp;
841         int err;
842
843         if (addr_len < sizeof(struct sockaddr_in))
844                 return -EINVAL;
845
846         if (usin->sin_family != AF_INET)
847                 return -EAFNOSUPPORT;
848
849         nexthop = daddr = usin->sin_addr.s_addr;
850         if (inet->opt && inet->opt->srr) {
851                 if (!daddr)
852                         return -EINVAL;
853                 nexthop = inet->opt->faddr;
854         }
855
856         tmp = ip_route_connect(&rt, nexthop, inet->saddr,
857                                RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
858                                IPPROTO_TCP,
859                                inet->sport, usin->sin_port, sk);
860         if (tmp < 0)
861                 return tmp;
862
863         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
864                 ip_rt_put(rt);
865                 return -ENETUNREACH;
866         }
867
868         if (!inet->opt || !inet->opt->srr)
869                 daddr = rt->rt_dst;
870
871         if (!inet->saddr)
872                 inet->saddr = rt->rt_src;
873         inet->rcv_saddr = inet->saddr;
874
875         if (tp->ts_recent_stamp && inet->daddr != daddr) {
876                 /* Reset inherited state */
877                 tp->ts_recent       = 0;
878                 tp->ts_recent_stamp = 0;
879                 tp->write_seq       = 0;
880         }
881
882         if (sysctl_tcp_tw_recycle &&
883             !tp->ts_recent_stamp && rt->rt_dst == daddr) {
884                 struct inet_peer *peer = rt_get_peer(rt);
885
886                 /* VJ's idea. We save the last timestamp seen from
887                  * the destination in the peer table when entering TIME-WAIT state,
888                  * and initialize ts_recent from it when trying a new connection.
889                  */
890
891                 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
892                         tp->ts_recent_stamp = peer->tcp_ts_stamp;
893                         tp->ts_recent = peer->tcp_ts;
894                 }
895         }
896
897         inet->dport = usin->sin_port;
898         inet->daddr = daddr;
899
900         tp->ext_header_len = 0;
901         if (inet->opt)
902                 tp->ext_header_len = inet->opt->optlen;
903
904         tp->mss_clamp = 536;
905
906         /* Socket identity is still unknown (sport may be zero).
907          * However we set the state to SYN-SENT and, without releasing the socket
908          * lock, select a source port, enter ourselves into the hash tables and
909          * complete initialization after this.
910          */
911         tcp_set_state(sk, TCP_SYN_SENT);
912         err = tcp_v4_hash_connect(sk);
913         if (err)
914                 goto failure;
915
916         err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
917         if (err)
918                 goto failure;
919
920         /* OK, now commit destination to socket.  */
921         __sk_dst_set(sk, &rt->u.dst);
922         tcp_v4_setup_caps(sk, &rt->u.dst);
923         tp->ext2_header_len = rt->u.dst.header_len;
924
925         if (!tp->write_seq)
926                 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
927                                                            inet->daddr,
928                                                            inet->sport,
929                                                            usin->sin_port);
930
931         inet->id = tp->write_seq ^ jiffies;
932
933         err = tcp_connect(sk);
934         rt = NULL;
935         if (err)
936                 goto failure;
937
938         return 0;
939
940 failure:
941         /* This unhashes the socket and releases the local port, if necessary. */
942         tcp_set_state(sk, TCP_CLOSE);
943         ip_rt_put(rt);
944         sk->sk_route_caps = 0;
945         inet->dport = 0;
946         return err;
947 }
948
949 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
950 {
951         return ((struct rtable *)skb->dst)->rt_iif;
952 }
953
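/* Hash a SYN-queue entry by remote address and port, salted with the
 * listener's random hash_rnd so peers cannot force collisions.
 */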
954 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
955 {
956         return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
957 }
958
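/* Look up a pending open_request on this listener by remote port/address
 * and local address; *prevp is set so the caller can unlink it later.
 */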
959 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
960                                               struct open_request ***prevp,
961                                               __u16 rport,
962                                               __u32 raddr, __u32 laddr)
963 {
964         struct tcp_listen_opt *lopt = tp->listen_opt;
965         struct open_request *req, **prev;
966
967         for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
968              (req = *prev) != NULL;
969              prev = &req->dl_next) {
970                 if (req->rmt_port == rport &&
971                     req->af.v4_req.rmt_addr == raddr &&
972                     req->af.v4_req.loc_addr == laddr &&
973                     TCP_INET_FAMILY(req->class->family)) {
974                         BUG_TRAP(!req->sk);
975                         *prevp = prev;
976                         break;
977                 }
978         }
979
980         return req;
981 }
982
983 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
984 {
985         struct tcp_opt *tp = tcp_sk(sk);
986         struct tcp_listen_opt *lopt = tp->listen_opt;
987         u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
988
989         req->expires = jiffies + TCP_TIMEOUT_INIT;
990         req->retrans = 0;
991         req->sk = NULL;
992         req->dl_next = lopt->syn_table[h];
993
994         write_lock(&tp->syn_wait_lock);
995         lopt->syn_table[h] = req;
996         write_unlock(&tp->syn_wait_lock);
997
998         tcp_synq_added(sk);
999 }
1000
1001
1002 /*
1003  * This routine does path mtu discovery as defined in RFC1191.
1004  */
1005 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
1006                                      u32 mtu)
1007 {
1008         struct dst_entry *dst;
1009         struct inet_opt *inet = inet_sk(sk);
1010         struct tcp_opt *tp = tcp_sk(sk);
1011
1012         /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
1013          * sent out by Linux are always < 576 bytes so they should go through
1014          * unfragmented).
1015          */
1016         if (sk->sk_state == TCP_LISTEN)
1017                 return;
1018
1019         /* We don't check in the dst entry whether pmtu discovery is forbidden
1020          * on this route. We just assume that no packet-too-big packets
1021          * are sent back when pmtu discovery is not active.
1022          * There is a small race when the user changes this flag in the
1023          * route, but I think that's acceptable.
1024          */
1025         if ((dst = __sk_dst_check(sk, 0)) == NULL)
1026                 return;
1027
1028         dst->ops->update_pmtu(dst, mtu);
1029
1030         /* Something is about to go wrong... Remember the soft error
1031          * in case this connection is not able to recover.
1032          */
1033         if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
1034                 sk->sk_err_soft = EMSGSIZE;
1035
1036         mtu = dst_pmtu(dst);
1037
1038         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
1039             tp->pmtu_cookie > mtu) {
1040                 tcp_sync_mss(sk, mtu);
1041
1042                 /* Resend the TCP packet because it's
1043                  * clear that the old packet has been
1044                  * dropped. This is the new "fast" path mtu
1045                  * discovery.
1046                  */
1047                 tcp_simple_retransmit(sk);
1048         } /* else let the usual retransmit timer handle it */
1049 }
1050
1051 /*
1052  * This routine is called by the ICMP module when it gets some
1053  * sort of error condition.  If err < 0 then the socket should
1054  * be closed and the error returned to the user.  If err > 0
1055  * it's just the icmp type << 8 | icmp code.  After adjustment
1056  * header points to the first 8 bytes of the tcp header.  We need
1057  * to find the appropriate port.
1058  *
1059  * The locking strategy used here is very "optimistic". When
1060  * someone else accesses the socket the ICMP is just dropped
1061  * and for some paths there is no check at all.
1062  * A more general error queue to queue errors for later handling
1063  * is probably better.
1064  *
1065  */
1066
1067 void tcp_v4_err(struct sk_buff *skb, u32 info)
1068 {
1069         struct iphdr *iph = (struct iphdr *)skb->data;
1070         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1071         struct tcp_opt *tp;
1072         struct inet_opt *inet;
1073         int type = skb->h.icmph->type;
1074         int code = skb->h.icmph->code;
1075         struct sock *sk;
1076         __u32 seq;
1077         int err;
1078
1079         if (skb->len < (iph->ihl << 2) + 8) {
1080                 ICMP_INC_STATS_BH(IcmpInErrors);
1081                 return;
1082         }
1083
1084         sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1085                            th->source, tcp_v4_iif(skb));
1086         if (!sk) {
1087                 ICMP_INC_STATS_BH(IcmpInErrors);
1088                 return;
1089         }
1090         if (sk->sk_state == TCP_TIME_WAIT) {
1091                 tcp_tw_put((struct tcp_tw_bucket *)sk);
1092                 return;
1093         }
1094
1095         bh_lock_sock(sk);
1096         /* If too many ICMPs get dropped on busy
1097          * servers this needs to be solved differently.
1098          */
1099         if (sock_owned_by_user(sk))
1100                 NET_INC_STATS_BH(LockDroppedIcmps);
1101
1102         if (sk->sk_state == TCP_CLOSE)
1103                 goto out;
1104
1105         tp = tcp_sk(sk);
1106         seq = ntohl(th->seq);
1107         if (sk->sk_state != TCP_LISTEN &&
1108             !between(seq, tp->snd_una, tp->snd_nxt)) {
1109                 NET_INC_STATS(OutOfWindowIcmps);
1110                 goto out;
1111         }
1112
1113         switch (type) {
1114         case ICMP_SOURCE_QUENCH:
1115                 /* This is deprecated, but if someone generated it,
1116                  * we have no reason to ignore it.
1117                  */
1118                 if (!sock_owned_by_user(sk))
1119                         tcp_enter_cwr(tp);
1120                 goto out;
1121         case ICMP_PARAMETERPROB:
1122                 err = EPROTO;
1123                 break;
1124         case ICMP_DEST_UNREACH:
1125                 if (code > NR_ICMP_UNREACH)
1126                         goto out;
1127
1128                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1129                         if (!sock_owned_by_user(sk))
1130                                 do_pmtu_discovery(sk, iph, info);
1131                         goto out;
1132                 }
1133
1134                 err = icmp_err_convert[code].errno;
1135                 break;
1136         case ICMP_TIME_EXCEEDED:
1137                 err = EHOSTUNREACH;
1138                 break;
1139         default:
1140                 goto out;
1141         }
1142
1143         switch (sk->sk_state) {
1144                 struct open_request *req, **prev;
1145         case TCP_LISTEN:
1146                 if (sock_owned_by_user(sk))
1147                         goto out;
1148
1149                 req = tcp_v4_search_req(tp, &prev, th->dest,
1150                                         iph->daddr, iph->saddr);
1151                 if (!req)
1152                         goto out;
1153
1154                 /* ICMPs are not backlogged, hence we cannot get
1155                    an established socket here.
1156                  */
1157                 BUG_TRAP(!req->sk);
1158
1159                 if (seq != req->snt_isn) {
1160                         NET_INC_STATS_BH(OutOfWindowIcmps);
1161                         goto out;
1162                 }
1163
1164                 /*
1165                  * Still in SYN_RECV, just remove it silently.
1166                  * There is no good way to pass the error to the newly
1167                  * created socket, and POSIX does not want network
1168                  * errors returned from accept().
1169                  */
1170                 tcp_synq_drop(sk, req, prev);
1171                 goto out;
1172
1173         case TCP_SYN_SENT:
1174         case TCP_SYN_RECV:  /* Cannot happen.
1175                                It can happen, e.g., if SYNs crossed.
1176                              */
1177                 if (!sock_owned_by_user(sk)) {
1178                         TCP_INC_STATS_BH(TcpAttemptFails);
1179                         sk->sk_err = err;
1180
1181                         sk->sk_error_report(sk);
1182
1183                         tcp_done(sk);
1184                 } else {
1185                         sk->sk_err_soft = err;
1186                 }
1187                 goto out;
1188         }
1189
1190         /* If we've already connected we will keep trying
1191          * until we time out, or the user gives up.
1192          *
1193          * rfc1122 4.2.3.9 allows us to consider as hard errors
1194          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
1195          * but it is obsoleted by pmtu discovery).
1196          *
1197          * Note that in the modern internet, where routing is unreliable
1198          * and broken firewalls sit in every dark corner sending random
1199          * errors ordered by their masters, even these two messages finally
1200          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
1201          *
1202          * Now we are in compliance with RFCs.
1203          *                                                      --ANK (980905)
1204          */
1205
1206         inet = inet_sk(sk);
1207         if (!sock_owned_by_user(sk) && inet->recverr) {
1208                 sk->sk_err = err;
1209                 sk->sk_error_report(sk);
1210         } else  { /* Only an error on timeout */
1211                 sk->sk_err_soft = err;
1212         }
1213
1214 out:
1215         bh_unlock_sock(sk);
1216         sock_put(sk);
1217 }
1218
1219 /* This routine computes an IPv4 TCP checksum. */
1220 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1221                        struct sk_buff *skb)
1222 {
1223         struct inet_opt *inet = inet_sk(sk);
1224
1225         if (skb->ip_summed == CHECKSUM_HW) {
1226                 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1227                 skb->csum = offsetof(struct tcphdr, check);
1228         } else {
1229                 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1230                                          csum_partial((char *)th,
1231                                                       th->doff << 2,
1232                                                       skb->csum));
1233         }
1234 }
1235
1236 /*
1237  *      This routine will send an RST to the other tcp.
1238  *
1239  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
1240  *                    for the reset?
1241  *      Answer: if a packet caused an RST, it is not for a socket
1242  *              existing in our system; if it is matched to a socket,
1243  *              it is just a duplicate segment or a bug in the other side's TCP.
1244  *              So we build the reply based only on the parameters
1245  *              that arrived with the segment.
1246  *      Exception: precedence violation. We do not implement it in any case.
1247  */
1248
1249 static void tcp_v4_send_reset(struct sk_buff *skb)
1250 {
1251         struct tcphdr *th = skb->h.th;
1252         struct tcphdr rth;
1253         struct ip_reply_arg arg;
1254
1255         /* Never send a reset in response to a reset. */
1256         if (th->rst)
1257                 return;
1258
1259         if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1260                 return;
1261
1262         /* Swap the send and the receive. */
1263         memset(&rth, 0, sizeof(struct tcphdr));
1264         rth.dest   = th->source;
1265         rth.source = th->dest;
1266         rth.doff   = sizeof(struct tcphdr) / 4;
1267         rth.rst    = 1;
1268
1269         if (th->ack) {
1270                 rth.seq = th->ack_seq;
1271         } else {
1272                 rth.ack = 1;
1273                 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1274                                     skb->len - (th->doff << 2));
1275         }
1276
1277         memset(&arg, 0, sizeof arg);
1278         arg.iov[0].iov_base = (unsigned char *)&rth;
1279         arg.iov[0].iov_len  = sizeof rth;
1280         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1281                                       skb->nh.iph->saddr, /*XXX*/
1282                                       sizeof(struct tcphdr), IPPROTO_TCP, 0);
1283         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1284
1285         ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1286
1287         TCP_INC_STATS_BH(TcpOutSegs);
1288         TCP_INC_STATS_BH(TcpOutRsts);
1289 }
1290
1291 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1292    outside of socket context, is certainly ugly. What can I do?
1293  */
1294
1295 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1296                             u32 win, u32 ts)
1297 {
1298         struct tcphdr *th = skb->h.th;
1299         struct {
1300                 struct tcphdr th;
1301                 u32 tsopt[3];
1302         } rep;
1303         struct ip_reply_arg arg;
1304
1305         memset(&rep.th, 0, sizeof(struct tcphdr));
1306         memset(&arg, 0, sizeof arg);
1307
1308         arg.iov[0].iov_base = (unsigned char *)&rep;
1309         arg.iov[0].iov_len  = sizeof(rep.th);
1310         if (ts) {
1311                 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1312                                      (TCPOPT_TIMESTAMP << 8) |
1313                                      TCPOLEN_TIMESTAMP);
1314                 rep.tsopt[1] = htonl(tcp_time_stamp);
1315                 rep.tsopt[2] = htonl(ts);
1316                 arg.iov[0].iov_len = sizeof(rep);
1317         }
1318
1319         /* Swap the send and the receive. */
1320         rep.th.dest    = th->source;
1321         rep.th.source  = th->dest;
1322         rep.th.doff    = arg.iov[0].iov_len / 4;
1323         rep.th.seq     = htonl(seq);
1324         rep.th.ack_seq = htonl(ack);
1325         rep.th.ack     = 1;
1326         rep.th.window  = htons(win);
1327
1328         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1329                                       skb->nh.iph->saddr, /*XXX*/
1330                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
1331         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1332
1333         ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1334
1335         TCP_INC_STATS_BH(TcpOutSegs);
1336 }
1337
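/* Answer a segment arriving for a TIME-WAIT socket with an ACK built from
 * the saved sequence, window and timestamp state, then release our
 * reference on the tw bucket.
 */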
1338 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1339 {
1340         struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1341
1342         tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1343                         tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1344
1345         tcp_tw_put(tw);
1346 }
1347
1348 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1349 {
1350         tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1351                         req->ts_recent);
1352 }
1353
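/* Build a route for replying to a connection request, honouring a source
 * route stored in the saved IP options and rejecting strict source routes
 * when the route would go via an intermediate gateway.
 */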
1354 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1355                                           struct open_request *req)
1356 {
1357         struct rtable *rt;
1358         struct ip_options *opt = req->af.v4_req.opt;
1359         struct flowi fl = { .oif = sk->sk_bound_dev_if,
1360                             .nl_u = { .ip4_u =
1361                                       { .daddr = ((opt && opt->srr) ?
1362                                                   opt->faddr :
1363                                                   req->af.v4_req.rmt_addr),
1364                                         .saddr = req->af.v4_req.loc_addr,
1365                                         .tos = RT_CONN_FLAGS(sk) } },
1366                             .proto = IPPROTO_TCP,
1367                             .uli_u = { .ports =
1368                                        { .sport = inet_sk(sk)->sport,
1369                                          .dport = req->rmt_port } } };
1370
1371         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1372                 IP_INC_STATS_BH(OutNoRoutes);
1373                 return NULL;
1374         }
1375         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1376                 ip_rt_put(rt);
1377                 IP_INC_STATS_BH(OutNoRoutes);
1378                 return NULL;
1379         }
1380         return &rt->u.dst;
1381 }
1382
1383 /*
1384  *      Send a SYN-ACK after having received an ACK.
1385  *      This still operates on an open_request only, not on a big
1386  *      socket.
1387  */
1388 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1389                               struct dst_entry *dst)
1390 {
1391         int err = -1;
1392         struct sk_buff * skb;
1393
1394         /* First, grab a route. */
1395         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1396                 goto out;
1397
1398         skb = tcp_make_synack(sk, dst, req);
1399
1400         if (skb) {
1401                 struct tcphdr *th = skb->h.th;
1402
1403                 th->check = tcp_v4_check(th, skb->len,
1404                                          req->af.v4_req.loc_addr,
1405                                          req->af.v4_req.rmt_addr,
1406                                          csum_partial((char *)th, skb->len,
1407                                                       skb->csum));
1408
1409                 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1410                                             req->af.v4_req.rmt_addr,
1411                                             req->af.v4_req.opt);
1412                 if (err == NET_XMIT_CN)
1413                         err = 0;
1414         }
1415
1416 out:
1417         dst_release(dst);
1418         return err;
1419 }
1420
1421 /*
1422  *      IPv4 open_request destructor.
1423  */
1424 static void tcp_v4_or_free(struct open_request *req)
1425 {
1426         if (req->af.v4_req.opt)
1427                 kfree(req->af.v4_req.opt);
1428 }
1429
1430 static inline void syn_flood_warning(struct sk_buff *skb)
1431 {
1432         static unsigned long warntime;
1433
1434         if (time_after(jiffies, (warntime + HZ * 60))) {
1435                 warntime = jiffies;
1436                 printk(KERN_INFO
1437                        "possible SYN flooding on port %d. Sending cookies.\n",
1438                        ntohs(skb->h.th->dest));
1439         }
1440 }
1441
1442 /*
1443  * Save and compile IPv4 options into the open_request if needed.
1444  */
1445 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1446                                                      struct sk_buff *skb)
1447 {
1448         struct ip_options *opt = &(IPCB(skb)->opt);
1449         struct ip_options *dopt = NULL;
1450
1451         if (opt && opt->optlen) {
1452                 int opt_size = optlength(opt);
1453                 dopt = kmalloc(opt_size, GFP_ATOMIC);
1454                 if (dopt) {
1455                         if (ip_options_echo(dopt, skb)) {
1456                                 kfree(dopt);
1457                                 dopt = NULL;
1458                         }
1459                 }
1460         }
1461         return dopt;
1462 }
1463
1464 /*
1465  * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1466  * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1467  * It would be better to replace it with a global counter for all sockets
1468  * but then some measure against one socket starving all other sockets
1469  * would be needed.
1470  *
1471  * It was 128 by default. Experiments with real servers show, that
1472  * it is absolutely not enough even at 100conn/sec. 256 cures most
1473  * of problems. This value is adjusted to 128 for very small machines
1474  * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
1475  * Further increasing requires to change hash table size.
1476  */
1477 int sysctl_max_syn_backlog = 256;
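
/* The memory-based adjustment described above happens at boot time
 * outside this file (tcp_init() scales it together with the hash
 * tables).  The fragment below is only an illustrative sketch of that
 * kind of scaling; the function name and thresholds are assumptions,
 * and the block is never compiled.
 */
#if 0	/* illustrative sketch only, never compiled */
static void __init example_scale_syn_backlog(unsigned long ram_mb)
{
	/* Hypothetical thresholds mirroring the comment above: tiny
	 * machines get 128, well-equipped ones get 1024, 256 otherwise.
	 */
	if (ram_mb <= 32)
		sysctl_max_syn_backlog = 128;
	else if (ram_mb >= 256)
		sysctl_max_syn_backlog = 1024;
	else
		sysctl_max_syn_backlog = 256;
}
#endif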
1478
1479 struct or_calltable or_ipv4 = {
1480         .family         =       PF_INET,
1481         .rtx_syn_ack    =       tcp_v4_send_synack,
1482         .send_ack       =       tcp_v4_or_send_ack,
1483         .destructor     =       tcp_v4_or_free,
1484         .send_reset     =       tcp_v4_send_reset,
1485 };
1486
1487 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1488 {
1489         struct tcp_opt tp;
1490         struct open_request *req;
1491         __u32 saddr = skb->nh.iph->saddr;
1492         __u32 daddr = skb->nh.iph->daddr;
1493         __u32 isn = TCP_SKB_CB(skb)->when;
1494         struct dst_entry *dst = NULL;
1495 #ifdef CONFIG_SYN_COOKIES
1496         int want_cookie = 0;
1497 #else
1498 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1499 #endif
1500
1501         /* Never answer SYNs sent to broadcast or multicast addresses. */
1502         if (((struct rtable *)skb->dst)->rt_flags &
1503             (RTCF_BROADCAST | RTCF_MULTICAST))
1504                 goto drop;
1505
1506         /* TW buckets are converted to open requests without
1507          * limitation: they conserve resources and the peer has
1508          * evidently proven to be real.
1509          */
1510         if (tcp_synq_is_full(sk) && !isn) {
1511 #ifdef CONFIG_SYN_COOKIES
1512                 if (sysctl_tcp_syncookies) {
1513                         want_cookie = 1;
1514                 } else
1515 #endif
1516                 goto drop;
1517         }
1518
1519         /* The accept backlog is full. If we have already queued enough
1520          * warm entries in the syn queue, drop the request. That is better
1521          * than clogging the syn queue with openreqs whose timeouts grow
1522          * exponentially.
1523          */
1524         if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1525                 goto drop;
1526
1527         req = tcp_openreq_alloc();
1528         if (!req)
1529                 goto drop;
1530
1531         tcp_clear_options(&tp);
1532         tp.mss_clamp = 536;
1533         tp.user_mss  = tcp_sk(sk)->user_mss;
1534
1535         tcp_parse_options(skb, &tp, 0);
1536
1537         if (want_cookie) {
1538                 tcp_clear_options(&tp);
1539                 tp.saw_tstamp = 0;
1540         }
1541
1542         if (tp.saw_tstamp && !tp.rcv_tsval) {
1543                 /* Some OSes (unknown ones, seen on web servers carrying
1544                  * content of interest only to Windows users) send the
1545                  * timestamp option with a zero value in the SYN. It is the
1546                  * easy case: we simply do not advertise TS support.
1547                  */
1548                 tp.saw_tstamp = 0;
1549                 tp.tstamp_ok  = 0;
1550         }
1551         tp.tstamp_ok = tp.saw_tstamp;
1552
1553         tcp_openreq_init(req, &tp, skb);
1554
1555         req->af.v4_req.loc_addr = daddr;
1556         req->af.v4_req.rmt_addr = saddr;
1557         req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1558         req->class = &or_ipv4;
1559         if (!want_cookie)
1560                 TCP_ECN_create_request(req, skb->h.th);
1561
1562         if (want_cookie) {
1563 #ifdef CONFIG_SYN_COOKIES
1564                 syn_flood_warning(skb);
1565 #endif
1566                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1567         } else if (!isn) {
1568                 struct inet_peer *peer = NULL;
1569
1570                 /* VJ's idea. We save the last timestamp seen from
1571                  * the destination in the peer table when entering
1572                  * TIME-WAIT state, and check against it before
1573                  * accepting a new connection request.
1574                  *
1575                  * If "isn" is non-zero, this request hit a live
1576                  * timewait bucket, so all the necessary checks were
1577                  * already made by the code processing timewait state.
1578                  */
1579                 if (tp.saw_tstamp &&
1580                     sysctl_tcp_tw_recycle &&
1581                     (dst = tcp_v4_route_req(sk, req)) != NULL &&
1582                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1583                     peer->v4daddr == saddr) {
1584                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1585                             (s32)(peer->tcp_ts - req->ts_recent) >
1586                                                         TCP_PAWS_WINDOW) {
1587                                 NET_INC_STATS_BH(PAWSPassiveRejected);
1588                                 dst_release(dst);
1589                                 goto drop_and_free;
1590                         }
1591                 }
1592                 /* Kill the following clause if you dislike this approach. */
1593                 else if (!sysctl_tcp_syncookies &&
1594                          (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1595                           (sysctl_max_syn_backlog >> 2)) &&
1596                          (!peer || !peer->tcp_ts_stamp) &&
1597                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1598                         /* Without syncookies, the last quarter of
1599                          * the backlog is reserved for destinations
1600                          * that have proven to be alive.
1601                          * It means that during a SYN flood we keep
1602                          * talking to destinations we had already
1603                          * remembered before the flood started.
1604                          */
1605                         NETDEBUG(if (net_ratelimit()) \
1606                                         printk(KERN_DEBUG "TCP: drop open "
1607                                                           "request from %u.%u."
1608                                                           "%u.%u/%u\n", \
1609                                                NIPQUAD(saddr),
1610                                                ntohs(skb->h.th->source)));
1611                         dst_release(dst);
1612                         goto drop_and_free;
1613                 }
1614
1615                 isn = tcp_v4_init_sequence(sk, skb);
1616         }
1617         req->snt_isn = isn;
1618
1619         if (tcp_v4_send_synack(sk, req, dst))
1620                 goto drop_and_free;
1621
1622         if (want_cookie) {
1623                 tcp_openreq_free(req);
1624         } else {
1625                 tcp_v4_synq_add(sk, req);
1626         }
1627         return 0;
1628
1629 drop_and_free:
1630         tcp_openreq_free(req);
1631 drop:
1632         TCP_INC_STATS_BH(TcpAttemptFails);
1633         return 0;
1634 }
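
/* The timestamp test buried in tcp_v4_conn_request() above can be read
 * as the standalone predicate sketched below.  This helper does not
 * exist in the kernel; it is only a restatement of the check and is
 * kept out of the build.
 */
#if 0	/* illustrative restatement only, never compiled */
static int example_paws_reject(const struct inet_peer *peer, __u32 ts_recent)
{
	/* Reject if the peer entry is recent enough to be trusted and
	 * the timestamp carried in the SYN is older than what we last
	 * saw, i.e. the segment is likely a delayed duplicate.
	 */
	return xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
	       (s32)(peer->tcp_ts - ts_recent) > TCP_PAWS_WINDOW;
}
#endif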
1635
1636
1637 /*
1638  * The three way handshake has completed - we got the final valid ACK -
1639  * now create the new socket.
1640  */
1641 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1642                                   struct open_request *req,
1643                                   struct dst_entry *dst)
1644 {
1645         struct inet_opt *newinet;
1646         struct tcp_opt *newtp;
1647         struct sock *newsk;
1648
1649         if (sk_acceptq_is_full(sk))
1650                 goto exit_overflow;
1651
1652         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1653                 goto exit;
1654
1655         newsk = tcp_create_openreq_child(sk, req, skb);
1656         if (!newsk)
1657                 goto exit;
1658
1659         newsk->sk_dst_cache = dst;
1660         tcp_v4_setup_caps(newsk, dst);
1661
1662         newtp                 = tcp_sk(newsk);
1663         newinet               = inet_sk(newsk);
1664         newinet->daddr        = req->af.v4_req.rmt_addr;
1665         newinet->rcv_saddr    = req->af.v4_req.loc_addr;
1666         newinet->saddr        = req->af.v4_req.loc_addr;
1667         newinet->opt          = req->af.v4_req.opt;
1668         req->af.v4_req.opt    = NULL;
1669         newinet->mc_index     = tcp_v4_iif(skb);
1670         newinet->mc_ttl       = skb->nh.iph->ttl;
1671         newtp->ext_header_len = 0;
1672         if (newinet->opt)
1673                 newtp->ext_header_len = newinet->opt->optlen;
1674         newtp->ext2_header_len = dst->header_len;
1675         newinet->id = newtp->write_seq ^ jiffies;
1676
1677         tcp_sync_mss(newsk, dst_pmtu(dst));
1678         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1679         tcp_initialize_rcv_mss(newsk);
1680
1681         __tcp_v4_hash(newsk, 0);
1682         __tcp_inherit_port(sk, newsk);
1683
1684         return newsk;
1685
1686 exit_overflow:
1687         NET_INC_STATS_BH(ListenOverflows);
1688 exit:
1689         NET_INC_STATS_BH(ListenDrops);
1690         dst_release(dst);
1691         return NULL;
1692 }
1693
1694 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1695 {
1696         struct tcphdr *th = skb->h.th;
1697         struct iphdr *iph = skb->nh.iph;
1698         struct tcp_opt *tp = tcp_sk(sk);
1699         struct sock *nsk;
1700         struct open_request **prev;
1701         /* Find possible connection requests. */
1702         struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1703                                                      iph->saddr, iph->daddr);
1704         if (req)
1705                 return tcp_check_req(sk, skb, req, prev);
1706
1707         nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1708                                           th->source,
1709                                           skb->nh.iph->daddr,
1710                                           ntohs(th->dest),
1711                                           tcp_v4_iif(skb));
1712
1713         if (nsk) {
1714                 if (nsk->sk_state != TCP_TIME_WAIT) {
1715                         bh_lock_sock(nsk);
1716                         return nsk;
1717                 }
1718                 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1719                 return NULL;
1720         }
1721
1722 #ifdef CONFIG_SYN_COOKIES
1723         if (!th->rst && !th->syn && th->ack)
1724                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1725 #endif
1726         return sk;
1727 }
1728
1729 static int tcp_v4_checksum_init(struct sk_buff *skb)
1730 {
1731         if (skb->ip_summed == CHECKSUM_HW) {
1732                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1733                 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1734                                   skb->nh.iph->daddr, skb->csum))
1735                         return 0;
1736
1737                 NETDEBUG(if (net_ratelimit())
1738                                 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1739                 skb->ip_summed = CHECKSUM_NONE;
1740         }
1741         if (skb->len <= 76) {
1742                 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1743                                  skb->nh.iph->daddr,
1744                                  skb_checksum(skb, 0, skb->len, 0)))
1745                         return -1;
1746                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1747         } else {
1748                 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1749                                           skb->nh.iph->saddr,
1750                                           skb->nh.iph->daddr, 0);
1751         }
1752         return 0;
1753 }
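
/* Summary of the strategy above: a segment whose hardware-computed
 * checksum verifies against the pseudo-header is accepted immediately;
 * short segments (<= 76 bytes) are cheap enough to verify in full right
 * here; for anything longer only the pseudo-header sum is precomputed,
 * and the final verification is left to tcp_checksum_complete() on the
 * paths that touch the data anyway.
 */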
1754
1755
1756 /* The socket must have its spinlock held when we get
1757  * here.
1758  *
1759  * We have a potential double-lock case here, so even when
1760  * doing backlog processing we use the BH locking scheme.
1761  * This is because we cannot sleep with the original spinlock
1762  * held.
1763  */
1764 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1765 {
1766         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1767                 TCP_CHECK_TIMER(sk);
1768                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1769                         goto reset;
1770                 TCP_CHECK_TIMER(sk);
1771                 return 0;
1772         }
1773
1774         if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1775                 goto csum_err;
1776
1777         if (sk->sk_state == TCP_LISTEN) {
1778                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1779                 if (!nsk)
1780                         goto discard;
1781
1782                 if (nsk != sk) {
1783                         if (tcp_child_process(sk, nsk, skb))
1784                                 goto reset;
1785                         return 0;
1786                 }
1787         }
1788
1789         TCP_CHECK_TIMER(sk);
1790         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1791                 goto reset;
1792         TCP_CHECK_TIMER(sk);
1793         return 0;
1794
1795 reset:
1796         tcp_v4_send_reset(skb);
1797 discard:
1798         kfree_skb(skb);
1799         /* Be careful here. If this function gets more complicated and
1800          * gcc suffers from register pressure on the x86, sk (in %ebx)
1801          * might be destroyed here. This current version compiles correctly,
1802          * but you have been warned.
1803          */
1804         return 0;
1805
1806 csum_err:
1807         TCP_INC_STATS_BH(TcpInErrs);
1808         goto discard;
1809 }
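
/* For reference, the caller-side pattern the locking comment above
 * refers to looks roughly like the fragment below; it is a condensed
 * copy of what tcp_v4_rcv() further down actually does and is never
 * compiled here.
 */
#if 0	/* condensed illustration of the caller in tcp_v4_rcv(), never compiled */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* Nobody from process context holds the socket: handle
		 * the segment now, possibly via the prequeue. */
		if (!tcp_prequeue(sk, skb))
			tcp_v4_do_rcv(sk, skb);
	} else {
		/* Process context owns the lock: queue to the backlog,
		 * which release_sock() later drains through backlog_rcv. */
		sk_add_backlog(sk, skb);
	}
	bh_unlock_sock(sk);
#endif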
1810
1811 /*
1812  *      From tcp_input.c
1813  */
1814
1815 int tcp_v4_rcv(struct sk_buff *skb)
1816 {
1817         struct tcphdr *th;
1818         struct sock *sk;
1819         int ret;
1820
1821         if (skb->pkt_type != PACKET_HOST)
1822                 goto discard_it;
1823
1824         /* Count it even if it's bad */
1825         TCP_INC_STATS_BH(TcpInSegs);
1826
1827         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1828                 goto discard_it;
1829
1830         th = skb->h.th;
1831
1832         if (th->doff < sizeof(struct tcphdr) / 4)
1833                 goto bad_packet;
1834         if (!pskb_may_pull(skb, th->doff * 4))
1835                 goto discard_it;
1836
1837         /* An explanation is required here, I think.
1838          * Packet length and doff are validated by header prediction,
1839          * provided the case of th->doff==0 is eliminated above.
1840          * So, we defer the checks. */
1841         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1842              tcp_v4_checksum_init(skb) < 0))
1843                 goto bad_packet;
1844
1845         th = skb->h.th;
1846         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1847         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1848                                     skb->len - th->doff * 4);
1849         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1850         TCP_SKB_CB(skb)->when    = 0;
1851         TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
1852         TCP_SKB_CB(skb)->sacked  = 0;
1853
1854         sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1855                              skb->nh.iph->daddr, ntohs(th->dest),
1856                              tcp_v4_iif(skb));
1857
1858         if (!sk)
1859                 goto no_tcp_socket;
1860
1861 process:
1862         if (sk->sk_state == TCP_TIME_WAIT)
1863                 goto do_time_wait;
1864
1865         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1866                 goto discard_and_relse;
1867
1868         if (sk_filter(sk, skb, 0))
1869                 goto discard_and_relse;
1870
1871         skb->dev = NULL;
1872
1873         bh_lock_sock(sk);
1874         ret = 0;
1875         if (!sock_owned_by_user(sk)) {
1876                 if (!tcp_prequeue(sk, skb))
1877                         ret = tcp_v4_do_rcv(sk, skb);
1878         } else
1879                 sk_add_backlog(sk, skb);
1880         bh_unlock_sock(sk);
1881
1882         sock_put(sk);
1883
1884         return ret;
1885
1886 no_tcp_socket:
1887         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1888                 goto discard_it;
1889
1890         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1891 bad_packet:
1892                 TCP_INC_STATS_BH(TcpInErrs);
1893         } else {
1894                 tcp_v4_send_reset(skb);
1895         }
1896
1897 discard_it:
1898         /* Discard frame. */
1899         kfree_skb(skb);
1900         return 0;
1901
1902 discard_and_relse:
1903         sock_put(sk);
1904         goto discard_it;
1905
1906 do_time_wait:
1907         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1908                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1909                 goto discard_it;
1910         }
1911
1912         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1913                 TCP_INC_STATS_BH(TcpInErrs);
1914                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1915                 goto discard_it;
1916         }
1917         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1918                                            skb, th, skb->len)) {
1919         case TCP_TW_SYN: {
1920                 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1921                                                           ntohs(th->dest),
1922                                                           tcp_v4_iif(skb));
1923                 if (sk2) {
1924                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1925                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1926                         sk = sk2;
1927                         goto process;
1928                 }
1929                 /* Fall through to ACK */
1930         }
1931         case TCP_TW_ACK:
1932                 tcp_v4_timewait_ack(sk, skb);
1933                 break;
1934         case TCP_TW_RST:
1935                 goto no_tcp_socket;
1936         case TCP_TW_SUCCESS:;
1937         }
1938         goto discard_it;
1939 }
1940
1941 /* With per-bucket locks this operation is not atomic, so this
1942  * unhash-then-hash version is no worse.
1943  */
1944 static void __tcp_v4_rehash(struct sock *sk)
1945 {
1946         sk->sk_prot->unhash(sk);
1947         sk->sk_prot->hash(sk);
1948 }
1949
1950 static int tcp_v4_reselect_saddr(struct sock *sk)
1951 {
1952         struct inet_opt *inet = inet_sk(sk);
1953         int err;
1954         struct rtable *rt;
1955         __u32 old_saddr = inet->saddr;
1956         __u32 new_saddr;
1957         __u32 daddr = inet->daddr;
1958
1959         if (inet->opt && inet->opt->srr)
1960                 daddr = inet->opt->faddr;
1961
1962         /* Query new route. */
1963         err = ip_route_connect(&rt, daddr, 0,
1964                                RT_TOS(inet->tos) | sk->sk_localroute,
1965                                sk->sk_bound_dev_if,
1966                                IPPROTO_TCP,
1967                                inet->sport, inet->dport, sk);
1968         if (err)
1969                 return err;
1970
1971         __sk_dst_set(sk, &rt->u.dst);
1972         tcp_v4_setup_caps(sk, &rt->u.dst);
1973         tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1974
1975         new_saddr = rt->rt_src;
1976
1977         if (new_saddr == old_saddr)
1978                 return 0;
1979
1980         if (sysctl_ip_dynaddr > 1) {
1981                 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1982                                  "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1983                        NIPQUAD(old_saddr),
1984                        NIPQUAD(new_saddr));
1985         }
1986
1987         inet->saddr = new_saddr;
1988         inet->rcv_saddr = new_saddr;
1989
1990         /* XXX The only ugly spot where we need to
1991          * XXX really change the socket's identity after
1992          * XXX it has entered the hashes. -DaveM
1993          *
1994          * Besides that, it does not check for connection
1995          * uniqueness. Expect trouble.
1996          */
1997         __tcp_v4_rehash(sk);
1998         return 0;
1999 }
2000
2001 int tcp_v4_rebuild_header(struct sock *sk)
2002 {
2003         struct inet_opt *inet = inet_sk(sk);
2004         struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
2005         u32 daddr;
2006         int err;
2007
2008         /* Route is OK, nothing to do. */
2009         if (rt)
2010                 return 0;
2011
2012         /* Reroute. */
2013         daddr = inet->daddr;
2014         if (inet->opt && inet->opt->srr)
2015                 daddr = inet->opt->faddr;
2016
2017         {
2018                 struct flowi fl = { .oif = sk->sk_bound_dev_if,
2019                                     .nl_u = { .ip4_u =
2020                                               { .daddr = daddr,
2021                                                 .saddr = inet->saddr,
2022                                                 .tos = RT_CONN_FLAGS(sk) } },
2023                                     .proto = IPPROTO_TCP,
2024                                     .uli_u = { .ports =
2025                                                { .sport = inet->sport,
2026                                                  .dport = inet->dport } } };
2027                                                 
2028                 err = ip_route_output_flow(&rt, &fl, sk, 0);
2029         }
2030         if (!err) {
2031                 __sk_dst_set(sk, &rt->u.dst);
2032                 tcp_v4_setup_caps(sk, &rt->u.dst);
2033                 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
2034                 return 0;
2035         }
2036
2037         /* Routing failed... */
2038         sk->sk_route_caps = 0;
2039
2040         if (!sysctl_ip_dynaddr ||
2041             sk->sk_state != TCP_SYN_SENT ||
2042             (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
2043             (err = tcp_v4_reselect_saddr(sk)) != 0)
2044                 sk->sk_err_soft = -err;
2045
2046         return err;
2047 }
2048
2049 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2050 {
2051         struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2052         struct inet_opt *inet = inet_sk(sk);
2053
2054         sin->sin_family         = AF_INET;
2055         sin->sin_addr.s_addr    = inet->daddr;
2056         sin->sin_port           = inet->dport;
2057 }
2058
2059 /* VJ's idea. Save the last timestamp seen from this destination
2060  * and hold it at least for the normal timewait interval, to use for
2061  * duplicate segment detection in subsequent connections before they
2062  * enter the synchronized state.
2063  */
2064
2065 int tcp_v4_remember_stamp(struct sock *sk)
2066 {
2067         struct inet_opt *inet = inet_sk(sk);
2068         struct tcp_opt *tp = tcp_sk(sk);
2069         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2070         struct inet_peer *peer = NULL;
2071         int release_it = 0;
2072
2073         if (!rt || rt->rt_dst != inet->daddr) {
2074                 peer = inet_getpeer(inet->daddr, 1);
2075                 release_it = 1;
2076         } else {
2077                 if (!rt->peer)
2078                         rt_bind_peer(rt, 1);
2079                 peer = rt->peer;
2080         }
2081
2082         if (peer) {
2083                 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2084                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2085                      peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2086                         peer->tcp_ts_stamp = tp->ts_recent_stamp;
2087                         peer->tcp_ts = tp->ts_recent;
2088                 }
2089                 if (release_it)
2090                         inet_putpeer(peer);
2091                 return 1;
2092         }
2093
2094         return 0;
2095 }
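
/* tcp_v4_remember_stamp() above and tcp_v4_tw_remember_stamp() below
 * apply the same overwrite rule; the sketch below is only a readable
 * restatement of it (the helper does not exist in the kernel and is
 * never compiled).
 */
#if 0	/* restatement of the update rule, never compiled */
static int example_should_update_peer_ts(const struct inet_peer *peer,
					 __u32 ts_recent, long ts_recent_stamp)
{
	/* Overwrite the cached timestamp if ours is at least as recent,
	 * or if the cached one has aged beyond TCP_PAWS_MSL and is not
	 * newer than what this connection saw.
	 */
	return (s32)(peer->tcp_ts - ts_recent) <= 0 ||
	       (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		peer->tcp_ts_stamp <= ts_recent_stamp);
}
#endif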
2096
2097 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2098 {
2099         struct inet_peer *peer = NULL;
2100
2101         peer = inet_getpeer(tw->tw_daddr, 1);
2102
2103         if (peer) {
2104                 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2105                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2106                      peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2107                         peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2108                         peer->tcp_ts = tw->tw_ts_recent;
2109                 }
2110                 inet_putpeer(peer);
2111                 return 1;
2112         }
2113
2114         return 0;
2115 }
2116
2117 struct tcp_func ipv4_specific = {
2118         .queue_xmit     =       ip_queue_xmit,
2119         .send_check     =       tcp_v4_send_check,
2120         .rebuild_header =       tcp_v4_rebuild_header,
2121         .conn_request   =       tcp_v4_conn_request,
2122         .syn_recv_sock  =       tcp_v4_syn_recv_sock,
2123         .remember_stamp =       tcp_v4_remember_stamp,
2124         .net_header_len =       sizeof(struct iphdr),
2125         .setsockopt     =       ip_setsockopt,
2126         .getsockopt     =       ip_getsockopt,
2127         .addr2sockaddr  =       v4_addr2sockaddr,
2128         .sockaddr_len   =       sizeof(struct sockaddr_in),
2129 };
2130
2131 /* NOTE: A lot of things are set to zero explicitly by the call to
2132  *       sk_alloc(), so they need not be done here.
2133  */
2134 static int tcp_v4_init_sock(struct sock *sk)
2135 {
2136         struct tcp_opt *tp = tcp_sk(sk);
2137
2138         skb_queue_head_init(&tp->out_of_order_queue);
2139         tcp_init_xmit_timers(sk);
2140         tcp_prequeue_init(tp);
2141
2142         tp->rto  = TCP_TIMEOUT_INIT;
2143         tp->mdev = TCP_TIMEOUT_INIT;
2144
2145         /* So many TCP implementations out there (incorrectly) count the
2146          * initial SYN frame in their delayed-ACK and congestion control
2147          * algorithms that we must have the following bandaid to talk
2148          * efficiently to them.  -DaveM
2149          */
2150         tp->snd_cwnd = 2;
2151
2152         /* See draft-stevens-tcpca-spec-01 for discussion of the
2153          * initialization of these values.
2154          */
2155         tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
2156         tp->snd_cwnd_clamp = ~0;
2157         tp->mss_cache = 536;
2158
2159         tp->reordering = sysctl_tcp_reordering;
2160
2161         sk->sk_state = TCP_CLOSE;
2162
2163         sk->sk_write_space = tcp_write_space;
2164         sk->sk_use_write_queue = 1;
2165
2166         tp->af_specific = &ipv4_specific;
2167
2168         sk->sk_sndbuf = sysctl_tcp_wmem[1];
2169         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2170
2171         atomic_inc(&tcp_sockets_allocated);
2172
2173         return 0;
2174 }
2175
2176 static int tcp_v4_destroy_sock(struct sock *sk)
2177 {
2178         struct tcp_opt *tp = tcp_sk(sk);
2179
2180         tcp_clear_xmit_timers(sk);
2181
2182         /* Clean up the write buffer. */
2183         tcp_writequeue_purge(sk);
2184
2185         /* Cleans up our, hopefully empty, out_of_order_queue. */
2186         __skb_queue_purge(&tp->out_of_order_queue);
2187
2188         /* Clean up the prequeue; it really should be empty by now. */
2189         __skb_queue_purge(&tp->ucopy.prequeue);
2190
2191         /* Clean up a referenced TCP bind bucket. */
2192         if (tp->bind_hash)
2193                 tcp_put_port(sk);
2194
2195         /* If sendmsg cached page exists, toss it. */
2196         if (inet_sk(sk)->sndmsg_page)
2197                 __free_page(inet_sk(sk)->sndmsg_page);
2198
2199         atomic_dec(&tcp_sockets_allocated);
2200
2201         return 0;
2202 }
2203
2204 #ifdef CONFIG_PROC_FS
2205 /* Proc filesystem TCP sock list dumping. */
2206
2207 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2208 {
2209         return hlist_empty(head) ? NULL :
2210                 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2211 }
2212
2213 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2214 {
2215         return tw->tw_node.next ?
2216                 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2217 }
2218
2219 static void *listening_get_next(struct seq_file *seq, void *cur)
2220 {
2221         struct tcp_opt *tp;
2222         struct hlist_node *node;
2223         struct sock *sk = cur;
2224         struct tcp_iter_state* st = seq->private;
2225
2226         if (!sk) {
2227                 st->bucket = 0;
2228                 sk = sk_head(&tcp_listening_hash[0]);
2229                 goto get_sk;
2230         }
2231
2232         ++st->num;
2233
2234         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2235                 struct open_request *req = cur;
2236
2237                 tp = tcp_sk(st->syn_wait_sk);
2238                 req = req->dl_next;
2239                 while (1) {
2240                         while (req) {
2241                                 vxdprintk("skr: %p [#%d] (from %d)\n",
2242                                         req->sk, req->sk->sk_xid, current->xid);
2243                                 if (!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
2244                                         continue;
2245                                 if (req->class->family == st->family) {
2246                                         cur = req;
2247                                         goto out;
2248                                 }
2249                                 req = req->dl_next;
2250                         }
2251                         if (++st->sbucket >= TCP_SYNQ_HSIZE)
2252                                 break;
2253 get_req:
2254                         req = tp->listen_opt->syn_table[st->sbucket];
2255                 }
2256                 sk        = sk_next(st->syn_wait_sk);
2257                 st->state = TCP_SEQ_STATE_LISTENING;
2258                 read_unlock_bh(&tp->syn_wait_lock);
2259         } else
2260                 sk = sk_next(sk);
2261 get_sk:
2262         sk_for_each_from(sk, node) {
2263                 vxdprintk("sk: %p [#%d] (from %d)\n",
2264                         sk, sk->sk_xid, current->xid);
2265                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2266                         continue;
2267                 if (sk->sk_family == st->family) {
2268                         cur = sk;
2269                         goto out;
2270                 }
2271                 tp = tcp_sk(sk);
2272                 read_lock_bh(&tp->syn_wait_lock);
2273                 if (tp->listen_opt && tp->listen_opt->qlen) {
2274                         st->uid         = sock_i_uid(sk);
2275                         st->syn_wait_sk = sk;
2276                         st->state       = TCP_SEQ_STATE_OPENREQ;
2277                         st->sbucket     = 0;
2278                         goto get_req;
2279                 }
2280                 read_unlock_bh(&tp->syn_wait_lock);
2281         }
2282         if (++st->bucket < TCP_LHTABLE_SIZE) {
2283                 sk = sk_head(&tcp_listening_hash[st->bucket]);
2284                 goto get_sk;
2285         }
2286         cur = NULL;
2287 out:
2288         return cur;
2289 }
2290
2291 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2292 {
2293         void *rc = listening_get_next(seq, NULL);
2294
2295         while (rc && *pos) {
2296                 rc = listening_get_next(seq, rc);
2297                 --*pos;
2298         }
2299         return rc;
2300 }
2301
2302 static void *established_get_first(struct seq_file *seq)
2303 {
2304         struct tcp_iter_state* st = seq->private;
2305         void *rc = NULL;
2306
2307         for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2308                 struct sock *sk;
2309                 struct hlist_node *node;
2310                 struct tcp_tw_bucket *tw;
2311                
2312                 read_lock(&tcp_ehash[st->bucket].lock);
2313                 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2314                         vxdprintk("egf,sk: %p [#%d] (from %d)\n",
2315                                 sk, sk->sk_xid, current->xid);
2316                         if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2317                                 continue;
2318                         if (sk->sk_family != st->family)
2319                                 continue;
2320                         rc = sk;
2321                         goto out;
2322                 }
2323                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2324                 tw_for_each(tw, node,
2325                             &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2326                         vxdprintk("tw: %p [#%d] (from %d)\n",
2327                                 tw, tw->tw_xid, current->xid);
2328                         if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2329                                 continue;
2330                         if (tw->tw_family != st->family)
2331                                 continue;
2332                         rc = tw;
2333                         goto out;
2334                 }
2335                 read_unlock(&tcp_ehash[st->bucket].lock);
2336                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2337         }
2338 out:
2339         return rc;
2340 }
2341
2342 static void *established_get_next(struct seq_file *seq, void *cur)
2343 {
2344         struct sock *sk = cur;
2345         struct tcp_tw_bucket *tw;
2346         struct hlist_node *node;
2347         struct tcp_iter_state* st = seq->private;
2348
2349         ++st->num;
2350
2351         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2352                 tw = cur;
2353                 tw = tw_next(tw);
2354 get_tw:
2355                 while (tw && (tw->tw_family != st->family ||
2356                         !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2357                         tw = tw_next(tw);
2358                 }
2359                 if (tw) {
2360                         cur = tw;
2361                         goto out;
2362                 }
2363                 read_unlock(&tcp_ehash[st->bucket].lock);
2364                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2365                 if (++st->bucket < tcp_ehash_size) {
2366                         read_lock(&tcp_ehash[st->bucket].lock);
2367                         sk = sk_head(&tcp_ehash[st->bucket].chain);
2368                 } else {
2369                         cur = NULL;
2370                         goto out;
2371                 }
2372         } else
2373                 sk = sk_next(sk);
2374
2375         sk_for_each_from(sk, node) {
2376                 vxdprintk("egn,sk: %p [#%d] (from %d)\n",
2377                         sk, sk->sk_xid, current->xid);
2378                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2379                         continue;
2380                 if (sk->sk_family == st->family)
2381                         goto found;
2382         }
2383
2384         st->state = TCP_SEQ_STATE_TIME_WAIT;
2385         tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2386         goto get_tw;
2387 found:
2388         cur = sk;
2389 out:
2390         return cur;
2391 }
2392
2393 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2394 {
2395         void *rc = established_get_first(seq);
2396
2397         while (rc && pos) {
2398                 rc = established_get_next(seq, rc);
2399                 --pos;
2400         }               
2401         return rc;
2402 }
2403
2404 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2405 {
2406         void *rc;
2407         struct tcp_iter_state* st = seq->private;
2408
2409         tcp_listen_lock();
2410         st->state = TCP_SEQ_STATE_LISTENING;
2411         rc        = listening_get_idx(seq, &pos);
2412
2413         if (!rc) {
2414                 tcp_listen_unlock();
2415                 local_bh_disable();
2416                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2417                 rc        = established_get_idx(seq, pos);
2418         }
2419
2420         return rc;
2421 }
2422
2423 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2424 {
2425         struct tcp_iter_state* st = seq->private;
2426         st->state = TCP_SEQ_STATE_LISTENING;
2427         st->num = 0;
2428         return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2429 }
2430
2431 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2432 {
2433         void *rc = NULL;
2434         struct tcp_iter_state* st;
2435
2436         if (v == SEQ_START_TOKEN) {
2437                 rc = tcp_get_idx(seq, 0);
2438                 goto out;
2439         }
2440         st = seq->private;
2441
2442         switch (st->state) {
2443         case TCP_SEQ_STATE_OPENREQ:
2444         case TCP_SEQ_STATE_LISTENING:
2445                 rc = listening_get_next(seq, v);
2446                 if (!rc) {
2447                         tcp_listen_unlock();
2448                         local_bh_disable();
2449                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2450                         rc        = established_get_first(seq);
2451                 }
2452                 break;
2453         case TCP_SEQ_STATE_ESTABLISHED:
2454         case TCP_SEQ_STATE_TIME_WAIT:
2455                 rc = established_get_next(seq, v);
2456                 break;
2457         }
2458 out:
2459         ++*pos;
2460         return rc;
2461 }
2462
2463 static void tcp_seq_stop(struct seq_file *seq, void *v)
2464 {
2465         struct tcp_iter_state* st = seq->private;
2466
2467         switch (st->state) {
2468         case TCP_SEQ_STATE_OPENREQ:
2469                 if (v) {
2470                         struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2471                         read_unlock_bh(&tp->syn_wait_lock);
2472                 }
2473         case TCP_SEQ_STATE_LISTENING:
2474                 if (v != SEQ_START_TOKEN)
2475                         tcp_listen_unlock();
2476                 break;
2477         case TCP_SEQ_STATE_TIME_WAIT:
2478         case TCP_SEQ_STATE_ESTABLISHED:
2479                 if (v)
2480                         read_unlock(&tcp_ehash[st->bucket].lock);
2481                 local_bh_enable();
2482                 break;
2483         }
2484 }
2485
2486 static int tcp_seq_open(struct inode *inode, struct file *file)
2487 {
2488         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2489         struct seq_file *seq;
2490         struct tcp_iter_state *s;
2491         int rc;
2492
2493         if (unlikely(afinfo == NULL))
2494                 return -EINVAL;
2495
2496         s = kmalloc(sizeof(*s), GFP_KERNEL);
2497         if (!s)
2498                 return -ENOMEM;
2499         memset(s, 0, sizeof(*s));
2500         s->family               = afinfo->family;
2501         s->seq_ops.start        = tcp_seq_start;
2502         s->seq_ops.next         = tcp_seq_next;
2503         s->seq_ops.show         = afinfo->seq_show;
2504         s->seq_ops.stop         = tcp_seq_stop;
2505
2506         rc = seq_open(file, &s->seq_ops);
2507         if (rc)
2508                 goto out_kfree;
2509         seq          = file->private_data;
2510         seq->private = s;
2511 out:
2512         return rc;
2513 out_kfree:
2514         kfree(s);
2515         goto out;
2516 }
2517
2518 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2519 {
2520         int rc = 0;
2521         struct proc_dir_entry *p;
2522
2523         if (!afinfo)
2524                 return -EINVAL;
2525         afinfo->seq_fops->owner         = afinfo->owner;
2526         afinfo->seq_fops->open          = tcp_seq_open;
2527         afinfo->seq_fops->read          = seq_read;
2528         afinfo->seq_fops->llseek        = seq_lseek;
2529         afinfo->seq_fops->release       = seq_release_private;
2530         
2531         p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2532         if (p)
2533                 p->data = afinfo;
2534         else
2535                 rc = -ENOMEM;
2536         return rc;
2537 }
2538
2539 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2540 {
2541         if (!afinfo)
2542                 return;
2543         proc_net_remove(afinfo->name);
2544         memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
2545 }
2546
2547 static void get_openreq4(struct sock *sk, struct open_request *req,
2548                          char *tmpbuf, int i, int uid)
2549 {
2550         int ttd = req->expires - jiffies;
2551
2552         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2553                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2554                 i,
2555                 req->af.v4_req.loc_addr,
2556                 ntohs(inet_sk(sk)->sport),
2557                 req->af.v4_req.rmt_addr,
2558                 ntohs(req->rmt_port),
2559                 TCP_SYN_RECV,
2560                 0, 0, /* could print option size, but that is af dependent. */
2561                 1,    /* timers active (only the expire timer) */
2562                 jiffies_to_clock_t(ttd),
2563                 req->retrans,
2564                 uid,
2565                 0,  /* non standard timer */
2566                 0, /* open_requests have no inode */
2567                 atomic_read(&sk->sk_refcnt),
2568                 req);
2569 }
2570
2571 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2572 {
2573         int timer_active;
2574         unsigned long timer_expires;
2575         struct tcp_opt *tp = tcp_sk(sp);
2576         struct inet_opt *inet = inet_sk(sp);
2577         unsigned int dest = inet->daddr;
2578         unsigned int src = inet->rcv_saddr;
2579         __u16 destp = ntohs(inet->dport);
2580         __u16 srcp = ntohs(inet->sport);
2581
2582         if (tp->pending == TCP_TIME_RETRANS) {
2583                 timer_active    = 1;
2584                 timer_expires   = tp->timeout;
2585         } else if (tp->pending == TCP_TIME_PROBE0) {
2586                 timer_active    = 4;
2587                 timer_expires   = tp->timeout;
2588         } else if (timer_pending(&sp->sk_timer)) {
2589                 timer_active    = 2;
2590                 timer_expires   = sp->sk_timer.expires;
2591         } else {
2592                 timer_active    = 0;
2593                 timer_expires = jiffies;
2594         }
2595
2596         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2597                         "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2598                 i, src, srcp, dest, destp, sp->sk_state,
2599                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2600                 timer_active,
2601                 jiffies_to_clock_t(timer_expires - jiffies),
2602                 tp->retransmits,
2603                 sock_i_uid(sp),
2604                 tp->probes_out,
2605                 sock_i_ino(sp),
2606                 atomic_read(&sp->sk_refcnt), sp,
2607                 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2608                 tp->snd_cwnd,
2609                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2610 }
2611
2612 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2613 {
2614         unsigned int dest, src;
2615         __u16 destp, srcp;
2616         int ttd = tw->tw_ttd - jiffies;
2617
2618         if (ttd < 0)
2619                 ttd = 0;
2620
2621         dest  = tw->tw_daddr;
2622         src   = tw->tw_rcv_saddr;
2623         destp = ntohs(tw->tw_dport);
2624         srcp  = ntohs(tw->tw_sport);
2625
2626         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2627                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2628                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2629                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2630                 atomic_read(&tw->tw_refcnt), tw);
2631 }
2632
2633 #define TMPSZ 150
2634
2635 static int tcp4_seq_show(struct seq_file *seq, void *v)
2636 {
2637         struct tcp_iter_state* st;
2638         char tmpbuf[TMPSZ + 1];
2639
2640         if (v == SEQ_START_TOKEN) {
2641                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2642                            "  sl  local_address rem_address   st tx_queue "
2643                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2644                            "inode");
2645                 goto out;
2646         }
2647         st = seq->private;
2648
2649         switch (st->state) {
2650         case TCP_SEQ_STATE_LISTENING:
2651         case TCP_SEQ_STATE_ESTABLISHED:
2652                 get_tcp4_sock(v, tmpbuf, st->num);
2653                 break;
2654         case TCP_SEQ_STATE_OPENREQ:
2655                 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2656                 break;
2657         case TCP_SEQ_STATE_TIME_WAIT:
2658                 get_timewait4_sock(v, tmpbuf, st->num);
2659                 break;
2660         }
2661         seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2662 out:
2663         return 0;
2664 }
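
/* For reference, the format produced above yields /proc/net/tcp lines
 * such as this purely illustrative example:
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *    0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 1234 1 c12a3b00 300 0 0 2 -1
 *
 * i.e. a socket bound to 127.0.0.1:22 in LISTEN (state 0x0A).  The
 * addresses are the raw 32-bit values printed in hex, so 127.0.0.1
 * appears as 0100007F on a little-endian machine; the ports are in
 * host byte order.
 */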
2665
2666 static struct file_operations tcp4_seq_fops;
2667 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2668         .owner          = THIS_MODULE,
2669         .name           = "tcp",
2670         .family         = AF_INET,
2671         .seq_show       = tcp4_seq_show,
2672         .seq_fops       = &tcp4_seq_fops,
2673 };
2674
2675 int __init tcp4_proc_init(void)
2676 {
2677         return tcp_proc_register(&tcp4_seq_afinfo);
2678 }
2679
2680 void tcp4_proc_exit(void)
2681 {
2682         tcp_proc_unregister(&tcp4_seq_afinfo);
2683 }
2684 #endif /* CONFIG_PROC_FS */
2685
2686 struct proto tcp_prot = {
2687         .name           =       "TCP",
2688         .close          =       tcp_close,
2689         .connect        =       tcp_v4_connect,
2690         .disconnect     =       tcp_disconnect,
2691         .accept         =       tcp_accept,
2692         .ioctl          =       tcp_ioctl,
2693         .init           =       tcp_v4_init_sock,
2694         .destroy        =       tcp_v4_destroy_sock,
2695         .shutdown       =       tcp_shutdown,
2696         .setsockopt     =       tcp_setsockopt,
2697         .getsockopt     =       tcp_getsockopt,
2698         .sendmsg        =       tcp_sendmsg,
2699         .recvmsg        =       tcp_recvmsg,
2700         .backlog_rcv    =       tcp_v4_do_rcv,
2701         .hash           =       tcp_v4_hash,
2702         .unhash         =       tcp_unhash,
2703         .get_port       =       tcp_v4_get_port,
2704 };
2705
2706
2707
2708 void __init tcp_v4_init(struct net_proto_family *ops)
2709 {
2710         int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2711         if (err < 0)
2712                 panic("Failed to create the TCP control socket.\n");
2713         tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
2714         inet_sk(tcp_socket->sk)->uc_ttl = -1;
2715
2716         /* Unhash it so that IP input processing does not even
2717          * see it; we do not wish this socket to see incoming
2718          * packets.
2719          */
2720         tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2721 }
2722
2723 EXPORT_SYMBOL(ipv4_specific);
2724 EXPORT_SYMBOL(tcp_bind_hash);
2725 EXPORT_SYMBOL(tcp_bucket_create);
2726 EXPORT_SYMBOL(tcp_hashinfo);
2727 EXPORT_SYMBOL(tcp_inherit_port);
2728 EXPORT_SYMBOL(tcp_listen_wlock);
2729 EXPORT_SYMBOL(tcp_port_rover);
2730 EXPORT_SYMBOL(tcp_prot);
2731 EXPORT_SYMBOL(tcp_put_port);
2732 EXPORT_SYMBOL(tcp_unhash);
2733 EXPORT_SYMBOL(tcp_v4_conn_request);
2734 EXPORT_SYMBOL(tcp_v4_connect);
2735 EXPORT_SYMBOL(tcp_v4_do_rcv);
2736 EXPORT_SYMBOL(tcp_v4_lookup_listener);
2737 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2738 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2739 EXPORT_SYMBOL(tcp_v4_send_check);
2740 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2741
2742 #ifdef CONFIG_PROC_FS
2743 EXPORT_SYMBOL(tcp_proc_register);
2744 EXPORT_SYMBOL(tcp_proc_unregister);
2745 #endif
2746 #ifdef CONFIG_SYSCTL
2747 EXPORT_SYMBOL(sysctl_local_port_range);
2748 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2749 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2750 #endif