net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:     $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
9  *
10  *              IPv4 specific functions
11  *
12  *
13  *              code split from:
14  *              linux/ipv4/tcp.c
15  *              linux/ipv4/tcp_input.c
16  *              linux/ipv4/tcp_output.c
17  *
18  *              See tcp.c for author information
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 /*
27  * Changes:
28  *              David S. Miller :       New socket lookup architecture.
29  *                                      This code is dedicated to John Dyson.
30  *              David S. Miller :       Change semantics of established hash,
31  *                                      half is devoted to TIME_WAIT sockets
32  *                                      and the rest go in the other half.
33  *              Andi Kleen :            Add support for syncookies and fixed
34  *                                      some bugs: ip options weren't passed to
35  *                                      the TCP layer, missed a check for an
36  *                                      ACK bit.
37  *              Andi Kleen :            Implemented fast path mtu discovery.
38  *                                      Fixed many serious bugs in the
39  *                                      open_request handling and moved
40  *                                      most of it into the af independent code.
41  *                                      Added tail drop and some other bugfixes.
42  *                                      Added new listen semantics.
43  *              Mike McLagan    :       Routing by source
44  *      Juan Jose Ciarlante:            ip_dynaddr bits
45  *              Andi Kleen:             various fixes.
46  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
47  *                                      coma.
48  *      Andi Kleen              :       Fix new listen.
49  *      Andi Kleen              :       Fix accept error reporting.
50  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
51  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
52  *                                      a single port at the same time.
53  */
54
55 #include <linux/config.h>
56
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
65
66 #include <net/icmp.h>
67 #include <net/tcp.h>
68 #include <net/ipv6.h>
69 #include <net/inet_common.h>
70 #include <net/xfrm.h>
71
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77
78 #include <linux/vs_base.h>
79
80 extern int sysctl_ip_dynaddr;
81 int sysctl_tcp_tw_reuse;
82 int sysctl_tcp_low_latency;
83
84 /* Check TCP sequence numbers in ICMP packets. */
85 #define ICMP_MIN_LENGTH 8
86
87 /* Socket used for sending RSTs */
88 static struct socket *tcp_socket;
89
90 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
91                        struct sk_buff *skb);
92
93 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
94         .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
95         .__tcp_lhash_users      =       ATOMIC_INIT(0),
96         .__tcp_lhash_wait
97           = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
98         .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
99 };
100
101 /*
102  * This array holds the first and last local port number.
103  * For high-usage systems, use sysctl to change this to
104  * 32768-61000
105  */
106 int sysctl_local_port_range[2] = { 1024, 4999 };
107 int tcp_port_rover = 1024 - 1;
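/* Example (assumes the usual 2.6-era proc path for this sysctl; verify on
 * your tree): the range can typically be widened at runtime with
 *   echo "32768 61000" > /proc/sys/net/ipv4/ip_local_port_range
 */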
108
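/* Fold the connection 4-tuple into an index in the established hash table;
 * the final mask assumes tcp_ehash_size is a power of two.
 */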
109 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
110                                  __u32 faddr, __u16 fport)
111 {
112         int h = (laddr ^ lport) ^ (faddr ^ fport);
113         h ^= h >> 16;
114         h ^= h >> 8;
115         return h & (tcp_ehash_size - 1);
116 }
117
118 static __inline__ int tcp_sk_hashfn(struct sock *sk)
119 {
120         struct inet_opt *inet = inet_sk(sk);
121         __u32 laddr = inet->rcv_saddr;
122         __u16 lport = inet->num;
123         __u32 faddr = inet->daddr;
124         __u16 fport = inet->dport;
125
126         return tcp_hashfn(laddr, lport, faddr, fport);
127 }
128
129 /* Allocate and initialize a new TCP local port bind bucket.
130  * The bindhash mutex for snum's hash chain must be held here.
131  */
132 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
133                                           unsigned short snum)
134 {
135         struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
136                                                       SLAB_ATOMIC);
137         if (tb) {
138                 tb->port = snum;
139                 tb->fastreuse = 0;
140                 INIT_HLIST_HEAD(&tb->owners);
141                 hlist_add_head(&tb->node, &head->chain);
142         }
143         return tb;
144 }
145
146 /* Caller must hold hashbucket lock for this tb with local BH disabled */
147 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
148 {
149         if (hlist_empty(&tb->owners)) {
150                 __hlist_del(&tb->node);
151                 kmem_cache_free(tcp_bucket_cachep, tb);
152         }
153 }
154
155 /* Caller must disable local BH processing. */
156 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
157 {
158         struct tcp_bind_hashbucket *head =
159                                 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
160         struct tcp_bind_bucket *tb;
161
162         spin_lock(&head->lock);
163         tb = tcp_sk(sk)->bind_hash;
164         sk_add_bind_node(child, &tb->owners);
165         tcp_sk(child)->bind_hash = tb;
166         spin_unlock(&head->lock);
167 }
168
169 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
170 {
171         local_bh_disable();
172         __tcp_inherit_port(sk, child);
173         local_bh_enable();
174 }
175
176 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
177                    unsigned short snum)
178 {
179         inet_sk(sk)->num = snum;
180         sk_add_bind_node(sk, &tb->owners);
181         tcp_sk(sk)->bind_hash = tb;
182 }
183
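/* Returns nonzero if some other (non IPv6-only) socket already bound to this
 * bucket's port conflicts with sk: the devices are compatible (either side
 * unbound or equal), neither side permits reuse (or the other socket is
 * listening), and the local receive addresses overlap (wildcard or equal).
 */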
184 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
185 {
186         const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
187         struct sock *sk2;
188         struct hlist_node *node;
189         int reuse = sk->sk_reuse;
190
191         sk_for_each_bound(sk2, node, &tb->owners) {
192                 if (sk != sk2 &&
193                     !tcp_v6_ipv6only(sk2) &&
194                     (!sk->sk_bound_dev_if ||
195                      !sk2->sk_bound_dev_if ||
196                      sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
197                         if (!reuse || !sk2->sk_reuse ||
198                             sk2->sk_state == TCP_LISTEN) {
199                                 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
200                                 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
201                                     sk2_rcv_saddr == sk_rcv_saddr)
202                                         break;
203                         }
204                 }
205         }
206         return node != NULL;
207 }
208
209 /* Obtain a reference to a local port for the given sock.
210  * If snum is zero it means select any available local port.
211  */
212 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
213 {
214         struct tcp_bind_hashbucket *head;
215         struct hlist_node *node;
216         struct tcp_bind_bucket *tb;
217         int ret;
218
219         local_bh_disable();
220         if (!snum) {
221                 int low = sysctl_local_port_range[0];
222                 int high = sysctl_local_port_range[1];
223                 int remaining = (high - low) + 1;
224                 int rover;
225
226                 spin_lock(&tcp_portalloc_lock);
227                 rover = tcp_port_rover;
228                 do {
229                         rover++;
230                         if (rover < low || rover > high)
231                                 rover = low;
232                         head = &tcp_bhash[tcp_bhashfn(rover)];
233                         spin_lock(&head->lock);
234                         tb_for_each(tb, node, &head->chain)
235                                 if (tb->port == rover)
236                                         goto next;
237                         break;
238                 next:
239                         spin_unlock(&head->lock);
240                 } while (--remaining > 0);
241                 tcp_port_rover = rover;
242                 spin_unlock(&tcp_portalloc_lock);
243
244                 /* Exhausted local port range during search? */
245                 ret = 1;
246                 if (remaining <= 0)
247                         goto fail;
248
249                 /* OK, here is the one we will use.  HEAD is
250                  * non-NULL and we hold its lock.
251                  */
252                 snum = rover;
253         } else {
254                 head = &tcp_bhash[tcp_bhashfn(snum)];
255                 spin_lock(&head->lock);
256                 tb_for_each(tb, node, &head->chain)
257                         if (tb->port == snum)
258                                 goto tb_found;
259         }
260         tb = NULL;
261         goto tb_not_found;
262 tb_found:
263         if (!hlist_empty(&tb->owners)) {
264                 if (sk->sk_reuse > 1)
265                         goto success;
266                 if (tb->fastreuse > 0 &&
267                     sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
268                         goto success;
269                 } else {
270                         ret = 1;
271                         if (tcp_bind_conflict(sk, tb))
272                                 goto fail_unlock;
273                 }
274         }
275 tb_not_found:
276         ret = 1;
277         if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
278                 goto fail_unlock;
279         if (hlist_empty(&tb->owners)) {
280                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
281                         tb->fastreuse = 1;
282                 else
283                         tb->fastreuse = 0;
284         } else if (tb->fastreuse &&
285                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
286                 tb->fastreuse = 0;
287 success:
288         if (!tcp_sk(sk)->bind_hash)
289                 tcp_bind_hash(sk, tb, snum);
290         BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
291         ret = 0;
292
293 fail_unlock:
294         spin_unlock(&head->lock);
295 fail:
296         local_bh_enable();
297         return ret;
298 }
299
300 /* Get rid of any references to a local port held by the
301  * given sock.
302  */
303 static void __tcp_put_port(struct sock *sk)
304 {
305         struct inet_opt *inet = inet_sk(sk);
306         struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
307         struct tcp_bind_bucket *tb;
308
309         spin_lock(&head->lock);
310         tb = tcp_sk(sk)->bind_hash;
311         __sk_del_bind_node(sk);
312         tcp_sk(sk)->bind_hash = NULL;
313         inet->num = 0;
314         tcp_bucket_destroy(tb);
315         spin_unlock(&head->lock);
316 }
317
318 void tcp_put_port(struct sock *sk)
319 {
320         local_bh_disable();
321         __tcp_put_port(sk);
322         local_bh_enable();
323 }
324
325 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and can be very bad on SMP.
326  * Look, when several writers sleep and a reader wakes them up, all but one
327  * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
328  * this, _but_ remember, it adds useless work on UP machines (a wakeup on each
329  * exclusive lock release). It really should be ifdefed.
330  */
331
332 void tcp_listen_wlock(void)
333 {
334         write_lock(&tcp_lhash_lock);
335
336         if (atomic_read(&tcp_lhash_users)) {
337                 DEFINE_WAIT(wait);
338
339                 for (;;) {
340                         prepare_to_wait_exclusive(&tcp_lhash_wait,
341                                                 &wait, TASK_UNINTERRUPTIBLE);
342                         if (!atomic_read(&tcp_lhash_users))
343                                 break;
344                         write_unlock_bh(&tcp_lhash_lock);
345                         schedule();
346                         write_lock_bh(&tcp_lhash_lock);
347                 }
348
349                 finish_wait(&tcp_lhash_wait, &wait);
350         }
351 }
352
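/* Add sk to the listening hash (for TCP_LISTEN sockets, when allowed by
 * listen_possible) or to the established hash otherwise.  The caller must
 * have local BH disabled; the appropriate chain lock is taken here.
 */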
353 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
354 {
355         struct hlist_head *list;
356         rwlock_t *lock;
357
358         BUG_TRAP(sk_unhashed(sk));
359         if (listen_possible && sk->sk_state == TCP_LISTEN) {
360                 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
361                 lock = &tcp_lhash_lock;
362                 tcp_listen_wlock();
363         } else {
364                 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
365                 lock = &tcp_ehash[sk->sk_hashent].lock;
366                 write_lock(lock);
367         }
368         __sk_add_node(sk, list);
369         sock_prot_inc_use(sk->sk_prot);
370         write_unlock(lock);
371         if (listen_possible && sk->sk_state == TCP_LISTEN)
372                 wake_up(&tcp_lhash_wait);
373 }
374
375 static void tcp_v4_hash(struct sock *sk)
376 {
377         if (sk->sk_state != TCP_CLOSE) {
378                 local_bh_disable();
379                 __tcp_v4_hash(sk, 1);
380                 local_bh_enable();
381         }
382 }
383
384 void tcp_unhash(struct sock *sk)
385 {
386         rwlock_t *lock;
387
388         if (sk_unhashed(sk))
389                 goto ende;
390
391         if (sk->sk_state == TCP_LISTEN) {
392                 local_bh_disable();
393                 tcp_listen_wlock();
394                 lock = &tcp_lhash_lock;
395         } else {
396                 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
397                 lock = &head->lock;
398                 write_lock_bh(&head->lock);
399         }
400
401         if (__sk_del_node_init(sk))
402                 sock_prot_dec_use(sk->sk_prot);
403         write_unlock_bh(lock);
404
405  ende:
406         if (sk->sk_state == TCP_LISTEN)
407                 wake_up(&tcp_lhash_wait);
408 }
409
410 /* Don't inline this cruft.  There are some nice properties to
411  * exploit here.  The BSD API does not allow a listening TCP
412  * to specify the remote port nor the remote address for the
413  * connection.  So always assume those are both wildcarded
414  * during the search since they can never be otherwise.
415  */
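/* Scoring: an AF_INET socket starts at 1 and gains 2 for a matching bound
 * address and 2 for a matching bound device, so 5 is a perfect match and is
 * returned immediately; otherwise the highest-scoring listener wins.
 */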
416 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
417                                              unsigned short hnum, int dif)
418 {
419         struct sock *result = NULL, *sk;
420         struct hlist_node *node;
421         int score, hiscore;
422
423         hiscore=-1;
424         sk_for_each(sk, node, head) {
425                 struct inet_opt *inet = inet_sk(sk);
426
427                 if (inet->num == hnum && !ipv6_only_sock(sk)) {
428                         __u32 rcv_saddr = inet->rcv_saddr;
429
430                         score = (sk->sk_family == PF_INET ? 1 : 0);
431                         if (rcv_saddr) {
432                                 if (rcv_saddr != daddr)
433                                         continue;
434                                 score+=2;
435                         }
436                         if (sk->sk_bound_dev_if) {
437                                 if (sk->sk_bound_dev_if != dif)
438                                         continue;
439                                 score+=2;
440                         }
441                         if (score == 5)
442                                 return sk;
443                         if (score > hiscore) {
444                                 hiscore = score;
445                                 result = sk;
446                         }
447                 }
448         }
449         return result;
450 }
451
452 /* Optimize the common listener case. */
453 inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
454                                            int dif)
455 {
456         struct sock *sk = NULL;
457         struct hlist_head *head;
458
459         read_lock(&tcp_lhash_lock);
460         head = &tcp_listening_hash[tcp_lhashfn(hnum)];
461         if (!hlist_empty(head)) {
462                 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
463                 if (inet->num == hnum && !sk->sk_node.next &&
464                     (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
465                     (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
466                     !sk->sk_bound_dev_if)
467                         goto sherry_cache;
468                 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
469         }
470         if (sk) {
471 sherry_cache:
472                 sock_hold(sk);
473         }
474         read_unlock(&tcp_lhash_lock);
475         return sk;
476 }
477
478 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
479  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
480  *
481  * Local BH must be disabled here.
482  */
483
484 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
485                                                        u32 daddr, u16 hnum,
486                                                        int dif)
487 {
488         struct tcp_ehash_bucket *head;
489         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
490         __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
491         struct sock *sk;
492         struct hlist_node *node;
493         /* Optimize here for direct hit, only listening connections can
494          * have wildcards anyway.
495          */
496         int hash = tcp_hashfn(daddr, hnum, saddr, sport);
497         head = &tcp_ehash[hash];
498         read_lock(&head->lock);
499         sk_for_each(sk, node, &head->chain) {
500                 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
501                         goto hit; /* You sunk my battleship! */
502         }
503
504         /* Must check for a TIME_WAIT'er before going to listener hash. */
505         sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
506                 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
507                         goto hit;
508         }
509         sk = NULL;
510 out:
511         read_unlock(&head->lock);
512         return sk;
513 hit:
514         sock_hold(sk);
515         goto out;
516 }
517
518 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
519                                            u32 daddr, u16 hnum, int dif)
520 {
521         struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
522                                                       daddr, hnum, dif);
523
524         return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
525 }
526
527 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
528                                   u16 dport, int dif)
529 {
530         struct sock *sk;
531
532         local_bh_disable();
533         sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
534         local_bh_enable();
535
536         return sk;
537 }
538
539 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
540 {
541         return secure_tcp_sequence_number(skb->nh.iph->daddr,
542                                           skb->nh.iph->saddr,
543                                           skb->h.th->dest,
544                                           skb->h.th->source);
545 }
546
547 /* called with local bh disabled */
548 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
549                                       struct tcp_tw_bucket **twp)
550 {
551         struct inet_opt *inet = inet_sk(sk);
552         u32 daddr = inet->rcv_saddr;
553         u32 saddr = inet->daddr;
554         int dif = sk->sk_bound_dev_if;
555         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
556         __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
557         int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
558         struct tcp_ehash_bucket *head = &tcp_ehash[hash];
559         struct sock *sk2;
560         struct hlist_node *node;
561         struct tcp_tw_bucket *tw;
562
563         write_lock(&head->lock);
564
565         /* Check TIME-WAIT sockets first. */
566         sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
567                 tw = (struct tcp_tw_bucket *)sk2;
568
569                 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
570                         struct tcp_opt *tp = tcp_sk(sk);
571
572                         /* With PAWS, it is safe from the viewpoint
573                            of data integrity. Even without PAWS it
574                            is safe provided sequence spaces do not
575                            overlap, i.e. at data rates <= 80 Mbit/sec.
576
577                            Actually, the idea is close to VJ's one, only
578                            the timestamp cache is held not per host but
579                            per port pair, and the TW bucket is used as
580                            the state holder.
581
582                            If the TW bucket has already been destroyed we
583                            fall back to VJ's scheme and use the initial
584                            timestamp retrieved from the peer table.
585                          */
586                         if (tw->tw_ts_recent_stamp &&
587                             (!twp || (sysctl_tcp_tw_reuse &&
588                                       xtime.tv_sec -
589                                       tw->tw_ts_recent_stamp > 1))) {
590                                 if ((tp->write_seq =
591                                                 tw->tw_snd_nxt + 65535 + 2) == 0)
592                                         tp->write_seq = 1;
593                                 tp->ts_recent       = tw->tw_ts_recent;
594                                 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
595                                 sock_hold(sk2);
596                                 goto unique;
597                         } else
598                                 goto not_unique;
599                 }
600         }
601         tw = NULL;
602
603         /* And established part... */
604         sk_for_each(sk2, node, &head->chain) {
605                 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
606                         goto not_unique;
607         }
608
609 unique:
610         /* Must record num and sport now. Otherwise we will see
611          * a socket with a funny identity in the hash table. */
612         inet->num = lport;
613         inet->sport = htons(lport);
614         sk->sk_hashent = hash;
615         BUG_TRAP(sk_unhashed(sk));
616         __sk_add_node(sk, &head->chain);
617         sock_prot_inc_use(sk->sk_prot);
618         write_unlock(&head->lock);
619
620         if (twp) {
621                 *twp = tw;
622                 NET_INC_STATS_BH(TimeWaitRecycled);
623         } else if (tw) {
624                 /* Silly. Should hash-dance instead... */
625                 tcp_tw_deschedule(tw);
626                 NET_INC_STATS_BH(TimeWaitRecycled);
627
628                 tcp_tw_put(tw);
629         }
630
631         return 0;
632
633 not_unique:
634         write_unlock(&head->lock);
635         return -EADDRNOTAVAIL;
636 }
637
638 /*
639  * Bind a port for a connect operation and hash it.
640  */
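/* Note on tb->fastreuse: buckets created here are marked with -1, which keeps
 * tcp_v4_get_port() from ever treating them as fast-reusable and lets the
 * loop below skip ports that were bound the normal way (fastreuse >= 0).
 */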
641 static int tcp_v4_hash_connect(struct sock *sk)
642 {
643         unsigned short snum = inet_sk(sk)->num;
644         struct tcp_bind_hashbucket *head;
645         struct tcp_bind_bucket *tb;
646         int ret;
647
648         if (!snum) {
649                 int rover;
650                 int low = sysctl_local_port_range[0];
651                 int high = sysctl_local_port_range[1];
652                 int remaining = (high - low) + 1;
653                 struct hlist_node *node;
654                 struct tcp_tw_bucket *tw = NULL;
655
656                 local_bh_disable();
657
658                 /* TODO. Actually it is not such a bad idea to remove
659                  * tcp_portalloc_lock before the next submission to Linus.
660                  * As soon as we touch this place at all it is time to think.
661                  *
662                  * Right now it protects a single _advisory_ variable,
663                  * tcp_port_rover, hence it is mostly useless.
664                  * The code will work nicely if we just delete it, but I am
665                  * afraid that in the contended case it will work no better
666                  * or even worse: another cpu will just hit the same bucket
667                  * and spin there.
668                  * So some per-cpu salt could remove both the contention and
669                  * the memory pingpong. Any ideas how to do this in a nice way?
670                  */
671                 spin_lock(&tcp_portalloc_lock);
672                 rover = tcp_port_rover;
673
674                 do {
675                         rover++;
676                         if ((rover < low) || (rover > high))
677                                 rover = low;
678                         head = &tcp_bhash[tcp_bhashfn(rover)];
679                         spin_lock(&head->lock);
680
681                         /* Does not bother with rcv_saddr checks,
682                          * because the established check is already
683                          * unique enough.
684                          */
685                         tb_for_each(tb, node, &head->chain) {
686                                 if (tb->port == rover) {
687                                         BUG_TRAP(!hlist_empty(&tb->owners));
688                                         if (tb->fastreuse >= 0)
689                                                 goto next_port;
690                                         if (!__tcp_v4_check_established(sk,
691                                                                         rover,
692                                                                         &tw))
693                                                 goto ok;
694                                         goto next_port;
695                                 }
696                         }
697
698                         tb = tcp_bucket_create(head, rover);
699                         if (!tb) {
700                                 spin_unlock(&head->lock);
701                                 break;
702                         }
703                         tb->fastreuse = -1;
704                         goto ok;
705
706                 next_port:
707                         spin_unlock(&head->lock);
708                 } while (--remaining > 0);
709                 tcp_port_rover = rover;
710                 spin_unlock(&tcp_portalloc_lock);
711
712                 local_bh_enable();
713
714                 return -EADDRNOTAVAIL;
715
716 ok:
717                 /* All locks still held and bhs disabled */
718                 tcp_port_rover = rover;
719                 spin_unlock(&tcp_portalloc_lock);
720
721                 tcp_bind_hash(sk, tb, rover);
722                 if (sk_unhashed(sk)) {
723                         inet_sk(sk)->sport = htons(rover);
724                         __tcp_v4_hash(sk, 0);
725                 }
726                 spin_unlock(&head->lock);
727
728                 if (tw) {
729                         tcp_tw_deschedule(tw);
730                         tcp_tw_put(tw);
731                 }
732
733                 ret = 0;
734                 goto out;
735         }
736
737         head  = &tcp_bhash[tcp_bhashfn(snum)];
738         tb  = tcp_sk(sk)->bind_hash;
739         spin_lock_bh(&head->lock);
740         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
741                 __tcp_v4_hash(sk, 0);
742                 spin_unlock_bh(&head->lock);
743                 return 0;
744         } else {
745                 spin_unlock(&head->lock);
746                 /* No definite answer... Walk to established hash table */
747                 ret = __tcp_v4_check_established(sk, snum, NULL);
748 out:
749                 local_bh_enable();
750                 return ret;
751         }
752 }
753
754 /* This will initiate an outgoing connection. */
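/* This is the prot->connect handler for TCP over IPv4: it resolves a route to
 * the peer, picks the source address and port, hashes the socket and finally
 * sends the initial SYN via tcp_connect().
 */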
755 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
756 {
757         struct inet_opt *inet = inet_sk(sk);
758         struct tcp_opt *tp = tcp_sk(sk);
759         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
760         struct rtable *rt;
761         u32 daddr, nexthop;
762         int tmp;
763         int err;
764
765         if (addr_len < sizeof(struct sockaddr_in))
766                 return -EINVAL;
767
768         if (usin->sin_family != AF_INET)
769                 return -EAFNOSUPPORT;
770
771         nexthop = daddr = usin->sin_addr.s_addr;
772         if (inet->opt && inet->opt->srr) {
773                 if (!daddr)
774                         return -EINVAL;
775                 nexthop = inet->opt->faddr;
776         }
777
778         tmp = ip_route_connect(&rt, nexthop, inet->saddr,
779                                RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
780                                IPPROTO_TCP,
781                                inet->sport, usin->sin_port, sk);
782         if (tmp < 0)
783                 return tmp;
784
785         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
786                 ip_rt_put(rt);
787                 return -ENETUNREACH;
788         }
789
790         if (!inet->opt || !inet->opt->srr)
791                 daddr = rt->rt_dst;
792
793         if (!inet->saddr)
794                 inet->saddr = rt->rt_src;
795         inet->rcv_saddr = inet->saddr;
796
797         if (tp->ts_recent_stamp && inet->daddr != daddr) {
798                 /* Reset inherited state */
799                 tp->ts_recent       = 0;
800                 tp->ts_recent_stamp = 0;
801                 tp->write_seq       = 0;
802         }
803
804         if (sysctl_tcp_tw_recycle &&
805             !tp->ts_recent_stamp && rt->rt_dst == daddr) {
806                 struct inet_peer *peer = rt_get_peer(rt);
807
808                 /* VJ's idea. We save last timestamp seen from
809                  * the destination in peer table, when entering state TIME-WAIT
810                  * and initialize ts_recent from it, when trying new connection.
811                  */
812
813                 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
814                         tp->ts_recent_stamp = peer->tcp_ts_stamp;
815                         tp->ts_recent = peer->tcp_ts;
816                 }
817         }
818
819         inet->dport = usin->sin_port;
820         inet->daddr = daddr;
821
822         tp->ext_header_len = 0;
823         if (inet->opt)
824                 tp->ext_header_len = inet->opt->optlen;
825
826         tp->mss_clamp = 536;
827
828         /* Socket identity is still unknown (sport may be zero).
829          * However we set the state to SYN-SENT and, without releasing the
830          * socket lock, select a source port, enter ourselves into the hash
831          * tables and complete initialization after this.
832          */
833         tcp_set_state(sk, TCP_SYN_SENT);
834         err = tcp_v4_hash_connect(sk);
835         if (err)
836                 goto failure;
837
838         err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
839         if (err)
840                 goto failure;
841
842         /* OK, now commit destination to socket.  */
843         __sk_dst_set(sk, &rt->u.dst);
844         tcp_v4_setup_caps(sk, &rt->u.dst);
845         tp->ext2_header_len = rt->u.dst.header_len;
846
847         if (!tp->write_seq)
848                 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
849                                                            inet->daddr,
850                                                            inet->sport,
851                                                            usin->sin_port);
852
853         inet->id = tp->write_seq ^ jiffies;
854
855         err = tcp_connect(sk);
856         rt = NULL;
857         if (err)
858                 goto failure;
859
860         return 0;
861
862 failure:
863         /* This unhashes the socket and releases the local port, if necessary. */
864         tcp_set_state(sk, TCP_CLOSE);
865         ip_rt_put(rt);
866         sk->sk_route_caps = 0;
867         inet->dport = 0;
868         return err;
869 }
870
871 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
872 {
873         return ((struct rtable *)skb->dst)->rt_iif;
874 }
875
876 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
877 {
878         return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
879 }
880
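/* Walk the listener's SYN table chain selected by (raddr, rport, hash_rnd)
 * for a pending open_request matching the peer and local addresses; *prevp is
 * set to the link pointer so the caller can unlink the entry later.
 */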
881 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
882                                               struct open_request ***prevp,
883                                               __u16 rport,
884                                               __u32 raddr, __u32 laddr)
885 {
886         struct tcp_listen_opt *lopt = tp->listen_opt;
887         struct open_request *req, **prev;
888
889         for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
890              (req = *prev) != NULL;
891              prev = &req->dl_next) {
892                 if (req->rmt_port == rport &&
893                     req->af.v4_req.rmt_addr == raddr &&
894                     req->af.v4_req.loc_addr == laddr &&
895                     TCP_INET_FAMILY(req->class->family)) {
896                         BUG_TRAP(!req->sk);
897                         *prevp = prev;
898                         break;
899                 }
900         }
901
902         return req;
903 }
904
905 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
906 {
907         struct tcp_opt *tp = tcp_sk(sk);
908         struct tcp_listen_opt *lopt = tp->listen_opt;
909         u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
910
911         req->expires = jiffies + TCP_TIMEOUT_INIT;
912         req->retrans = 0;
913         req->sk = NULL;
914         req->dl_next = lopt->syn_table[h];
915
916         write_lock(&tp->syn_wait_lock);
917         lopt->syn_table[h] = req;
918         write_unlock(&tp->syn_wait_lock);
919
920 #ifdef CONFIG_ACCEPT_QUEUES
921         tcp_synq_added(sk, req);
922 #else
923         tcp_synq_added(sk);
924 #endif
925 }
926
927
928 /*
929  * This routine does path mtu discovery as defined in RFC1191.
930  */
931 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
932                                      u32 mtu)
933 {
934         struct dst_entry *dst;
935         struct inet_opt *inet = inet_sk(sk);
936         struct tcp_opt *tp = tcp_sk(sk);
937
938         /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
939          * sent out by Linux are always < 576 bytes so they should go through
940          * unfragmented).
941          */
942         if (sk->sk_state == TCP_LISTEN)
943                 return;
944
945         /* We don't check in the dst entry if pmtu discovery is forbidden
946          * on this route. We just assume that no packet-too-big packets
947          * are sent back when pmtu discovery is not active.
948          * There is a small race when the user changes this flag in the
949          * route, but I think that's acceptable.
950          */
951         if ((dst = __sk_dst_check(sk, 0)) == NULL)
952                 return;
953
954         dst->ops->update_pmtu(dst, mtu);
955
956         /* Something is about to go wrong... Remember the soft error
957          * in case this connection is not able to recover.
958          */
959         if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
960                 sk->sk_err_soft = EMSGSIZE;
961
962         mtu = dst_pmtu(dst);
963
964         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
965             tp->pmtu_cookie > mtu) {
966                 tcp_sync_mss(sk, mtu);
967
968                 /* Resend the TCP packet because it's
969                  * clear that the old packet has been
970                  * dropped. This is the new "fast" path mtu
971                  * discovery.
972                  */
973                 tcp_simple_retransmit(sk);
974         } /* else let the usual retransmit timer handle it */
975 }
976
977 /*
978  * This routine is called by the ICMP module when it gets some
979  * sort of error condition.  If err < 0 then the socket should
980  * be closed and the error returned to the user.  If err > 0
981  * it's just the icmp type << 8 | icmp code.  After adjustment
982  * header points to the first 8 bytes of the tcp header.  We need
983  * to find the appropriate port.
984  *
985  * The locking strategy used here is very "optimistic". When
986  * someone else accesses the socket the ICMP is just dropped
987  * and for some paths there is no check at all.
988  * A more general error queue to queue errors for later handling
989  * is probably better.
990  *
991  */
992
993 void tcp_v4_err(struct sk_buff *skb, u32 info)
994 {
995         struct iphdr *iph = (struct iphdr *)skb->data;
996         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
997         struct tcp_opt *tp;
998         struct inet_opt *inet;
999         int type = skb->h.icmph->type;
1000         int code = skb->h.icmph->code;
1001         struct sock *sk;
1002         __u32 seq;
1003         int err;
1004
1005         if (skb->len < (iph->ihl << 2) + 8) {
1006                 ICMP_INC_STATS_BH(IcmpInErrors);
1007                 return;
1008         }
1009
1010         sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1011                            th->source, tcp_v4_iif(skb));
1012         if (!sk) {
1013                 ICMP_INC_STATS_BH(IcmpInErrors);
1014                 return;
1015         }
1016         if (sk->sk_state == TCP_TIME_WAIT) {
1017                 tcp_tw_put((struct tcp_tw_bucket *)sk);
1018                 return;
1019         }
1020
1021         bh_lock_sock(sk);
1022         /* If too many ICMPs get dropped on busy
1023          * servers this needs to be solved differently.
1024          */
1025         if (sock_owned_by_user(sk))
1026                 NET_INC_STATS_BH(LockDroppedIcmps);
1027
1028         if (sk->sk_state == TCP_CLOSE)
1029                 goto out;
1030
1031         tp = tcp_sk(sk);
1032         seq = ntohl(th->seq);
1033         if (sk->sk_state != TCP_LISTEN &&
1034             !between(seq, tp->snd_una, tp->snd_nxt)) {
1035                 NET_INC_STATS(OutOfWindowIcmps);
1036                 goto out;
1037         }
1038
1039         switch (type) {
1040         case ICMP_SOURCE_QUENCH:
1041                 /* This is deprecated, but if someone generated it,
1042                  * we have no reason to ignore it.
1043                  */
1044                 if (!sock_owned_by_user(sk))
1045                         tcp_enter_cwr(tp);
1046                 goto out;
1047         case ICMP_PARAMETERPROB:
1048                 err = EPROTO;
1049                 break;
1050         case ICMP_DEST_UNREACH:
1051                 if (code > NR_ICMP_UNREACH)
1052                         goto out;
1053
1054                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1055                         if (!sock_owned_by_user(sk))
1056                                 do_pmtu_discovery(sk, iph, info);
1057                         goto out;
1058                 }
1059
1060                 err = icmp_err_convert[code].errno;
1061                 break;
1062         case ICMP_TIME_EXCEEDED:
1063                 err = EHOSTUNREACH;
1064                 break;
1065         default:
1066                 goto out;
1067         }
1068
1069         switch (sk->sk_state) {
1070                 struct open_request *req, **prev;
1071         case TCP_LISTEN:
1072                 if (sock_owned_by_user(sk))
1073                         goto out;
1074
1075                 req = tcp_v4_search_req(tp, &prev, th->dest,
1076                                         iph->daddr, iph->saddr);
1077                 if (!req)
1078                         goto out;
1079
1080                 /* ICMPs are not backlogged, hence we cannot get
1081                    an established socket here.
1082                  */
1083                 BUG_TRAP(!req->sk);
1084
1085                 if (seq != req->snt_isn) {
1086                         NET_INC_STATS_BH(OutOfWindowIcmps);
1087                         goto out;
1088                 }
1089
1090                 /*
1091                  * Still in SYN_RECV, just remove it silently.
1092                  * There is no good way to pass the error to the newly
1093                  * created socket, and POSIX does not want network
1094                  * errors returned from accept().
1095                  */
1096                 tcp_synq_drop(sk, req, prev);
1097                 goto out;
1098
1099         case TCP_SYN_SENT:
1100         case TCP_SYN_RECV:  /* Cannot happen normally.
1101                                It can, e.g., if SYNs crossed.
1102                              */
1103                 if (!sock_owned_by_user(sk)) {
1104                         TCP_INC_STATS_BH(TcpAttemptFails);
1105                         sk->sk_err = err;
1106
1107                         sk->sk_error_report(sk);
1108
1109                         tcp_done(sk);
1110                 } else {
1111                         sk->sk_err_soft = err;
1112                 }
1113                 goto out;
1114         }
1115
1116         /* If we've already connected we will keep trying
1117          * until we time out, or the user gives up.
1118          *
1119          * rfc1122 4.2.3.9 allows us to consider as hard errors
1120          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
1121          * but it is obsoleted by pmtu discovery).
1122          *
1123          * Note that in the modern internet, where routing is unreliable
1124          * and broken firewalls sit in each dark corner sending random
1125          * errors ordered by their masters, even these two messages finally
1126          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
1127          *
1128          * Now we are in compliance with RFCs.
1129          *                                                      --ANK (980905)
1130          */
1131
1132         inet = inet_sk(sk);
1133         if (!sock_owned_by_user(sk) && inet->recverr) {
1134                 sk->sk_err = err;
1135                 sk->sk_error_report(sk);
1136         } else  { /* Only an error on timeout */
1137                 sk->sk_err_soft = err;
1138         }
1139
1140 out:
1141         bh_unlock_sock(sk);
1142         sock_put(sk);
1143 }
1144
1145 /* This routine computes an IPv4 TCP checksum. */
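/* With CHECKSUM_HW only the pseudo-header sum is stored in th->check and
 * skb->csum records the offset of the check field, leaving the device to
 * finish the checksum; otherwise the full checksum is computed in software.
 */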
1146 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1147                        struct sk_buff *skb)
1148 {
1149         struct inet_opt *inet = inet_sk(sk);
1150
1151         if (skb->ip_summed == CHECKSUM_HW) {
1152                 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1153                 skb->csum = offsetof(struct tcphdr, check);
1154         } else {
1155                 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1156                                          csum_partial((char *)th,
1157                                                       th->doff << 2,
1158                                                       skb->csum));
1159         }
1160 }
1161
1162 /*
1163  *      This routine will send an RST to the other tcp.
1164  *
1165  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
1166  *                    for the reset?
1167  *      Answer: if a packet caused an RST, it is not for a socket
1168  *              existing in our system; if it is matched to a socket,
1169  *              it is just a duplicate segment or a bug in the other
1170  *              side's TCP. So we build the reply based only on the
1171  *              parameters that arrived with the segment.
1172  *      Exception: precedence violation. We do not implement it in any case.
1173  */
1174
1175 static void tcp_v4_send_reset(struct sk_buff *skb)
1176 {
1177         struct tcphdr *th = skb->h.th;
1178         struct tcphdr rth;
1179         struct ip_reply_arg arg;
1180
1181         /* Never send a reset in response to a reset. */
1182         if (th->rst)
1183                 return;
1184
1185         if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1186                 return;
1187
1188         /* Swap the send and the receive. */
1189         memset(&rth, 0, sizeof(struct tcphdr));
1190         rth.dest   = th->source;
1191         rth.source = th->dest;
1192         rth.doff   = sizeof(struct tcphdr) / 4;
1193         rth.rst    = 1;
1194
1195         if (th->ack) {
1196                 rth.seq = th->ack_seq;
1197         } else {
1198                 rth.ack = 1;
1199                 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1200                                     skb->len - (th->doff << 2));
1201         }
1202
1203         memset(&arg, 0, sizeof arg);
1204         arg.iov[0].iov_base = (unsigned char *)&rth;
1205         arg.iov[0].iov_len  = sizeof rth;
1206         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1207                                       skb->nh.iph->saddr, /*XXX*/
1208                                       sizeof(struct tcphdr), IPPROTO_TCP, 0);
1209         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1210
1211         ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1212
1213         TCP_INC_STATS_BH(TcpOutSegs);
1214         TCP_INC_STATS_BH(TcpOutRsts);
1215 }
1216
1217 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1218    outside socket context, is certainly ugly. What can I do?
1219  */
1220
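/* The reply carries at most one option: rep.tsopt[] encodes NOP, NOP,
 * TIMESTAMP, length followed by TSval/TSecr, i.e. the standard 12-byte
 * timestamp option layout, and doff is sized accordingly.
 */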
1221 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1222                             u32 win, u32 ts)
1223 {
1224         struct tcphdr *th = skb->h.th;
1225         struct {
1226                 struct tcphdr th;
1227                 u32 tsopt[3];
1228         } rep;
1229         struct ip_reply_arg arg;
1230
1231         memset(&rep.th, 0, sizeof(struct tcphdr));
1232         memset(&arg, 0, sizeof arg);
1233
1234         arg.iov[0].iov_base = (unsigned char *)&rep;
1235         arg.iov[0].iov_len  = sizeof(rep.th);
1236         if (ts) {
1237                 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1238                                      (TCPOPT_TIMESTAMP << 8) |
1239                                      TCPOLEN_TIMESTAMP);
1240                 rep.tsopt[1] = htonl(tcp_time_stamp);
1241                 rep.tsopt[2] = htonl(ts);
1242                 arg.iov[0].iov_len = sizeof(rep);
1243         }
1244
1245         /* Swap the send and the receive. */
1246         rep.th.dest    = th->source;
1247         rep.th.source  = th->dest;
1248         rep.th.doff    = arg.iov[0].iov_len / 4;
1249         rep.th.seq     = htonl(seq);
1250         rep.th.ack_seq = htonl(ack);
1251         rep.th.ack     = 1;
1252         rep.th.window  = htons(win);
1253
1254         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1255                                       skb->nh.iph->saddr, /*XXX*/
1256                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
1257         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1258
1259         ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1260
1261         TCP_INC_STATS_BH(TcpOutSegs);
1262 }
1263
1264 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1265 {
1266         struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1267
1268         tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1269                         tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1270
1271         tcp_tw_put(tw);
1272 }
1273
1274 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1275 {
1276         tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1277                         req->ts_recent);
1278 }
1279
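/* Build a route for replying to an open_request, honouring a source route
 * carried in the saved IP options; returns NULL (and counts OutNoRoutes) if
 * no usable route exists or a strict source route cannot be followed.
 */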
1280 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1281                                           struct open_request *req)
1282 {
1283         struct rtable *rt;
1284         struct ip_options *opt = req->af.v4_req.opt;
1285         struct flowi fl = { .oif = sk->sk_bound_dev_if,
1286                             .nl_u = { .ip4_u =
1287                                       { .daddr = ((opt && opt->srr) ?
1288                                                   opt->faddr :
1289                                                   req->af.v4_req.rmt_addr),
1290                                         .saddr = req->af.v4_req.loc_addr,
1291                                         .tos = RT_CONN_FLAGS(sk) } },
1292                             .proto = IPPROTO_TCP,
1293                             .uli_u = { .ports =
1294                                        { .sport = inet_sk(sk)->sport,
1295                                          .dport = req->rmt_port } } };
1296
1297         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1298                 IP_INC_STATS_BH(OutNoRoutes);
1299                 return NULL;
1300         }
1301         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1302                 ip_rt_put(rt);
1303                 IP_INC_STATS_BH(OutNoRoutes);
1304                 return NULL;
1305         }
1306         return &rt->u.dst;
1307 }
1308
1309 /*
1310  *      Send a SYN-ACK after having received an ACK.
1311  *      This still operates on an open_request only, not on a big
1312  *      socket.
1313  */
1314 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1315                               struct dst_entry *dst)
1316 {
1317         int err = -1;
1318         struct sk_buff * skb;
1319
1320         /* First, grab a route. */
1321         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1322                 goto out;
1323
1324         skb = tcp_make_synack(sk, dst, req);
1325
1326         if (skb) {
1327                 struct tcphdr *th = skb->h.th;
1328
1329                 th->check = tcp_v4_check(th, skb->len,
1330                                          req->af.v4_req.loc_addr,
1331                                          req->af.v4_req.rmt_addr,
1332                                          csum_partial((char *)th, skb->len,
1333                                                       skb->csum));
1334
1335                 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1336                                             req->af.v4_req.rmt_addr,
1337                                             req->af.v4_req.opt);
1338                 if (err == NET_XMIT_CN)
1339                         err = 0;
1340         }
1341
1342 out:
1343         dst_release(dst);
1344         return err;
1345 }
1346
1347 /*
1348  *      IPv4 open_request destructor.
1349  */
1350 static void tcp_v4_or_free(struct open_request *req)
1351 {
1352         if (req->af.v4_req.opt)
1353                 kfree(req->af.v4_req.opt);
1354 }
1355
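/* Rate-limited to one message per minute, globally rather than per port. */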
1356 static inline void syn_flood_warning(struct sk_buff *skb)
1357 {
1358         static unsigned long warntime;
1359
1360         if (time_after(jiffies, (warntime + HZ * 60))) {
1361                 warntime = jiffies;
1362                 printk(KERN_INFO
1363                        "possible SYN flooding on port %d. Sending cookies.\n",
1364                        ntohs(skb->h.th->dest));
1365         }
1366 }
1367
1368 /*
1369  * Save and compile IPv4 options into the open_request if needed.
1370  */
1371 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1372                                                      struct sk_buff *skb)
1373 {
1374         struct ip_options *opt = &(IPCB(skb)->opt);
1375         struct ip_options *dopt = NULL;
1376
1377         if (opt && opt->optlen) {
1378                 int opt_size = optlength(opt);
1379                 dopt = kmalloc(opt_size, GFP_ATOMIC);
1380                 if (dopt) {
1381                         if (ip_options_echo(dopt, skb)) {
1382                                 kfree(dopt);
1383                                 dopt = NULL;
1384                         }
1385                 }
1386         }
1387         return dopt;
1388 }
1389
1390 /*
1391  * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1392  * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1393  * It would be better to replace it with a global counter for all sockets
1394  * but then some measure against one socket starving all other sockets
1395  * would be needed.
1396  *
1397  * It was 128 by default. Experiments with real servers show that
1398  * it is absolutely not enough even at 100 conn/sec. 256 cures most
1399  * of the problems. This value is adjusted to 128 for very small machines
1400  * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
1401  * Increasing it further requires changing the hash table size.
1402  */
1403 int sysctl_max_syn_backlog = 256;
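/* Example (assumes the usual net.ipv4 sysctl name; verify on your kernel):
 *   echo 1024 > /proc/sys/net/ipv4/tcp_max_syn_backlog
 */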
1404
1405 struct or_calltable or_ipv4 = {
1406         .family         =       PF_INET,
1407         .rtx_syn_ack    =       tcp_v4_send_synack,
1408         .send_ack       =       tcp_v4_or_send_ack,
1409         .destructor     =       tcp_v4_or_free,
1410         .send_reset     =       tcp_v4_send_reset,
1411 };
1412
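/* Handle an incoming SYN on a listening socket: drop SYNs to broadcast or
 * multicast destinations, fall back to SYN cookies when the SYN queue is full
 * (if enabled), allocate and initialize an open_request from the parsed TCP
 * options and saved IP options, and choose an initial sequence number.
 */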
1413 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1414 {
1415         struct tcp_opt tp;
1416         struct open_request *req;
1417         __u32 saddr = skb->nh.iph->saddr;
1418         __u32 daddr = skb->nh.iph->daddr;
1419         __u32 isn = TCP_SKB_CB(skb)->when;
1420         struct dst_entry *dst = NULL;
1421 #ifdef CONFIG_ACCEPT_QUEUES
1422         int class = 0;
1423 #endif
1424 #ifdef CONFIG_SYN_COOKIES
1425         int want_cookie = 0;
1426 #else
1427 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1428 #endif
1429
1430         /* Never answer SYNs sent to broadcast or multicast */
1431         if (((struct rtable *)skb->dst)->rt_flags &
1432             (RTCF_BROADCAST | RTCF_MULTICAST))
1433                 goto drop;
1434
1435         /* TW buckets are converted to open requests without
1436          * limitation; they conserve resources and the peer is
1437          * evidently a real one.
1438          */
1439         if (tcp_synq_is_full(sk) && !isn) {
1440 #ifdef CONFIG_SYN_COOKIES
1441                 if (sysctl_tcp_syncookies) {
1442                         want_cookie = 1;
1443                 } else
1444 #endif
1445                 goto drop;
1446         }
1447
1448 #ifdef CONFIG_ACCEPT_QUEUES
1449         class = (skb->nfmark <= 0) ? 0 :
1450                 ((skb->nfmark >= NUM_ACCEPT_QUEUES) ? 0: skb->nfmark);
1451         /*
1452          * Accept only if the class has shares set or if the default class
1453          * i.e. class 0 has shares
1454          */
1455         if (!(tcp_sk(sk)->acceptq[class].aq_ratio)) {
1456                 if (tcp_sk(sk)->acceptq[0].aq_ratio) 
1457                         class = 0;
1458                 else
1459                         goto drop;
1460         }
1461 #endif
1462
1463         /* The accept backlog is full. If we have already queued enough
1464          * warm entries in the syn queue, drop the request. That is better
1465          * than clogging the syn queue with openreqs whose timeouts grow
1466          * exponentially.
1467          */
1468 #ifdef CONFIG_ACCEPT_QUEUES
1469         if (sk_acceptq_is_full(sk, class) && tcp_synq_young(sk, class) > 1)
1470 #else
1471         if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1472 #endif
1473                 goto drop;
1474
1475         req = tcp_openreq_alloc();
1476         if (!req)
1477                 goto drop;
1478
1479         tcp_clear_options(&tp);
1480         tp.mss_clamp = 536;
1481         tp.user_mss  = tcp_sk(sk)->user_mss;
1482
1483         tcp_parse_options(skb, &tp, 0);
1484
1485         if (want_cookie) {
1486                 tcp_clear_options(&tp);
1487                 tp.saw_tstamp = 0;
1488         }
1489
1490         if (tp.saw_tstamp && !tp.rcv_tsval) {
1491                 /* Some OSes (unknown ones, but I see them on a web server
1492                  * containing information interesting only to Windows
1493                  * users) do not send their timestamp in the SYN. That is
1494                  * the easy case: we simply do not advertise TS support.
1495                  */
1496                 tp.saw_tstamp = 0;
1497                 tp.tstamp_ok  = 0;
1498         }
1499         tp.tstamp_ok = tp.saw_tstamp;
1500
1501         tcp_openreq_init(req, &tp, skb);
1502 #ifdef CONFIG_ACCEPT_QUEUES
1503         req->acceptq_class = class;
1504         req->acceptq_time_stamp = jiffies;
1505 #endif
1506         req->af.v4_req.loc_addr = daddr;
1507         req->af.v4_req.rmt_addr = saddr;
1508         req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1509         req->class = &or_ipv4;
1510         if (!want_cookie)
1511                 TCP_ECN_create_request(req, skb->h.th);
1512
1513         if (want_cookie) {
1514 #ifdef CONFIG_SYN_COOKIES
1515                 syn_flood_warning(skb);
1516 #endif
1517                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1518         } else if (!isn) {
1519                 struct inet_peer *peer = NULL;
1520
1521                 /* VJ's idea. We save the last timestamp seen
1522                  * from the destination in the peer table when entering
1523                  * TIME-WAIT state, and check against it before
1524                  * accepting a new connection request.
1525                  *
1526                  * If "isn" is not zero, this request hit an alive
1527                  * timewait bucket, so all the necessary checks
1528                  * are made in the function processing the timewait state.
1529                  */
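                /* Concretely: the SYN is rejected when this peer's timestamp
                 * was recorded less than TCP_PAWS_MSL seconds ago and that
                 * remembered value is ahead of the timestamp carried in this
                 * SYN by more than TCP_PAWS_WINDOW, which suggests an old
                 * duplicate rather than a fresh connection attempt.
                 */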
1530                 if (tp.saw_tstamp &&
1531                     sysctl_tcp_tw_recycle &&
1532                     (dst = tcp_v4_route_req(sk, req)) != NULL &&
1533                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1534                     peer->v4daddr == saddr) {
1535                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1536                             (s32)(peer->tcp_ts - req->ts_recent) >
1537                                                         TCP_PAWS_WINDOW) {
1538                                 NET_INC_STATS_BH(PAWSPassiveRejected);
1539                                 dst_release(dst);
1540                                 goto drop_and_free;
1541                         }
1542                 }
1543                 /* Kill the following clause if you dislike this approach. */
1544                 else if (!sysctl_tcp_syncookies &&
1545                          (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1546                           (sysctl_max_syn_backlog >> 2)) &&
1547                          (!peer || !peer->tcp_ts_stamp) &&
1548                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1549                         /* Without syncookies the last quarter of the
1550                          * backlog is reserved for destinations proven
1551                          * to be alive.
1552                          * That way we keep communicating with the
1553                          * destinations we already remembered by the
1554                          * time the synflood started.
1555                          */
1556                         NETDEBUG(if (net_ratelimit()) \
1557                                         printk(KERN_DEBUG "TCP: drop open "
1558                                                           "request from %u.%u."
1559                                                           "%u.%u/%u\n", \
1560                                                NIPQUAD(saddr),
1561                                                ntohs(skb->h.th->source)));
1562                         dst_release(dst);
1563                         goto drop_and_free;
1564                 }
1565
1566                 isn = tcp_v4_init_sequence(sk, skb);
1567         }
1568         req->snt_isn = isn;
1569
1570         if (tcp_v4_send_synack(sk, req, dst))
1571                 goto drop_and_free;
1572
1573         if (want_cookie) {
1574                 tcp_openreq_free(req);
1575         } else {
1576                 tcp_v4_synq_add(sk, req);
1577         }
1578         return 0;
1579
1580 drop_and_free:
1581         tcp_openreq_free(req);
1582 drop:
1583         TCP_INC_STATS_BH(TcpAttemptFails);
1584         return 0;
1585 }
1586
1587
1588 /*
1589  * The three-way handshake has completed - we received a valid ACK to
1590  * our SYN-ACK - now create the new socket.
1591  */
1592 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1593                                   struct open_request *req,
1594                                   struct dst_entry *dst)
1595 {
1596         struct inet_opt *newinet;
1597         struct tcp_opt *newtp;
1598         struct sock *newsk;
1599
1600 #ifdef CONFIG_ACCEPT_QUEUES
1601         if (sk_acceptq_is_full(sk, req->acceptq_class))
1602 #else
1603         if (sk_acceptq_is_full(sk))
1604 #endif
1605                 goto exit_overflow;
1606
1607         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1608                 goto exit;
1609
1610         newsk = tcp_create_openreq_child(sk, req, skb);
1611         if (!newsk)
1612                 goto exit;
1613
1614         newsk->sk_dst_cache = dst;
1615         tcp_v4_setup_caps(newsk, dst);
1616
1617         newtp                 = tcp_sk(newsk);
1618         newinet               = inet_sk(newsk);
1619         newinet->daddr        = req->af.v4_req.rmt_addr;
1620         newinet->rcv_saddr    = req->af.v4_req.loc_addr;
1621         newinet->saddr        = req->af.v4_req.loc_addr;
1622         newinet->opt          = req->af.v4_req.opt;
1623         req->af.v4_req.opt    = NULL;
1624         newinet->mc_index     = tcp_v4_iif(skb);
1625         newinet->mc_ttl       = skb->nh.iph->ttl;
1626         newtp->ext_header_len = 0;
1627         if (newinet->opt)
1628                 newtp->ext_header_len = newinet->opt->optlen;
1629         newtp->ext2_header_len = dst->header_len;
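        /* Seed the IP ID counter from the write sequence and jiffies,
         * presumably so consecutive connections start from unrelated values.
         */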
1630         newinet->id = newtp->write_seq ^ jiffies;
1631
1632         tcp_sync_mss(newsk, dst_pmtu(dst));
1633         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1634         tcp_initialize_rcv_mss(newsk);
1635
1636         __tcp_v4_hash(newsk, 0);
1637         __tcp_inherit_port(sk, newsk);
1638
1639         return newsk;
1640
1641 exit_overflow:
1642         NET_INC_STATS_BH(ListenOverflows);
1643 exit:
1644         NET_INC_STATS_BH(ListenDrops);
1645         dst_release(dst);
1646         return NULL;
1647 }
1648
1649 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1650 {
1651         struct tcphdr *th = skb->h.th;
1652         struct iphdr *iph = skb->nh.iph;
1653         struct tcp_opt *tp = tcp_sk(sk);
1654         struct sock *nsk;
1655         struct open_request **prev;
1656         /* Find possible connection requests. */
1657         struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1658                                                      iph->saddr, iph->daddr);
1659         if (req)
1660                 return tcp_check_req(sk, skb, req, prev);
1661
1662         nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1663                                           th->source,
1664                                           skb->nh.iph->daddr,
1665                                           ntohs(th->dest),
1666                                           tcp_v4_iif(skb));
1667
1668         if (nsk) {
1669                 if (nsk->sk_state != TCP_TIME_WAIT) {
1670                         bh_lock_sock(nsk);
1671                         return nsk;
1672                 }
1673                 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1674                 return NULL;
1675         }
1676
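        /* Neither an open request nor an established socket matched. The
         * ACK may still be the final packet of a syncookie handshake:
         * cookie_v4_check() validates the cookie carried in the ACK and,
         * if it is good, creates the child socket directly.
         */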
1677 #ifdef CONFIG_SYN_COOKIES
1678         if (!th->rst && !th->syn && th->ack)
1679                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1680 #endif
1681         return sk;
1682 }
1683
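/* Checksum strategy, roughly: hardware-computed checksums are verified
 * against the pseudo-header and then trusted; short segments (<= 76 bytes)
 * are fully verified in software immediately; for longer segments only the
 * pseudo-header sum is precomputed here, leaving full verification to the
 * copy-to-user path or a later tcp_checksum_complete().
 */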
1684 static int tcp_v4_checksum_init(struct sk_buff *skb)
1685 {
1686         if (skb->ip_summed == CHECKSUM_HW) {
1687                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1688                 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1689                                   skb->nh.iph->daddr, skb->csum))
1690                         return 0;
1691
1692                 NETDEBUG(if (net_ratelimit())
1693                                 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1694                 skb->ip_summed = CHECKSUM_NONE;
1695         }
1696         if (skb->len <= 76) {
1697                 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1698                                  skb->nh.iph->daddr,
1699                                  skb_checksum(skb, 0, skb->len, 0)))
1700                         return -1;
1701                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1702         } else {
1703                 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1704                                           skb->nh.iph->saddr,
1705                                           skb->nh.iph->daddr, 0);
1706         }
1707         return 0;
1708 }
1709
1710
1711 /* The socket must have its spinlock held when we get
1712  * here.
1713  *
1714  * We have a potential double-lock case here, so even when
1715  * doing backlog processing we use the BH locking scheme.
1716  * This is because we cannot sleep with the original spinlock
1717  * held.
1718  */
1719 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1720 {
1721         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1722                 TCP_CHECK_TIMER(sk);
1723                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1724                         goto reset;
1725                 TCP_CHECK_TIMER(sk);
1726                 return 0;
1727         }
1728
1729         if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1730                 goto csum_err;
1731
1732         if (sk->sk_state == TCP_LISTEN) {
1733                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1734                 if (!nsk)
1735                         goto discard;
1736
1737                 if (nsk != sk) {
1738                         if (tcp_child_process(sk, nsk, skb))
1739                                 goto reset;
1740                         return 0;
1741                 }
1742         }
1743
1744         TCP_CHECK_TIMER(sk);
1745         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1746                 goto reset;
1747         TCP_CHECK_TIMER(sk);
1748         return 0;
1749
1750 reset:
1751         tcp_v4_send_reset(skb);
1752 discard:
1753         kfree_skb(skb);
1754         /* Be careful here. If this function gets more complicated and
1755          * gcc suffers from register pressure on the x86, sk (in %ebx)
1756          * might be destroyed here. This current version compiles correctly,
1757          * but you have been warned.
1758          */
1759         return 0;
1760
1761 csum_err:
1762         TCP_INC_STATS_BH(TcpInErrs);
1763         goto discard;
1764 }
1765
1766 extern struct proto_ops inet_stream_ops;
1767
1768 extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
1769
1770 /*
1771  *      From tcp_input.c
1772  */
1773
1774 int tcp_v4_rcv(struct sk_buff *skb)
1775 {
1776         struct tcphdr *th;
1777         struct sock *sk;
1778         int ret;
1779
1780         if (skb->pkt_type != PACKET_HOST)
1781                 goto discard_it;
1782
1783         /* Count it even if it's bad */
1784         TCP_INC_STATS_BH(TcpInSegs);
1785
1786         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1787                 goto discard_it;
1788
1789         th = skb->h.th;
1790
1791         if (th->doff < sizeof(struct tcphdr) / 4)
1792                 goto bad_packet;
1793         if (!pskb_may_pull(skb, th->doff * 4))
1794                 goto discard_it;
1795
1796         /* An explanation is required here, I think.
1797          * Packet length and doff are validated by header prediction,
1798          * provided the case of th->doff==0 is eliminated.
1799          * So we defer the checks. */
1800         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1801              tcp_v4_checksum_init(skb) < 0))
1802                 goto bad_packet;
1803
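        /* Fill in the TCP control block. Note that end_seq counts the SYN
         * and FIN flags as one sequence number each in addition to the
         * payload, so e.g. a pure SYN with no data has end_seq == seq + 1.
         */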
1804         th = skb->h.th;
1805         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1806         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1807                                     skb->len - th->doff * 4);
1808         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1809         TCP_SKB_CB(skb)->when    = 0;
1810         TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
1811         TCP_SKB_CB(skb)->sacked  = 0;
1812
1813         sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1814                              skb->nh.iph->daddr, ntohs(th->dest),
1815                              tcp_v4_iif(skb));
1816
1817         if (!sk)
1818                 goto no_tcp_socket;
1819
1820 process:
1821         /* Silently drop if VNET is active (if INET bind() has been
1822          * overridden) and the context is not entitled to read the
1823          * packet.
1824          */
1825         if (inet_stream_ops.bind != inet_bind &&
1826             (int) sk->sk_xid >= 0 && sk->sk_xid != skb->xid)
1827                 goto discard_it;
1828
1829         if (sk->sk_state == TCP_TIME_WAIT)
1830                 goto do_time_wait;
1831
1832         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1833                 goto discard_and_relse;
1834
1835         if (sk_filter(sk, skb, 0))
1836                 goto discard_and_relse;
1837
1838         skb->dev = NULL;
1839
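        /* Three delivery paths: if no user context owns the socket, first
         * try to queue the skb on the prequeue for process-context handling;
         * failing that, process it directly via tcp_v4_do_rcv(). If the
         * socket is owned, park the skb on the backlog, which is drained
         * when the owner releases the socket.
         */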
1840         bh_lock_sock(sk);
1841         ret = 0;
1842         if (!sock_owned_by_user(sk)) {
1843                 if (!tcp_prequeue(sk, skb))
1844                         ret = tcp_v4_do_rcv(sk, skb);
1845         } else
1846                 sk_add_backlog(sk, skb);
1847         bh_unlock_sock(sk);
1848
1849         sock_put(sk);
1850
1851         return ret;
1852
1853 no_tcp_socket:
1854         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1855                 goto discard_it;
1856
1857         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1858 bad_packet:
1859                 TCP_INC_STATS_BH(TcpInErrs);
1860         } else {
1861                 tcp_v4_send_reset(skb);
1862         }
1863
1864 discard_it:
1865         /* Discard frame. */
1866         kfree_skb(skb);
1867         return 0;
1868
1869 discard_and_relse:
1870         sock_put(sk);
1871         goto discard_it;
1872
1873 do_time_wait:
1874         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1875                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1876                 goto discard_it;
1877         }
1878
1879         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1880                 TCP_INC_STATS_BH(TcpInErrs);
1881                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1882                 goto discard_it;
1883         }
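        /* Timewait verdicts: TCP_TW_SYN means a new SYN may legitimately
         * reopen the connection, so look for a live listener and hand the
         * segment to it; TCP_TW_ACK re-sends the usual ACK; TCP_TW_RST ends
         * up sending a reset via no_tcp_socket; TCP_TW_SUCCESS means the
         * segment has been fully handled.
         */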
1884         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1885                                            skb, th, skb->len)) {
1886         case TCP_TW_SYN: {
1887                 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1888                                                           ntohs(th->dest),
1889                                                           tcp_v4_iif(skb));
1890                 if (sk2) {
1891                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1892                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1893                         sk = sk2;
1894                         goto process;
1895                 }
1896                 /* Fall through to ACK */
1897         }
1898         case TCP_TW_ACK:
1899                 tcp_v4_timewait_ack(sk, skb);
1900                 break;
1901         case TCP_TW_RST:
1902                 goto no_tcp_socket;
1903         case TCP_TW_SUCCESS:;
1904         }
1905         goto discard_it;
1906 }
1907
1908 /* With per-bucket locks this operation is not atomic, so this
1909  * version is no worse.
1910  */
1911 static void __tcp_v4_rehash(struct sock *sk)
1912 {
1913         sk->sk_prot->unhash(sk);
1914         sk->sk_prot->hash(sk);
1915 }
1916
1917 static int tcp_v4_reselect_saddr(struct sock *sk)
1918 {
1919         struct inet_opt *inet = inet_sk(sk);
1920         int err;
1921         struct rtable *rt;
1922         __u32 old_saddr = inet->saddr;
1923         __u32 new_saddr;
1924         __u32 daddr = inet->daddr;
1925
1926         if (inet->opt && inet->opt->srr)
1927                 daddr = inet->opt->faddr;
1928
1929         /* Query new route. */
1930         err = ip_route_connect(&rt, daddr, 0,
1931                                RT_TOS(inet->tos) | sk->sk_localroute,
1932                                sk->sk_bound_dev_if,
1933                                IPPROTO_TCP,
1934                                inet->sport, inet->dport, sk);
1935         if (err)
1936                 return err;
1937
1938         __sk_dst_set(sk, &rt->u.dst);
1939         tcp_v4_setup_caps(sk, &rt->u.dst);
1940         tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1941
1942         new_saddr = rt->rt_src;
1943
1944         if (new_saddr == old_saddr)
1945                 return 0;
1946
1947         if (sysctl_ip_dynaddr > 1) {
1948                 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1949                                  "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1950                        NIPQUAD(old_saddr),
1951                        NIPQUAD(new_saddr));
1952         }
1953
1954         inet->saddr = new_saddr;
1955         inet->rcv_saddr = new_saddr;
1956
1957         /* XXX The only ugly spot where we need to
1958          * XXX really change the socket's identity after
1959          * XXX it has entered the hashes. -DaveM
1960          *
1961          * Besides that, it does not check for connection
1962          * uniqueness. Expect trouble.
1963          */
1964         __tcp_v4_rehash(sk);
1965         return 0;
1966 }
1967
1968 int tcp_v4_rebuild_header(struct sock *sk)
1969 {
1970         struct inet_opt *inet = inet_sk(sk);
1971         struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1972         u32 daddr;
1973         int err;
1974
1975         /* Route is OK, nothing to do. */
1976         if (rt)
1977                 return 0;
1978
1979         /* Reroute. */
1980         daddr = inet->daddr;
1981         if (inet->opt && inet->opt->srr)
1982                 daddr = inet->opt->faddr;
1983
1984         {
1985                 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1986                                     .nl_u = { .ip4_u =
1987                                               { .daddr = daddr,
1988                                                 .saddr = inet->saddr,
1989                                                 .tos = RT_CONN_FLAGS(sk) } },
1990                                     .proto = IPPROTO_TCP,
1991                                     .uli_u = { .ports =
1992                                                { .sport = inet->sport,
1993                                                  .dport = inet->dport } } };
1994                                                 
1995                 err = ip_route_output_flow(&rt, &fl, sk, 0);
1996         }
1997         if (!err) {
1998                 __sk_dst_set(sk, &rt->u.dst);
1999                 tcp_v4_setup_caps(sk, &rt->u.dst);
2000                 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
2001                 return 0;
2002         }
2003
2004         /* Routing failed... */
2005         sk->sk_route_caps = 0;
2006
2007         if (!sysctl_ip_dynaddr ||
2008             sk->sk_state != TCP_SYN_SENT ||
2009             (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
2010             (err = tcp_v4_reselect_saddr(sk)) != 0)
2011                 sk->sk_err_soft = -err;
2012
2013         return err;
2014 }
2015
2016 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2017 {
2018         struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2019         struct inet_opt *inet = inet_sk(sk);
2020
2021         sin->sin_family         = AF_INET;
2022         sin->sin_addr.s_addr    = inet->daddr;
2023         sin->sin_port           = inet->dport;
2024 }
2025
2026 /* VJ's idea. Save the last timestamp seen from this destination and hold
2027  * it for at least the normal timewait interval, so it can be used for
2028  * duplicate segment detection in subsequent connections before they enter
2029  * the synchronized state.
2030  */
2031
2032 int tcp_v4_remember_stamp(struct sock *sk)
2033 {
2034         struct inet_opt *inet = inet_sk(sk);
2035         struct tcp_opt *tp = tcp_sk(sk);
2036         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2037         struct inet_peer *peer = NULL;
2038         int release_it = 0;
2039
2040         if (!rt || rt->rt_dst != inet->daddr) {
2041                 peer = inet_getpeer(inet->daddr, 1);
2042                 release_it = 1;
2043         } else {
2044                 if (!rt->peer)
2045                         rt_bind_peer(rt, 1);
2046                 peer = rt->peer;
2047         }
2048
2049         if (peer) {
2050                 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2051                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2052                      peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2053                         peer->tcp_ts_stamp = tp->ts_recent_stamp;
2054                         peer->tcp_ts = tp->ts_recent;
2055                 }
2056                 if (release_it)
2057                         inet_putpeer(peer);
2058                 return 1;
2059         }
2060
2061         return 0;
2062 }
2063
2064 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2065 {
2066         struct inet_peer *peer = NULL;
2067
2068         peer = inet_getpeer(tw->tw_daddr, 1);
2069
2070         if (peer) {
2071                 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2072                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2073                      peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2074                         peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2075                         peer->tcp_ts = tw->tw_ts_recent;
2076                 }
2077                 inet_putpeer(peer);
2078                 return 1;
2079         }
2080
2081         return 0;
2082 }
2083
2084 struct tcp_func ipv4_specific = {
2085         .queue_xmit     =       ip_queue_xmit,
2086         .send_check     =       tcp_v4_send_check,
2087         .rebuild_header =       tcp_v4_rebuild_header,
2088         .conn_request   =       tcp_v4_conn_request,
2089         .syn_recv_sock  =       tcp_v4_syn_recv_sock,
2090         .remember_stamp =       tcp_v4_remember_stamp,
2091         .net_header_len =       sizeof(struct iphdr),
2092         .setsockopt     =       ip_setsockopt,
2093         .getsockopt     =       ip_getsockopt,
2094         .addr2sockaddr  =       v4_addr2sockaddr,
2095         .sockaddr_len   =       sizeof(struct sockaddr_in),
2096 };
2097
2098 /* NOTE: A lot of things are set to zero explicitly by the call to
2099  *       sk_alloc(), so they need not be done here.
2100  */
2101 static int tcp_v4_init_sock(struct sock *sk)
2102 {
2103         struct tcp_opt *tp = tcp_sk(sk);
2104
2105         skb_queue_head_init(&tp->out_of_order_queue);
2106         tcp_init_xmit_timers(sk);
2107         tcp_prequeue_init(tp);
2108
2109         tp->rto  = TCP_TIMEOUT_INIT;
2110         tp->mdev = TCP_TIMEOUT_INIT;
2111
2112         /* So many TCP implementations out there (incorrectly) count the
2113          * initial SYN frame in their delayed-ACK and congestion control
2114          * algorithms that we must have the following bandaid to talk
2115          * efficiently to them.  -DaveM
2116          */
2117         tp->snd_cwnd = 2;
2118
2119         /* See draft-stevens-tcpca-spec-01 for discussion of the
2120          * initialization of these values.
2121          */
2122         tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
2123         tp->snd_cwnd_clamp = ~0;
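        /* 536 is the classic default MSS: the 576-byte minimum IPv4
         * reassembly buffer minus 20 bytes of IP header and 20 bytes of
         * TCP header.
         */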
2124         tp->mss_cache = 536;
2125
2126         tp->reordering = sysctl_tcp_reordering;
2127
2128         sk->sk_state = TCP_CLOSE;
2129
2130         sk->sk_write_space = sk_stream_write_space;
2131         sk->sk_use_write_queue = 1;
2132
2133         tp->af_specific = &ipv4_specific;
2134
2135         sk->sk_sndbuf = sysctl_tcp_wmem[1];
2136         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2137
2138         atomic_inc(&tcp_sockets_allocated);
2139
2140         return 0;
2141 }
2142
2143 int tcp_v4_destroy_sock(struct sock *sk)
2144 {
2145         struct tcp_opt *tp = tcp_sk(sk);
2146
2147         tcp_clear_xmit_timers(sk);
2148
2149         /* Clean up the write buffer. */
2150         sk_stream_writequeue_purge(sk);
2151
2152         /* Cleans up our, hopefully empty, out_of_order_queue. */
2153         __skb_queue_purge(&tp->out_of_order_queue);
2154
2155         /* Clean up the prequeue; it really must be empty. */
2156         __skb_queue_purge(&tp->ucopy.prequeue);
2157
2158         /* Clean up a referenced TCP bind bucket. */
2159         if (tp->bind_hash)
2160                 tcp_put_port(sk);
2161
2162         atomic_dec(&tcp_sockets_allocated);
2163
2164         return 0;
2165 }
2166
2167 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2168
2169 #ifdef CONFIG_PROC_FS
2170 /* Proc filesystem TCP sock list dumping. */
2171
2172 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2173 {
2174         return hlist_empty(head) ? NULL :
2175                 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2176 }
2177
2178 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2179 {
2180         return tw->tw_node.next ?
2181                 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2182 }
2183
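/* Walk the listening hash. For each listener that has pending open
 * requests, also descend into its per-listener syn_table (under
 * syn_wait_lock) before moving on to the next socket or bucket.
 */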
2184 static void *listening_get_next(struct seq_file *seq, void *cur)
2185 {
2186         struct tcp_opt *tp;
2187         struct hlist_node *node;
2188         struct sock *sk = cur;
2189         struct tcp_iter_state* st = seq->private;
2190
2191         if (!sk) {
2192                 st->bucket = 0;
2193                 sk = sk_head(&tcp_listening_hash[0]);
2194                 goto get_sk;
2195         }
2196
2197         ++st->num;
2198
2199         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2200                 struct open_request *req = cur;
2201
2202                 tp = tcp_sk(st->syn_wait_sk);
2203                 req = req->dl_next;
2204                 while (1) {
2205                         while (req) {
2206                                 /* always advance, or this loops forever */
2207                                 if (vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH) &&
2208                                     req->class->family == st->family) {
2209                                         cur = req;
2210                                         goto out;
2211                                 }
2212                                 req = req->dl_next;
2213                         }
2214                         if (++st->sbucket >= TCP_SYNQ_HSIZE)
2215                                 break;
2216 get_req:
2217                         req = tp->listen_opt->syn_table[st->sbucket];
2218                 }
2219                 sk        = sk_next(st->syn_wait_sk);
2220                 st->state = TCP_SEQ_STATE_LISTENING;
2221                 read_unlock_bh(&tp->syn_wait_lock);
2222         } else
2223                 sk = sk_next(sk);
2224 get_sk:
2225         sk_for_each_from(sk, node) {
2226                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2227                         continue;
2228                 if (sk->sk_family == st->family) {
2229                         cur = sk;
2230                         goto out;
2231                 }
2232                 tp = tcp_sk(sk);
2233                 read_lock_bh(&tp->syn_wait_lock);
2234                 if (tp->listen_opt && tp->listen_opt->qlen) {
2235                         st->uid         = sock_i_uid(sk);
2236                         st->syn_wait_sk = sk;
2237                         st->state       = TCP_SEQ_STATE_OPENREQ;
2238                         st->sbucket     = 0;
2239                         goto get_req;
2240                 }
2241                 read_unlock_bh(&tp->syn_wait_lock);
2242         }
2243         if (++st->bucket < TCP_LHTABLE_SIZE) {
2244                 sk = sk_head(&tcp_listening_hash[st->bucket]);
2245                 goto get_sk;
2246         }
2247         cur = NULL;
2248 out:
2249         return cur;
2250 }
2251
2252 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2253 {
2254         void *rc = listening_get_next(seq, NULL);
2255
2256         while (rc && *pos) {
2257                 rc = listening_get_next(seq, rc);
2258                 --*pos;
2259         }
2260         return rc;
2261 }
2262
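/* The established hash keeps full sockets in the first tcp_ehash_size
 * buckets and TIME-WAIT buckets in a second half at the same index plus
 * tcp_ehash_size, so every step scans both halves of a bucket.
 */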
2263 static void *established_get_first(struct seq_file *seq)
2264 {
2265         struct tcp_iter_state* st = seq->private;
2266         void *rc = NULL;
2267
2268         for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2269                 struct sock *sk;
2270                 struct hlist_node *node;
2271                 struct tcp_tw_bucket *tw;
2272                
2273                 read_lock(&tcp_ehash[st->bucket].lock);
2274                 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2275                         if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2276                                 continue;
2277                         if (sk->sk_family != st->family)
2278                                 continue;
2279                         rc = sk;
2280                         goto out;
2281                 }
2282                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2283                 tw_for_each(tw, node,
2284                             &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2285                         if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2286                                 continue;
2287                         if (tw->tw_family != st->family)
2288                                 continue;
2289                         rc = tw;
2290                         goto out;
2291                 }
2292                 read_unlock(&tcp_ehash[st->bucket].lock);
2293                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2294         }
2295 out:
2296         return rc;
2297 }
2298
2299 static void *established_get_next(struct seq_file *seq, void *cur)
2300 {
2301         struct sock *sk = cur;
2302         struct tcp_tw_bucket *tw;
2303         struct hlist_node *node;
2304         struct tcp_iter_state* st = seq->private;
2305
2306         ++st->num;
2307
2308         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2309                 tw = cur;
2310                 tw = tw_next(tw);
2311 get_tw:
2312                 while (tw && (tw->tw_family != st->family ||
2313                               !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2314                         tw = tw_next(tw);
2315                 }
2316                 if (tw) {
2317                         cur = tw;
2318                         goto out;
2319                 }
2320                 read_unlock(&tcp_ehash[st->bucket].lock);
2321                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2322                 if (++st->bucket < tcp_ehash_size) {
2323                         read_lock(&tcp_ehash[st->bucket].lock);
2324                         sk = sk_head(&tcp_ehash[st->bucket].chain);
2325                 } else {
2326                         cur = NULL;
2327                         goto out;
2328                 }
2329         } else
2330                 sk = sk_next(sk);
2331
2332         sk_for_each_from(sk, node) {
2333                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2334                         continue;
2335                 if (sk->sk_family == st->family)
2336                         goto found;
2337         }
2338
2339         st->state = TCP_SEQ_STATE_TIME_WAIT;
2340         tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2341         goto get_tw;
2342 found:
2343         cur = sk;
2344 out:
2345         return cur;
2346 }
2347
2348 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2349 {
2350         void *rc = established_get_first(seq);
2351
2352         while (rc && pos) {
2353                 rc = established_get_next(seq, rc);
2354                 --pos;
2355         }               
2356         return rc;
2357 }
2358
2359 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2360 {
2361         void *rc;
2362         struct tcp_iter_state* st = seq->private;
2363
2364         tcp_listen_lock();
2365         st->state = TCP_SEQ_STATE_LISTENING;
2366         rc        = listening_get_idx(seq, &pos);
2367
2368         if (!rc) {
2369                 tcp_listen_unlock();
2370                 local_bh_disable();
2371                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2372                 rc        = established_get_idx(seq, pos);
2373         }
2374
2375         return rc;
2376 }
2377
2378 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2379 {
2380         struct tcp_iter_state* st = seq->private;
2381         st->state = TCP_SEQ_STATE_LISTENING;
2382         st->num = 0;
2383         return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2384 }
2385
2386 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2387 {
2388         void *rc = NULL;
2389         struct tcp_iter_state* st;
2390
2391         if (v == SEQ_START_TOKEN) {
2392                 rc = tcp_get_idx(seq, 0);
2393                 goto out;
2394         }
2395         st = seq->private;
2396
2397         switch (st->state) {
2398         case TCP_SEQ_STATE_OPENREQ:
2399         case TCP_SEQ_STATE_LISTENING:
2400                 rc = listening_get_next(seq, v);
2401                 if (!rc) {
2402                         tcp_listen_unlock();
2403                         local_bh_disable();
2404                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2405                         rc        = established_get_first(seq);
2406                 }
2407                 break;
2408         case TCP_SEQ_STATE_ESTABLISHED:
2409         case TCP_SEQ_STATE_TIME_WAIT:
2410                 rc = established_get_next(seq, v);
2411                 break;
2412         }
2413 out:
2414         ++*pos;
2415         return rc;
2416 }
2417
2418 static void tcp_seq_stop(struct seq_file *seq, void *v)
2419 {
2420         struct tcp_iter_state* st = seq->private;
2421
2422         switch (st->state) {
2423         case TCP_SEQ_STATE_OPENREQ:
2424                 if (v) {
2425                         struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2426                         read_unlock_bh(&tp->syn_wait_lock);
2427                 }
2428         case TCP_SEQ_STATE_LISTENING:
2429                 if (v != SEQ_START_TOKEN)
2430                         tcp_listen_unlock();
2431                 break;
2432         case TCP_SEQ_STATE_TIME_WAIT:
2433         case TCP_SEQ_STATE_ESTABLISHED:
2434                 if (v)
2435                         read_unlock(&tcp_ehash[st->bucket].lock);
2436                 local_bh_enable();
2437                 break;
2438         }
2439 }
2440
2441 static int tcp_seq_open(struct inode *inode, struct file *file)
2442 {
2443         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2444         struct seq_file *seq;
2445         struct tcp_iter_state *s;
2446         int rc;
2447
2448         if (unlikely(afinfo == NULL))
2449                 return -EINVAL;
2450
2451         s = kmalloc(sizeof(*s), GFP_KERNEL);
2452         if (!s)
2453                 return -ENOMEM;
2454         memset(s, 0, sizeof(*s));
2455         s->family               = afinfo->family;
2456         s->seq_ops.start        = tcp_seq_start;
2457         s->seq_ops.next         = tcp_seq_next;
2458         s->seq_ops.show         = afinfo->seq_show;
2459         s->seq_ops.stop         = tcp_seq_stop;
2460
2461         rc = seq_open(file, &s->seq_ops);
2462         if (rc)
2463                 goto out_kfree;
2464         seq          = file->private_data;
2465         seq->private = s;
2466 out:
2467         return rc;
2468 out_kfree:
2469         kfree(s);
2470         goto out;
2471 }
2472
2473 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2474 {
2475         int rc = 0;
2476         struct proc_dir_entry *p;
2477
2478         if (!afinfo)
2479                 return -EINVAL;
2480         afinfo->seq_fops->owner         = afinfo->owner;
2481         afinfo->seq_fops->open          = tcp_seq_open;
2482         afinfo->seq_fops->read          = seq_read;
2483         afinfo->seq_fops->llseek        = seq_lseek;
2484         afinfo->seq_fops->release       = seq_release_private;
2485         
2486         p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2487         if (p)
2488                 p->data = afinfo;
2489         else
2490                 rc = -ENOMEM;
2491         return rc;
2492 }
2493
2494 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2495 {
2496         if (!afinfo)
2497                 return;
2498         proc_net_remove(afinfo->name);
2499         memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
2500 }
2501
2502 static void get_openreq4(struct sock *sk, struct open_request *req,
2503                          char *tmpbuf, int i, int uid)
2504 {
2505         int ttd = req->expires - jiffies;
2506
2507         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2508                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2509                 i,
2510                 req->af.v4_req.loc_addr,
2511                 ntohs(inet_sk(sk)->sport),
2512                 req->af.v4_req.rmt_addr,
2513                 ntohs(req->rmt_port),
2514                 TCP_SYN_RECV,
2515                 0, 0, /* could print option size, but that is af dependent. */
2516                 1,    /* timers active (only the expire timer) */
2517                 jiffies_to_clock_t(ttd),
2518                 req->retrans,
2519                 uid,
2520                 0,  /* non standard timer */
2521                 0, /* open_requests have no inode */
2522                 atomic_read(&sk->sk_refcnt),
2523                 req);
2524 }
2525
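/* One line of /proc/net/tcp. The address is printed as the raw 32-bit
 * value, so on a little-endian machine 127.0.0.1 shows up as 0100007F,
 * while ports go through ntohs() first, so port 80 prints as 0050.
 */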
2526 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2527 {
2528         int timer_active;
2529         unsigned long timer_expires;
2530         struct tcp_opt *tp = tcp_sk(sp);
2531         struct inet_opt *inet = inet_sk(sp);
2532         unsigned int dest = inet->daddr;
2533         unsigned int src = inet->rcv_saddr;
2534         __u16 destp = ntohs(inet->dport);
2535         __u16 srcp = ntohs(inet->sport);
2536
2537         if (tp->pending == TCP_TIME_RETRANS) {
2538                 timer_active    = 1;
2539                 timer_expires   = tp->timeout;
2540         } else if (tp->pending == TCP_TIME_PROBE0) {
2541                 timer_active    = 4;
2542                 timer_expires   = tp->timeout;
2543         } else if (timer_pending(&sp->sk_timer)) {
2544                 timer_active    = 2;
2545                 timer_expires   = sp->sk_timer.expires;
2546         } else {
2547                 timer_active    = 0;
2548                 timer_expires = jiffies;
2549         }
2550
2551         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2552                         "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2553                 i, src, srcp, dest, destp, sp->sk_state,
2554                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2555                 timer_active,
2556                 jiffies_to_clock_t(timer_expires - jiffies),
2557                 tp->retransmits,
2558                 sock_i_uid(sp),
2559                 tp->probes_out,
2560                 sock_i_ino(sp),
2561                 atomic_read(&sp->sk_refcnt), sp,
2562                 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2563                 tp->snd_cwnd,
2564                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2565 }
2566
2567 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2568 {
2569         unsigned int dest, src;
2570         __u16 destp, srcp;
2571         int ttd = tw->tw_ttd - jiffies;
2572
2573         if (ttd < 0)
2574                 ttd = 0;
2575
2576         dest  = tw->tw_daddr;
2577         src   = tw->tw_rcv_saddr;
2578         destp = ntohs(tw->tw_dport);
2579         srcp  = ntohs(tw->tw_sport);
2580
2581         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2582                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2583                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2584                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2585                 atomic_read(&tw->tw_refcnt), tw);
2586 }
2587
2588 #define TMPSZ 150
2589
2590 static int tcp4_seq_show(struct seq_file *seq, void *v)
2591 {
2592         struct tcp_iter_state* st;
2593         char tmpbuf[TMPSZ + 1];
2594
2595         if (v == SEQ_START_TOKEN) {
2596                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2597                            "  sl  local_address rem_address   st tx_queue "
2598                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2599                            "inode");
2600                 goto out;
2601         }
2602         st = seq->private;
2603
2604         switch (st->state) {
2605         case TCP_SEQ_STATE_LISTENING:
2606         case TCP_SEQ_STATE_ESTABLISHED:
2607                 get_tcp4_sock(v, tmpbuf, st->num);
2608                 break;
2609         case TCP_SEQ_STATE_OPENREQ:
2610                 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2611                 break;
2612         case TCP_SEQ_STATE_TIME_WAIT:
2613                 get_timewait4_sock(v, tmpbuf, st->num);
2614                 break;
2615         }
2616         seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2617 out:
2618         return 0;
2619 }
2620
2621 static struct file_operations tcp4_seq_fops;
2622 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2623         .owner          = THIS_MODULE,
2624         .name           = "tcp",
2625         .family         = AF_INET,
2626         .seq_show       = tcp4_seq_show,
2627         .seq_fops       = &tcp4_seq_fops,
2628 };
2629
2630 int __init tcp4_proc_init(void)
2631 {
2632         return tcp_proc_register(&tcp4_seq_afinfo);
2633 }
2634
2635 void tcp4_proc_exit(void)
2636 {
2637         tcp_proc_unregister(&tcp4_seq_afinfo);
2638 }
2639 #endif /* CONFIG_PROC_FS */
2640
2641 struct proto tcp_prot = {
2642         .name                   = "TCP",
2643         .close                  = tcp_close,
2644         .connect                = tcp_v4_connect,
2645         .disconnect             = tcp_disconnect,
2646         .accept                 = tcp_accept,
2647         .ioctl                  = tcp_ioctl,
2648         .init                   = tcp_v4_init_sock,
2649         .destroy                = tcp_v4_destroy_sock,
2650         .shutdown               = tcp_shutdown,
2651         .setsockopt             = tcp_setsockopt,
2652         .getsockopt             = tcp_getsockopt,
2653         .sendmsg                = tcp_sendmsg,
2654         .recvmsg                = tcp_recvmsg,
2655         .backlog_rcv            = tcp_v4_do_rcv,
2656         .hash                   = tcp_v4_hash,
2657         .unhash                 = tcp_unhash,
2658         .get_port               = tcp_v4_get_port,
2659         .enter_memory_pressure  = tcp_enter_memory_pressure,
2660         .sockets_allocated      = &tcp_sockets_allocated,
2661         .memory_allocated       = &tcp_memory_allocated,
2662         .memory_pressure        = &tcp_memory_pressure,
2663         .sysctl_mem             = sysctl_tcp_mem,
2664         .sysctl_wmem            = sysctl_tcp_wmem,
2665         .sysctl_rmem            = sysctl_tcp_rmem,
2666         .max_header             = MAX_TCP_HEADER,
2667 };
2668
2669
2670
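/* Create the kernel-internal TCP control socket. It is unhashed and never
 * receives anything; it presumably only provides a transmit context, e.g.
 * for RSTs and ACKs sent on behalf of sockets that no longer exist.
 */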
2671 void __init tcp_v4_init(struct net_proto_family *ops)
2672 {
2673         int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2674         if (err < 0)
2675                 panic("Failed to create the TCP control socket.\n");
2676         tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
2677         inet_sk(tcp_socket->sk)->uc_ttl = -1;
2678
2679         /* Unhash it so that IP input processing does not even
2680          * see it; we do not wish this socket to see incoming
2681          * packets.
2682          */
2683         tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2684 }
2685
2686 EXPORT_SYMBOL(ipv4_specific);
2687 EXPORT_SYMBOL(tcp_bind_hash);
2688 EXPORT_SYMBOL(tcp_bucket_create);
2689 EXPORT_SYMBOL(tcp_hashinfo);
2690 EXPORT_SYMBOL(tcp_inherit_port);
2691 EXPORT_SYMBOL(tcp_listen_wlock);
2692 EXPORT_SYMBOL(tcp_port_rover);
2693 EXPORT_SYMBOL(tcp_prot);
2694 EXPORT_SYMBOL(tcp_put_port);
2695 EXPORT_SYMBOL(tcp_unhash);
2696 EXPORT_SYMBOL(tcp_v4_conn_request);
2697 EXPORT_SYMBOL(tcp_v4_connect);
2698 EXPORT_SYMBOL(tcp_v4_do_rcv);
2699 EXPORT_SYMBOL(tcp_v4_lookup_listener);
2700 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2701 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2702 EXPORT_SYMBOL(tcp_v4_send_check);
2703 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2704
2705 #ifdef CONFIG_PROC_FS
2706 EXPORT_SYMBOL(tcp_proc_register);
2707 EXPORT_SYMBOL(tcp_proc_unregister);
2708 #endif
2709 #ifdef CONFIG_SYSCTL
2710 EXPORT_SYMBOL(sysctl_local_port_range);
2711 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2712 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2713 #endif