net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:     $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
9  *
10  *              IPv4 specific functions
11  *
12  *
13  *              code split from:
14  *              linux/ipv4/tcp.c
15  *              linux/ipv4/tcp_input.c
16  *              linux/ipv4/tcp_output.c
17  *
18  *              See tcp.c for author information
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 /*
27  * Changes:
28  *              David S. Miller :       New socket lookup architecture.
29  *                                      This code is dedicated to John Dyson.
30  *              David S. Miller :       Change semantics of established hash,
31  *                                      half is devoted to TIME_WAIT sockets
32  *                                      and the rest go in the other half.
33  *              Andi Kleen :            Add support for syncookies and fixed
34  *                                      some bugs: ip options weren't passed to
35  *                                      the TCP layer, missed a check for an
36  *                                      ACK bit.
37  *              Andi Kleen :            Implemented fast path mtu discovery.
38  *                                      Fixed many serious bugs in the
39  *                                      open_request handling and moved
40  *                                      most of it into the af independent code.
41  *                                      Added tail drop and some other bugfixes.
42  *                                      Added new listen semantics.
43  *              Mike McLagan    :       Routing by source
44  *      Juan Jose Ciarlante:            ip_dynaddr bits
45  *              Andi Kleen:             various fixes.
46  *      Vitaly E. Lavrov        :       Transparent proxy revived after a
47  *                                      year-long coma.
48  *      Andi Kleen              :       Fix new listen.
49  *      Andi Kleen              :       Fix accept error reporting.
50  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
51  *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
52  *                                      a single port at the same time.
53  */
54
55 #include <linux/config.h>
56
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
65
66 #include <net/icmp.h>
67 #include <net/tcp.h>
68 #include <net/ipv6.h>
69 #include <net/inet_common.h>
70 #include <net/xfrm.h>
71
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77
78 extern int sysctl_ip_dynaddr;
79 int sysctl_tcp_tw_reuse;
80 int sysctl_tcp_low_latency;
81
82 /* Check TCP sequence numbers in ICMP packets. */
83 #define ICMP_MIN_LENGTH 8
84
85 /* Socket used for sending RSTs */
86 static struct socket *tcp_socket;
87
88 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
89                        struct sk_buff *skb);
90
91 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
92         .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
93         .__tcp_lhash_users      =       ATOMIC_INIT(0),
94         .__tcp_lhash_wait
95           = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
96         .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
97 };
98
99 /*
100  * This array holds the first and last local port number.
101  * For high-usage systems, use sysctl to change this to
102  * 32768-61000
103  */
104 int sysctl_local_port_range[2] = { 1024, 4999 };
105 int tcp_port_rover = 1024 - 1;
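/*
 * Runtime tuning note (a hedged aside, assuming the usual procfs sysctl
 * plumbing elsewhere in the tree): sysctl_local_port_range is the backing
 * store for /proc/sys/net/ipv4/ip_local_port_range, so the range searched
 * by the port rover below can be widened at run time by writing e.g.
 * "32768 61000" to that file, as the comment above suggests.  tcp_port_rover
 * merely remembers the last port handed out, so tcp_v4_get_port() and
 * tcp_v4_hash_connect() do not restart their search from "low" every time.
 */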
106
107 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
108                                  __u32 faddr, __u16 fport)
109 {
110         int h = (laddr ^ lport) ^ (faddr ^ fport);
111         h ^= h >> 16;
112         h ^= h >> 8;
113         return h & (tcp_ehash_size - 1);
114 }
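/*
 * Worked example for the fold above (illustrative values, not from the
 * original source): with laddr = 0xc0a80101 (192.168.1.1), lport = 80,
 * faddr = 0x0a000002 (10.0.0.2), fport = 33333,
 *
 *      h  = (0xc0a80101 ^ 80) ^ (0x0a000002 ^ 33333);
 *      h ^= h >> 16;           // mix the upper halfword downwards
 *      h ^= h >> 8;            // mix the remaining high byte downwards
 *      return h & (tcp_ehash_size - 1);
 *
 * The final mask only selects a valid bucket because tcp_ehash_size is a
 * power of two (it is sized that way when the hash tables are allocated
 * at boot).
 */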
115
116 static __inline__ int tcp_sk_hashfn(struct sock *sk)
117 {
118         struct inet_opt *inet = inet_sk(sk);
119         __u32 laddr = inet->rcv_saddr;
120         __u16 lport = inet->num;
121         __u32 faddr = inet->daddr;
122         __u16 fport = inet->dport;
123
124         return tcp_hashfn(laddr, lport, faddr, fport);
125 }
126
127 /* Allocate and initialize a new TCP local port bind bucket.
128  * The bindhash lock for snum's hash chain must be held here.
129  */
130 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
131                                           unsigned short snum)
132 {
133         struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
134                                                       SLAB_ATOMIC);
135         if (tb) {
136                 tb->port = snum;
137                 tb->fastreuse = 0;
138                 INIT_HLIST_HEAD(&tb->owners);
139                 hlist_add_head(&tb->node, &head->chain);
140         }
141         return tb;
142 }
143
144 /* Caller must hold hashbucket lock for this tb with local BH disabled */
145 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
146 {
147         if (hlist_empty(&tb->owners)) {
148                 __hlist_del(&tb->node);
149                 kmem_cache_free(tcp_bucket_cachep, tb);
150         }
151 }
152
153 /* Caller must disable local BH processing. */
154 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
155 {
156         struct tcp_bind_hashbucket *head =
157                                 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
158         struct tcp_bind_bucket *tb;
159
160         spin_lock(&head->lock);
161         tb = tcp_sk(sk)->bind_hash;
162         sk_add_bind_node(child, &tb->owners);
163         tcp_sk(child)->bind_hash = tb;
164         spin_unlock(&head->lock);
165 }
166
167 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
168 {
169         local_bh_disable();
170         __tcp_inherit_port(sk, child);
171         local_bh_enable();
172 }
173
174 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
175                    unsigned short snum)
176 {
177         inet_sk(sk)->num = snum;
178         sk_add_bind_node(sk, &tb->owners);
179         tcp_sk(sk)->bind_hash = tb;
180 }
181
182 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
183 {
184         const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
185         struct sock *sk2;
186         struct hlist_node *node;
187         int reuse = sk->sk_reuse;
188
189         sk_for_each_bound(sk2, node, &tb->owners) {
190                 if (sk != sk2 &&
191                     !tcp_v6_ipv6only(sk2) &&
192                     (!sk->sk_bound_dev_if ||
193                      !sk2->sk_bound_dev_if ||
194                      sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
195                         if (!reuse || !sk2->sk_reuse ||
196                             sk2->sk_state == TCP_LISTEN) {
197                                 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
198                                 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
199                                     sk2_rcv_saddr == sk_rcv_saddr)
200                                         break;
201                         }
202                 }
203         }
204         return node != NULL;
205 }
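/*
 * In words (a summary of the loop above, not additional semantics): two
 * sockets conflict on a port only if they could see the same traffic,
 * i.e. neither is bound to a different device, and either SO_REUSEADDR is
 * not set on both or the already-bound socket is listening, and their
 * local addresses overlap (one is a wildcard or both are equal).  So two
 * listeners bound to 192.168.1.1:80 and 10.0.0.2:80 can coexist, while a
 * listener on 0.0.0.0:80 conflicts with both.
 */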
206
207 /* Obtain a reference to a local port for the given sock,
208  * if snum is zero it means select any available local port.
209  */
210 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
211 {
212         struct tcp_bind_hashbucket *head;
213         struct hlist_node *node;
214         struct tcp_bind_bucket *tb;
215         int ret;
216
217         local_bh_disable();
218         if (!snum) {
219                 int low = sysctl_local_port_range[0];
220                 int high = sysctl_local_port_range[1];
221                 int remaining = (high - low) + 1;
222                 int rover;
223
224                 spin_lock(&tcp_portalloc_lock);
225                 rover = tcp_port_rover;
226                 do {
227                         rover++;
228                         if (rover < low || rover > high)
229                                 rover = low;
230                         head = &tcp_bhash[tcp_bhashfn(rover)];
231                         spin_lock(&head->lock);
232                         tb_for_each(tb, node, &head->chain)
233                                 if (tb->port == rover)
234                                         goto next;
235                         break;
236                 next:
237                         spin_unlock(&head->lock);
238                 } while (--remaining > 0);
239                 tcp_port_rover = rover;
240                 spin_unlock(&tcp_portalloc_lock);
241
242                 /* Exhausted local port range during search? */
243                 ret = 1;
244                 if (remaining <= 0)
245                         goto fail;
246
247                 /* OK, here is the one we will use.  HEAD is
248                  * non-NULL and we hold its lock.
249                  */
250                 snum = rover;
251         } else {
252                 head = &tcp_bhash[tcp_bhashfn(snum)];
253                 spin_lock(&head->lock);
254                 tb_for_each(tb, node, &head->chain)
255                         if (tb->port == snum)
256                                 goto tb_found;
257         }
258         tb = NULL;
259         goto tb_not_found;
260 tb_found:
261         if (!hlist_empty(&tb->owners)) {
262                 if (sk->sk_reuse > 1)
263                         goto success;
264                 if (tb->fastreuse > 0 &&
265                     sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
266                         goto success;
267                 } else {
268                         ret = 1;
269                         if (tcp_bind_conflict(sk, tb))
270                                 goto fail_unlock;
271                 }
272         }
273 tb_not_found:
274         ret = 1;
275         if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
276                 goto fail_unlock;
277         if (hlist_empty(&tb->owners)) {
278                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
279                         tb->fastreuse = 1;
280                 else
281                         tb->fastreuse = 0;
282         } else if (tb->fastreuse &&
283                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
284                 tb->fastreuse = 0;
285 success:
286         if (!tcp_sk(sk)->bind_hash)
287                 tcp_bind_hash(sk, tb, snum);
288         BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
289         ret = 0;
290
291 fail_unlock:
292         spin_unlock(&head->lock);
293 fail:
294         local_bh_enable();
295         return ret;
296 }
297
298 /* Get rid of any references to a local port held by the
299  * given sock.
300  */
301 static void __tcp_put_port(struct sock *sk)
302 {
303         struct inet_opt *inet = inet_sk(sk);
304         struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
305         struct tcp_bind_bucket *tb;
306
307         spin_lock(&head->lock);
308         tb = tcp_sk(sk)->bind_hash;
309         __sk_del_bind_node(sk);
310         tcp_sk(sk)->bind_hash = NULL;
311         inet->num = 0;
312         tcp_bucket_destroy(tb);
313         spin_unlock(&head->lock);
314 }
315
316 void tcp_put_port(struct sock *sk)
317 {
318         local_bh_disable();
319         __tcp_put_port(sk);
320         local_bh_enable();
321 }
322
323 /* Sleeping on this lock without WQ_FLAG_EXCLUSIVE is fine on UP but can be very
324  * bad on SMP: when several writers sleep and the reader wakes them up, all but
325  * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
326  * solves this, _but_ remember that it adds useless work on UP machines (a wake
327  * up on each exclusive lock release). It should really be ifdefed.
328  */
329
330 void tcp_listen_wlock(void)
331 {
332         write_lock(&tcp_lhash_lock);
333
334         if (atomic_read(&tcp_lhash_users)) {
335                 DEFINE_WAIT(wait);
336
337                 for (;;) {
338                         prepare_to_wait_exclusive(&tcp_lhash_wait,
339                                                 &wait, TASK_UNINTERRUPTIBLE);
340                         if (!atomic_read(&tcp_lhash_users))
341                                 break;
342                         write_unlock_bh(&tcp_lhash_lock);
343                         schedule();
344                         write_lock_bh(&tcp_lhash_lock);
345                 }
346
347                 finish_wait(&tcp_lhash_wait, &wait);
348         }
349 }
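/*
 * A sketch of the intended pattern (the read side is in the tcp header,
 * presumably tcp_listen_lock()/tcp_listen_unlock(), not in this file):
 * readers pin the listening hash by bumping __tcp_lhash_users under a
 * brief read_lock and wake __tcp_lhash_wait when the count drops back to
 * zero; tcp_listen_wlock() above takes the write lock and, while readers
 * are still active, sleeps exclusively, dropping and retaking the lock,
 * until the user count drains.  Callers disable BHs first (see
 * tcp_v4_hash() and tcp_unhash() below); the loop briefly re-enables them
 * across the schedule() via the _bh lock variants.
 */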
350
351 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
352 {
353         struct hlist_head *list;
354         rwlock_t *lock;
355
356         BUG_TRAP(sk_unhashed(sk));
357         if (listen_possible && sk->sk_state == TCP_LISTEN) {
358                 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
359                 lock = &tcp_lhash_lock;
360                 tcp_listen_wlock();
361         } else {
362                 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
363                 lock = &tcp_ehash[sk->sk_hashent].lock;
364                 write_lock(lock);
365         }
366         __sk_add_node(sk, list);
367         sock_prot_inc_use(sk->sk_prot);
368         write_unlock(lock);
369         if (listen_possible && sk->sk_state == TCP_LISTEN)
370                 wake_up(&tcp_lhash_wait);
371 }
372
373 static void tcp_v4_hash(struct sock *sk)
374 {
375         if (sk->sk_state != TCP_CLOSE) {
376                 local_bh_disable();
377                 __tcp_v4_hash(sk, 1);
378                 local_bh_enable();
379         }
380 }
381
382 void tcp_unhash(struct sock *sk)
383 {
384         rwlock_t *lock;
385
386         if (sk_unhashed(sk))
387                 goto ende;
388
389         if (sk->sk_state == TCP_LISTEN) {
390                 local_bh_disable();
391                 tcp_listen_wlock();
392                 lock = &tcp_lhash_lock;
393         } else {
394                 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
395                 lock = &head->lock;
396                 write_lock_bh(&head->lock);
397         }
398
399         if (__sk_del_node_init(sk))
400                 sock_prot_dec_use(sk->sk_prot);
401         write_unlock_bh(lock);
402
403  ende:
404         if (sk->sk_state == TCP_LISTEN)
405                 wake_up(&tcp_lhash_wait);
406 }
407
408 /* Don't inline this cruft.  There are some nice properties to
409  * exploit here.  The BSD API does not allow a listening TCP
410  * to specify the remote port nor the remote address for the
411  * connection.  So always assume those are both wildcarded
412  * during the search since they can never be otherwise.
413  */
414 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
415                                              unsigned short hnum, int dif)
416 {
417         struct sock *result = NULL, *sk;
418         struct hlist_node *node;
419         int score, hiscore;
420
421         hiscore=-1;
422         sk_for_each(sk, node, head) {
423                 struct inet_opt *inet = inet_sk(sk);
424
425                 if (inet->num == hnum && !ipv6_only_sock(sk)) {
426                         __u32 rcv_saddr = inet->rcv_saddr;
427
428                         score = (sk->sk_family == PF_INET ? 1 : 0);
429                         if (rcv_saddr) {
430                                 if (rcv_saddr != daddr)
431                                         continue;
432                                 score+=2;
433                         }
434                         if (sk->sk_bound_dev_if) {
435                                 if (sk->sk_bound_dev_if != dif)
436                                         continue;
437                                 score+=2;
438                         }
439                         if (score == 5)
440                                 return sk;
441                         if (score > hiscore) {
442                                 hiscore = score;
443                                 result = sk;
444                         }
445                 }
446         }
447         return result;
448 }
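/*
 * Scoring recap (derived from the loop above): a candidate earns 1 point
 * for being a plain PF_INET socket, 2 for a matching bound address and 2
 * for a matching bound device, so 5 is a perfect match and ends the search
 * early; otherwise the highest-scoring listener wins, which lets a socket
 * bound to a specific address or device beat a wildcard listener on the
 * same port.
 */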
449
450 /* Optimize the common listener case. */
451 inline struct sock *tcp_v4_lookup_listener(u32 daddr,
452                 unsigned short hnum, int dif)
453 {
454         struct sock *sk = NULL;
455         struct hlist_head *head;
456
457         read_lock(&tcp_lhash_lock);
458         head = &tcp_listening_hash[tcp_lhashfn(hnum)];
459         if (!hlist_empty(head)) {
460                 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
461
462                 if (inet->num == hnum && !sk->sk_node.next &&
463                     (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
464                     (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
465                     !sk->sk_bound_dev_if)
466                         goto sherry_cache;
467                 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
468         }
469         if (sk) {
470 sherry_cache:
471                 sock_hold(sk);
472         }
473         read_unlock(&tcp_lhash_lock);
474         return sk;
475 }
476
477 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
478  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
479  *
480  * Local BH must be disabled here.
481  */
482
483 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
484                                                        u32 daddr, u16 hnum,
485                                                        int dif)
486 {
487         struct tcp_ehash_bucket *head;
488         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
489         __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
490         struct sock *sk;
491         struct hlist_node *node;
492         /* Optimize here for direct hit, only listening connections can
493          * have wildcards anyway.
494          */
495         int hash = tcp_hashfn(daddr, hnum, saddr, sport);
496         head = &tcp_ehash[hash];
497         read_lock(&head->lock);
498         sk_for_each(sk, node, &head->chain) {
499                 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
500                         goto hit; /* You sunk my battleship! */
501         }
502
503         /* Must check for a TIME_WAIT'er before going to listener hash. */
504         sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
505                 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
506                         goto hit;
507         }
508         sk = NULL;
509 out:
510         read_unlock(&head->lock);
511         return sk;
512 hit:
513         sock_hold(sk);
514         goto out;
515 }
516
517 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
518                                            u32 daddr, u16 hnum, int dif)
519 {
520         struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
521                                                       daddr, hnum, dif);
522
523         return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
524 }
525
526 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
527                                   u16 dport, int dif)
528 {
529         struct sock *sk;
530
531         local_bh_disable();
532         sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
533         local_bh_enable();
534
535         return sk;
536 }
537
538 EXPORT_SYMBOL_GPL(tcp_v4_lookup);
539
540 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
541 {
542         return secure_tcp_sequence_number(skb->nh.iph->daddr,
543                                           skb->nh.iph->saddr,
544                                           skb->h.th->dest,
545                                           skb->h.th->source);
546 }
547
548 /* called with local bh disabled */
549 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
550                                       struct tcp_tw_bucket **twp)
551 {
552         struct inet_opt *inet = inet_sk(sk);
553         u32 daddr = inet->rcv_saddr;
554         u32 saddr = inet->daddr;
555         int dif = sk->sk_bound_dev_if;
556         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
557         __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
558         int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
559         struct tcp_ehash_bucket *head = &tcp_ehash[hash];
560         struct sock *sk2;
561         struct hlist_node *node;
562         struct tcp_tw_bucket *tw;
563
564         write_lock(&head->lock);
565
566         /* Check TIME-WAIT sockets first. */
567         sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
568                 tw = (struct tcp_tw_bucket *)sk2;
569
570                 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
571                         struct tcp_opt *tp = tcp_sk(sk);
572
573                         /* With PAWS, it is safe from the viewpoint
574                            of data integrity. Even without PAWS it
575                            is safe provided the sequence spaces do not
576                            overlap, i.e. at data rates <= 80 Mbit/sec.
577
578                            Actually, the idea is close to VJ's, only
579                            the timestamp cache is held not per host
580                            but per port pair, and the TW bucket is used
581                            as the state holder.
582
583                            If the TW bucket has already been destroyed
584                            we fall back to VJ's scheme and use the
585                            initial timestamp retrieved from the peer table.
586                          */
587                         if (tw->tw_ts_recent_stamp &&
588                             (!twp || (sysctl_tcp_tw_reuse &&
589                                       xtime.tv_sec -
590                                       tw->tw_ts_recent_stamp > 1))) {
591                                 if ((tp->write_seq =
592                                                 tw->tw_snd_nxt + 65535 + 2) == 0)
593                                         tp->write_seq = 1;
594                                 tp->ts_recent       = tw->tw_ts_recent;
595                                 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
596                                 sock_hold(sk2);
597                                 goto unique;
598                         } else
599                                 goto not_unique;
600                 }
601         }
602         tw = NULL;
603
604         /* And established part... */
605         sk_for_each(sk2, node, &head->chain) {
606                 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
607                         goto not_unique;
608         }
609
610 unique:
611         /* Must record num and sport now. Otherwise we will see a
612          * socket with a funny identity in the hash table. */
613         inet->num = lport;
614         inet->sport = htons(lport);
615         sk->sk_hashent = hash;
616         BUG_TRAP(sk_unhashed(sk));
617         __sk_add_node(sk, &head->chain);
618         sock_prot_inc_use(sk->sk_prot);
619         write_unlock(&head->lock);
620
621         if (twp) {
622                 *twp = tw;
623                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
624         } else if (tw) {
625                 /* Silly. Should hash-dance instead... */
626                 tcp_tw_deschedule(tw);
627                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
628
629                 tcp_tw_put(tw);
630         }
631
632         return 0;
633
634 not_unique:
635         write_unlock(&head->lock);
636         return -EADDRNOTAVAIL;
637 }
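/*
 * Why "tw_snd_nxt + 65535 + 2" above: when a TIME-WAIT identity is reused,
 * the new connection's initial write_seq is pushed past the largest
 * possible receive window plus a small margin, so segments of the new
 * incarnation cannot be mistaken for late duplicates of the old one.  If
 * that sum happens to wrap to 0, write_seq is bumped to 1, because 0 is
 * used as the "not yet chosen" marker (see tcp_v4_connect()).
 */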
638
639 /*
640  * Bind a port for a connect operation and hash it.
641  */
642 static int tcp_v4_hash_connect(struct sock *sk)
643 {
644         unsigned short snum = inet_sk(sk)->num;
645         struct tcp_bind_hashbucket *head;
646         struct tcp_bind_bucket *tb;
647         int ret;
648
649         if (!snum) {
650                 int rover;
651                 int low = sysctl_local_port_range[0];
652                 int high = sysctl_local_port_range[1];
653                 int remaining = (high - low) + 1;
654                 struct hlist_node *node;
655                 struct tcp_tw_bucket *tw = NULL;
656
657                 local_bh_disable();
658
659                 /* TODO. Actually it is not such a bad idea to remove
660                  * tcp_portalloc_lock before the next submission to Linus.
661                  * As soon as we touch this place at all it is time to think.
662                  *
663                  * Right now it protects a single _advisory_ variable,
664                  * tcp_port_rover, hence it is mostly useless.
665                  * The code will work nicely if we just delete it, but
666                  * I am afraid that in the contended case it will work no
667                  * better or even worse: another CPU will just hit the same
668                  * bucket and spin there.
669                  * So some per-CPU salt could remove both the contention and
670                  * the memory ping-pong. Any ideas how to do this nicely?
671                  */
672                 spin_lock(&tcp_portalloc_lock);
673                 rover = tcp_port_rover;
674
675                 do {
676                         rover++;
677                         if ((rover < low) || (rover > high))
678                                 rover = low;
679                         head = &tcp_bhash[tcp_bhashfn(rover)];
680                         spin_lock(&head->lock);
681
682                         /* Does not bother with rcv_saddr checks,
683                          * because the established check is already
684                          * unique enough.
685                          */
686                         tb_for_each(tb, node, &head->chain) {
687                                 if (tb->port == rover) {
688                                         BUG_TRAP(!hlist_empty(&tb->owners));
689                                         if (tb->fastreuse >= 0)
690                                                 goto next_port;
691                                         if (!__tcp_v4_check_established(sk,
692                                                                         rover,
693                                                                         &tw))
694                                                 goto ok;
695                                         goto next_port;
696                                 }
697                         }
698
699                         tb = tcp_bucket_create(head, rover);
700                         if (!tb) {
701                                 spin_unlock(&head->lock);
702                                 break;
703                         }
704                         tb->fastreuse = -1;
705                         goto ok;
706
707                 next_port:
708                         spin_unlock(&head->lock);
709                 } while (--remaining > 0);
710                 tcp_port_rover = rover;
711                 spin_unlock(&tcp_portalloc_lock);
712
713                 local_bh_enable();
714
715                 return -EADDRNOTAVAIL;
716
717 ok:
718                 /* All locks still held and bhs disabled */
719                 tcp_port_rover = rover;
720                 spin_unlock(&tcp_portalloc_lock);
721
722                 tcp_bind_hash(sk, tb, rover);
723                 if (sk_unhashed(sk)) {
724                         inet_sk(sk)->sport = htons(rover);
725                         __tcp_v4_hash(sk, 0);
726                 }
727                 spin_unlock(&head->lock);
728
729                 if (tw) {
730                         tcp_tw_deschedule(tw);
731                         tcp_tw_put(tw);
732                 }
733
734                 ret = 0;
735                 goto out;
736         }
737
738         head  = &tcp_bhash[tcp_bhashfn(snum)];
739         tb  = tcp_sk(sk)->bind_hash;
740         spin_lock_bh(&head->lock);
741         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
742                 __tcp_v4_hash(sk, 0);
743                 spin_unlock_bh(&head->lock);
744                 return 0;
745         } else {
746                 spin_unlock(&head->lock);
747                 /* No definite answer... Walk to established hash table */
748                 ret = __tcp_v4_check_established(sk, snum, NULL);
749 out:
750                 local_bh_enable();
751                 return ret;
752         }
753 }
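/*
 * Note on tb->fastreuse = -1 above: ports grabbed by connect() are marked
 * with a negative fastreuse so that later automatic allocations can tell
 * them apart (the "fastreuse >= 0" test skips buckets owned by an explicit
 * bind()) and reuse them only after __tcp_v4_check_established() has shown
 * that the resulting 4-tuple would still be unique.
 */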
754
755 /* This will initiate an outgoing connection. */
756 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
757 {
758         struct inet_opt *inet = inet_sk(sk);
759         struct tcp_opt *tp = tcp_sk(sk);
760         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
761         struct rtable *rt;
762         u32 daddr, nexthop;
763         int tmp;
764         int err;
765
766         if (addr_len < sizeof(struct sockaddr_in))
767                 return -EINVAL;
768
769         if (usin->sin_family != AF_INET)
770                 return -EAFNOSUPPORT;
771
772         nexthop = daddr = usin->sin_addr.s_addr;
773         if (inet->opt && inet->opt->srr) {
774                 if (!daddr)
775                         return -EINVAL;
776                 nexthop = inet->opt->faddr;
777         }
778
779         tmp = ip_route_connect(&rt, nexthop, inet->saddr,
780                                RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
781                                IPPROTO_TCP,
782                                inet->sport, usin->sin_port, sk);
783         if (tmp < 0)
784                 return tmp;
785
786         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
787                 ip_rt_put(rt);
788                 return -ENETUNREACH;
789         }
790
791         if (!inet->opt || !inet->opt->srr)
792                 daddr = rt->rt_dst;
793
794         if (!inet->saddr)
795                 inet->saddr = rt->rt_src;
796         inet->rcv_saddr = inet->saddr;
797
798         if (tp->ts_recent_stamp && inet->daddr != daddr) {
799                 /* Reset inherited state */
800                 tp->ts_recent       = 0;
801                 tp->ts_recent_stamp = 0;
802                 tp->write_seq       = 0;
803         }
804
805         if (sysctl_tcp_tw_recycle &&
806             !tp->ts_recent_stamp && rt->rt_dst == daddr) {
807                 struct inet_peer *peer = rt_get_peer(rt);
808
809                 /* VJ's idea. We save the last timestamp seen from
810                  * the destination in the peer table when entering TIME-WAIT
811                  * and initialize ts_recent from it when trying a new connection.
812                  */
813
814                 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
815                         tp->ts_recent_stamp = peer->tcp_ts_stamp;
816                         tp->ts_recent = peer->tcp_ts;
817                 }
818         }
819
820         inet->dport = usin->sin_port;
821         inet->daddr = daddr;
822
823         tp->ext_header_len = 0;
824         if (inet->opt)
825                 tp->ext_header_len = inet->opt->optlen;
826
827         tp->mss_clamp = 536;
828
829         /* Socket identity is still unknown (sport may be zero).
830          * However we set the state to SYN-SENT and, without releasing the
831          * socket lock, select a source port, enter ourselves into the hash
832          * tables and complete initialization after this.
833          */
834         tcp_set_state(sk, TCP_SYN_SENT);
835         err = tcp_v4_hash_connect(sk);
836         if (err)
837                 goto failure;
838
839         err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
840         if (err)
841                 goto failure;
842
843         /* OK, now commit destination to socket.  */
844         __sk_dst_set(sk, &rt->u.dst);
845         tcp_v4_setup_caps(sk, &rt->u.dst);
846         tp->ext2_header_len = rt->u.dst.header_len;
847
848         if (!tp->write_seq)
849                 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
850                                                            inet->daddr,
851                                                            inet->sport,
852                                                            usin->sin_port);
853
854         inet->id = tp->write_seq ^ jiffies;
855
856         err = tcp_connect(sk);
857         rt = NULL;
858         if (err)
859                 goto failure;
860
861         return 0;
862
863 failure:
864         /* This unhashes the socket and releases the local port, if necessary. */
865         tcp_set_state(sk, TCP_CLOSE);
866         ip_rt_put(rt);
867         sk->sk_route_caps = 0;
868         inet->dport = 0;
869         return err;
870 }
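/*
 * Caller's-eye view, for illustration only (plain user-space socket API,
 * not part of this file): the sockaddr handed to connect(2) reaches
 * tcp_v4_connect() above as "uaddr" with both port and address already in
 * network byte order, e.g.
 *
 *      struct sockaddr_in sin = {
 *              .sin_family = AF_INET,
 *              .sin_port   = htons(80),
 *      };
 *      sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *      connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * which is why usin->sin_port and usin->sin_addr.s_addr are used above
 * without further byte swapping.
 */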
871
872 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
873 {
874         return ((struct rtable *)skb->dst)->rt_iif;
875 }
876
877 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
878 {
879         return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
880 }
881
882 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
883                                               struct open_request ***prevp,
884                                               __u16 rport,
885                                               __u32 raddr, __u32 laddr)
886 {
887         struct tcp_listen_opt *lopt = tp->listen_opt;
888         struct open_request *req, **prev;
889
890         for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
891              (req = *prev) != NULL;
892              prev = &req->dl_next) {
893                 if (req->rmt_port == rport &&
894                     req->af.v4_req.rmt_addr == raddr &&
895                     req->af.v4_req.loc_addr == laddr &&
896                     TCP_INET_FAMILY(req->class->family)) {
897                         BUG_TRAP(!req->sk);
898                         *prevp = prev;
899                         break;
900                 }
901         }
902
903         return req;
904 }
905
906 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
907 {
908         struct tcp_opt *tp = tcp_sk(sk);
909         struct tcp_listen_opt *lopt = tp->listen_opt;
910         u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
911
912         req->expires = jiffies + TCP_TIMEOUT_INIT;
913         req->retrans = 0;
914         req->sk = NULL;
915         req->dl_next = lopt->syn_table[h];
916
917         write_lock(&tp->syn_wait_lock);
918         lopt->syn_table[h] = req;
919         write_unlock(&tp->syn_wait_lock);
920
921 #ifdef CONFIG_ACCEPT_QUEUES
922         tcp_synq_added(sk, req);
923 #else
924         tcp_synq_added(sk);
925 #endif
926 }
927
928
929 /*
930  * This routine does path mtu discovery as defined in RFC1191.
931  */
932 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
933                                      u32 mtu)
934 {
935         struct dst_entry *dst;
936         struct inet_opt *inet = inet_sk(sk);
937         struct tcp_opt *tp = tcp_sk(sk);
938
939         /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
940          * sent out by Linux are always < 576 bytes so they should go through
941          * unfragmented).
942          */
943         if (sk->sk_state == TCP_LISTEN)
944                 return;
945
946         /* We don't check in the dst entry whether PMTU discovery is forbidden
947          * on this route. We just assume that no packet-too-big packets
948          * are sent back when PMTU discovery is not active.
949          * There is a small race when the user changes this flag in the
950          * route, but I think that's acceptable.
951          */
952         if ((dst = __sk_dst_check(sk, 0)) == NULL)
953                 return;
954
955         dst->ops->update_pmtu(dst, mtu);
956
957         /* Something is about to go wrong... Remember the soft error
958          * in case this connection is not able to recover.
959          */
960         if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
961                 sk->sk_err_soft = EMSGSIZE;
962
963         mtu = dst_pmtu(dst);
964
965         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
966             tp->pmtu_cookie > mtu) {
967                 tcp_sync_mss(sk, mtu);
968
969                 /* Resend the TCP packet because it's
970                  * clear that the old packet has been
971                  * dropped. This is the new "fast" path mtu
972                  * discovery.
973                  */
974                 tcp_simple_retransmit(sk);
975         } /* else let the usual retransmit timer handle it */
976 }
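/*
 * Concrete effect of the path above (illustrative numbers): if the cached
 * path MTU was 1500 and an ICMP FRAG_NEEDED reports 1400, update_pmtu()
 * shrinks the route's PMTU, tcp_sync_mss() recomputes the socket's MSS
 * from the new value, and tcp_simple_retransmit() resends the queued data
 * in smaller segments instead of waiting for the retransmit timer.
 */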
977
978 /*
979  * This routine is called by the ICMP module when it gets some
980  * sort of error condition.  If err < 0 then the socket should
981  * be closed and the error returned to the user.  If err > 0
982  * it's just the icmp type << 8 | icmp code.  After adjustment
983  * header points to the first 8 bytes of the tcp header.  We need
984  * to find the appropriate port.
985  *
986  * The locking strategy used here is very "optimistic". When
987  * someone else accesses the socket the ICMP is just dropped
988  * and for some paths there is no check at all.
989  * A more general error queue to queue errors for later handling
990  * is probably better.
991  *
992  */
993
994 void tcp_v4_err(struct sk_buff *skb, u32 info)
995 {
996         struct iphdr *iph = (struct iphdr *)skb->data;
997         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
998         struct tcp_opt *tp;
999         struct inet_opt *inet;
1000         int type = skb->h.icmph->type;
1001         int code = skb->h.icmph->code;
1002         struct sock *sk;
1003         __u32 seq;
1004         int err;
1005
1006         if (skb->len < (iph->ihl << 2) + 8) {
1007                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1008                 return;
1009         }
1010
1011         sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1012                            th->source, tcp_v4_iif(skb));
1013         if (!sk) {
1014                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1015                 return;
1016         }
1017         if (sk->sk_state == TCP_TIME_WAIT) {
1018                 tcp_tw_put((struct tcp_tw_bucket *)sk);
1019                 return;
1020         }
1021
1022         bh_lock_sock(sk);
1023         /* If too many ICMPs get dropped on busy
1024          * servers this needs to be solved differently.
1025          */
1026         if (sock_owned_by_user(sk))
1027                 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1028
1029         if (sk->sk_state == TCP_CLOSE)
1030                 goto out;
1031
1032         tp = tcp_sk(sk);
1033         seq = ntohl(th->seq);
1034         if (sk->sk_state != TCP_LISTEN &&
1035             !between(seq, tp->snd_una, tp->snd_nxt)) {
1036                 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1037                 goto out;
1038         }
1039
1040         switch (type) {
1041         case ICMP_SOURCE_QUENCH:
1042                 /* Just silently ignore these. */
1043                 goto out;
1044         case ICMP_PARAMETERPROB:
1045                 err = EPROTO;
1046                 break;
1047         case ICMP_DEST_UNREACH:
1048                 if (code > NR_ICMP_UNREACH)
1049                         goto out;
1050
1051                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1052                         if (!sock_owned_by_user(sk))
1053                                 do_pmtu_discovery(sk, iph, info);
1054                         goto out;
1055                 }
1056
1057                 err = icmp_err_convert[code].errno;
1058                 break;
1059         case ICMP_TIME_EXCEEDED:
1060                 err = EHOSTUNREACH;
1061                 break;
1062         default:
1063                 goto out;
1064         }
1065
1066         switch (sk->sk_state) {
1067                 struct open_request *req, **prev;
1068         case TCP_LISTEN:
1069                 if (sock_owned_by_user(sk))
1070                         goto out;
1071
1072                 req = tcp_v4_search_req(tp, &prev, th->dest,
1073                                         iph->daddr, iph->saddr);
1074                 if (!req)
1075                         goto out;
1076
1077                 /* ICMPs are not backlogged, hence we cannot get
1078                    an established socket here.
1079                  */
1080                 BUG_TRAP(!req->sk);
1081
1082                 if (seq != req->snt_isn) {
1083                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
1084                         goto out;
1085                 }
1086
1087                 /*
1088                  * Still in SYN_RECV, just remove it silently.
1089                  * There is no good way to pass the error to the newly
1090                  * created socket, and POSIX does not want network
1091                  * errors returned from accept().
1092                  */
1093                 tcp_synq_drop(sk, req, prev);
1094                 goto out;
1095
1096         case TCP_SYN_SENT:
1097         case TCP_SYN_RECV:  /* Cannot happen.
1098                                It can, for example, if SYNs crossed.
1099                              */
1100                 if (!sock_owned_by_user(sk)) {
1101                         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1102                         sk->sk_err = err;
1103
1104                         sk->sk_error_report(sk);
1105
1106                         tcp_done(sk);
1107                 } else {
1108                         sk->sk_err_soft = err;
1109                 }
1110                 goto out;
1111         }
1112
1113         /* If we've already connected we will keep trying
1114          * until we time out, or the user gives up.
1115          *
1116          * RFC 1122 4.2.3.9 allows us to consider only PROTO_UNREACH
1117          * and PORT_UNREACH as hard errors (well, FRAG_FAILED too,
1118          * but it is obsoleted by PMTU discovery).
1119          *
1120          * Note that in the modern internet, where routing is unreliable
1121          * and broken firewalls sit in each dark corner sending random
1122          * errors ordered by their masters, even these two messages finally
1123          * lose their original sense (even Linux sends invalid PORT_UNREACHs).
1124          *
1125          * Now we are in compliance with the RFCs.
1126          *                                                      --ANK (980905)
1127          */
1128
1129         inet = inet_sk(sk);
1130         if (!sock_owned_by_user(sk) && inet->recverr) {
1131                 sk->sk_err = err;
1132                 sk->sk_error_report(sk);
1133         } else  { /* Only an error on timeout */
1134                 sk->sk_err_soft = err;
1135         }
1136
1137 out:
1138         bh_unlock_sock(sk);
1139         sock_put(sk);
1140 }
1141
1142 /* This routine computes an IPv4 TCP checksum. */
1143 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1144                        struct sk_buff *skb)
1145 {
1146         struct inet_opt *inet = inet_sk(sk);
1147
1148         if (skb->ip_summed == CHECKSUM_HW) {
1149                 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1150                 skb->csum = offsetof(struct tcphdr, check);
1151         } else {
1152                 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1153                                          csum_partial((char *)th,
1154                                                       th->doff << 2,
1155                                                       skb->csum));
1156         }
1157 }
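/*
 * How the two branches above differ: with CHECKSUM_HW the stack fills in
 * only the one's-complement pseudo-header sum and records the offset of
 * the check field in skb->csum, leaving the hardware (or a software
 * fallback further down the stack) to fold in the payload; otherwise the
 * full checksum is computed here from the partial sum accumulated while
 * the payload was copied into the skb.
 */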
1158
1159 /*
1160  *      This routine will send an RST to the other tcp.
1161  *
1162  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
1163  *                    for the reset?
1164  *      Answer: if a packet caused an RST, it is not for a socket
1165  *              existing in our system; if it is matched to a socket,
1166  *              it is just a duplicate segment or a bug in the other side's TCP.
1167  *              So we build the reply based only on parameters
1168  *              that arrived with the segment.
1169  *      Exception: precedence violation. We do not implement it in any case.
1170  */
1171
1172 static void tcp_v4_send_reset(struct sk_buff *skb)
1173 {
1174         struct tcphdr *th = skb->h.th;
1175         struct tcphdr rth;
1176         struct ip_reply_arg arg;
1177
1178         /* Never send a reset in response to a reset. */
1179         if (th->rst)
1180                 return;
1181
1182         if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1183                 return;
1184
1185         /* Swap the send and the receive. */
1186         memset(&rth, 0, sizeof(struct tcphdr));
1187         rth.dest   = th->source;
1188         rth.source = th->dest;
1189         rth.doff   = sizeof(struct tcphdr) / 4;
1190         rth.rst    = 1;
1191
1192         if (th->ack) {
1193                 rth.seq = th->ack_seq;
1194         } else {
1195                 rth.ack = 1;
1196                 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1197                                     skb->len - (th->doff << 2));
1198         }
1199
1200         memset(&arg, 0, sizeof arg);
1201         arg.iov[0].iov_base = (unsigned char *)&rth;
1202         arg.iov[0].iov_len  = sizeof rth;
1203         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1204                                       skb->nh.iph->saddr, /*XXX*/
1205                                       sizeof(struct tcphdr), IPPROTO_TCP, 0);
1206         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1207
1208         ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1209
1210         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1211         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1212 }
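/*
 * The seq/ack choice above follows RFC 793: if the offending segment
 * carried an ACK, the reset borrows that acknowledgment number as its own
 * sequence number so the receiver will accept it; otherwise the reset
 * ACKs everything the segment occupied, i.e. its sequence number plus the
 * SYN/FIN flags plus the payload length.
 */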
1213
1214 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1215    outside socket context, is certainly ugly. What can I do?
1216  */
1217
1218 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1219                             u32 win, u32 ts)
1220 {
1221         struct tcphdr *th = skb->h.th;
1222         struct {
1223                 struct tcphdr th;
1224                 u32 tsopt[3];
1225         } rep;
1226         struct ip_reply_arg arg;
1227
1228         memset(&rep.th, 0, sizeof(struct tcphdr));
1229         memset(&arg, 0, sizeof arg);
1230
1231         arg.iov[0].iov_base = (unsigned char *)&rep;
1232         arg.iov[0].iov_len  = sizeof(rep.th);
1233         if (ts) {
1234                 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1235                                      (TCPOPT_TIMESTAMP << 8) |
1236                                      TCPOLEN_TIMESTAMP);
1237                 rep.tsopt[1] = htonl(tcp_time_stamp);
1238                 rep.tsopt[2] = htonl(ts);
1239                 arg.iov[0].iov_len = sizeof(rep);
1240         }
1241
1242         /* Swap the send and the receive. */
1243         rep.th.dest    = th->source;
1244         rep.th.source  = th->dest;
1245         rep.th.doff    = arg.iov[0].iov_len / 4;
1246         rep.th.seq     = htonl(seq);
1247         rep.th.ack_seq = htonl(ack);
1248         rep.th.ack     = 1;
1249         rep.th.window  = htons(win);
1250
1251         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1252                                       skb->nh.iph->saddr, /*XXX*/
1253                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
1254         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1255
1256         ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1257
1258         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1259 }
1260
1261 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1262 {
1263         struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1264
1265         tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1266                         tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1267
1268         tcp_tw_put(tw);
1269 }
1270
1271 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1272 {
1273         tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1274                         req->ts_recent);
1275 }
1276
1277 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1278                                           struct open_request *req)
1279 {
1280         struct rtable *rt;
1281         struct ip_options *opt = req->af.v4_req.opt;
1282         struct flowi fl = { .oif = sk->sk_bound_dev_if,
1283                             .nl_u = { .ip4_u =
1284                                       { .daddr = ((opt && opt->srr) ?
1285                                                   opt->faddr :
1286                                                   req->af.v4_req.rmt_addr),
1287                                         .saddr = req->af.v4_req.loc_addr,
1288                                         .tos = RT_CONN_FLAGS(sk) } },
1289                             .proto = IPPROTO_TCP,
1290                             .uli_u = { .ports =
1291                                        { .sport = inet_sk(sk)->sport,
1292                                          .dport = req->rmt_port } } };
1293
1294         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1295                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1296                 return NULL;
1297         }
1298         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1299                 ip_rt_put(rt);
1300                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1301                 return NULL;
1302         }
1303         return &rt->u.dst;
1304 }
1305
1306 /*
1307  *      Send a SYN-ACK after having received an ACK.
1308  *      This still operates on a open_request only, not on a big
1309  *      socket.
1310  */
1311 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1312                               struct dst_entry *dst)
1313 {
1314         int err = -1;
1315         struct sk_buff * skb;
1316
1317         /* First, grab a route. */
1318         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1319                 goto out;
1320
1321         skb = tcp_make_synack(sk, dst, req);
1322
1323         if (skb) {
1324                 struct tcphdr *th = skb->h.th;
1325
1326                 th->check = tcp_v4_check(th, skb->len,
1327                                          req->af.v4_req.loc_addr,
1328                                          req->af.v4_req.rmt_addr,
1329                                          csum_partial((char *)th, skb->len,
1330                                                       skb->csum));
1331
1332                 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1333                                             req->af.v4_req.rmt_addr,
1334                                             req->af.v4_req.opt);
1335                 if (err == NET_XMIT_CN)
1336                         err = 0;
1337         }
1338
1339 out:
1340         dst_release(dst);
1341         return err;
1342 }
1343
1344 /*
1345  *      IPv4 open_request destructor.
1346  */
1347 static void tcp_v4_or_free(struct open_request *req)
1348 {
1349         if (req->af.v4_req.opt)
1350                 kfree(req->af.v4_req.opt);
1351 }
1352
1353 static inline void syn_flood_warning(struct sk_buff *skb)
1354 {
1355         static unsigned long warntime;
1356
1357         if (time_after(jiffies, (warntime + HZ * 60))) {
1358                 warntime = jiffies;
1359                 printk(KERN_INFO
1360                        "possible SYN flooding on port %d. Sending cookies.\n",
1361                        ntohs(skb->h.th->dest));
1362         }
1363 }
1364
1365 /*
1366  * Save and compile IPv4 options into the open_request if needed.
1367  */
1368 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1369                                                      struct sk_buff *skb)
1370 {
1371         struct ip_options *opt = &(IPCB(skb)->opt);
1372         struct ip_options *dopt = NULL;
1373
1374         if (opt && opt->optlen) {
1375                 int opt_size = optlength(opt);
1376                 dopt = kmalloc(opt_size, GFP_ATOMIC);
1377                 if (dopt) {
1378                         if (ip_options_echo(dopt, skb)) {
1379                                 kfree(dopt);
1380                                 dopt = NULL;
1381                         }
1382                 }
1383         }
1384         return dopt;
1385 }
1386
1387 /*
1388  * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1389  * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1390  * It would be better to replace it with a global counter for all sockets
1391  * but then some measure against one socket starving all other sockets
1392  * would be needed.
1393  *
1394  * It was 128 by default. Experiments with real servers show that
1395  * it is absolutely not enough even at 100 conn/sec. 256 cures most
1396  * of the problems. This value is adjusted to 128 for very small machines
1397  * (<= 32 MB of memory) and to 1024 on normal or better ones (>= 256 MB).
1398  * Increasing it further requires changing the hash table size.
1399  */
1400 int sysctl_max_syn_backlog = 256;
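/*
 * Runtime tuning note (assuming the usual sysctl plumbing elsewhere in the
 * tree): this default is exported as net.ipv4.tcp_max_syn_backlog, i.e.
 * /proc/sys/net/ipv4/tcp_max_syn_backlog, so the sizing described above can
 * be raised without rebuilding the kernel.
 */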
1401
1402 struct or_calltable or_ipv4 = {
1403         .family         =       PF_INET,
1404         .rtx_syn_ack    =       tcp_v4_send_synack,
1405         .send_ack       =       tcp_v4_or_send_ack,
1406         .destructor     =       tcp_v4_or_free,
1407         .send_reset     =       tcp_v4_send_reset,
1408 };
1409
1410 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1411 {
1412         struct tcp_opt tp;
1413         struct open_request *req;
1414         __u32 saddr = skb->nh.iph->saddr;
1415         __u32 daddr = skb->nh.iph->daddr;
1416         __u32 isn = TCP_SKB_CB(skb)->when;
1417         struct dst_entry *dst = NULL;
1418 #ifdef CONFIG_ACCEPT_QUEUES
1419         int class = 0;
1420 #endif
1421 #ifdef CONFIG_SYN_COOKIES
1422         int want_cookie = 0;
1423 #else
1424 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1425 #endif
1426
1427         /* Never answer SYNs sent to broadcast or multicast */
1428         if (((struct rtable *)skb->dst)->rt_flags &
1429             (RTCF_BROADCAST | RTCF_MULTICAST))
1430                 goto drop;
1431
1432         /* TW buckets are converted to open requests without
1433          * limitation; they conserve resources and the peer is
1434          * evidently a real one.
1435          */
1436         if (tcp_synq_is_full(sk) && !isn) {
1437 #ifdef CONFIG_SYN_COOKIES
1438                 if (sysctl_tcp_syncookies) {
1439                         want_cookie = 1;
1440                 } else
1441 #endif
1442                 goto drop;
1443         }
1444
1445 #ifdef CONFIG_ACCEPT_QUEUES
1446         class = (skb->nfmark <= 0) ? 0 :
1447                 ((skb->nfmark >= NUM_ACCEPT_QUEUES) ? 0: skb->nfmark);
1448         /*
1449          * Accept only if the class has shares set or if the default class
1450          * i.e. class 0 has shares
1451          */
1452         if (!(tcp_sk(sk)->acceptq[class].aq_ratio)) {
1453                 if (tcp_sk(sk)->acceptq[0].aq_ratio) 
1454                         class = 0;
1455                 else
1456                         goto drop;
1457         }
1458 #endif
1459
1460         /* The accept backlog is full. If we have already queued enough
1461          * warm entries in the SYN queue, drop the request. That is better
1462          * than clogging the SYN queue with openreqs whose timeouts grow
1463          * exponentially.
1464          */
1465 #ifdef CONFIG_ACCEPT_QUEUES
1466         if (sk_acceptq_is_full(sk, class) && tcp_synq_young(sk, class) > 1)
1467 #else
1468         if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1469 #endif
1470                 goto drop;
1471
1472         req = tcp_openreq_alloc();
1473         if (!req)
1474                 goto drop;
1475
1476         tcp_clear_options(&tp);
1477         tp.mss_clamp = 536;
1478         tp.user_mss  = tcp_sk(sk)->user_mss;
1479
1480         tcp_parse_options(skb, &tp, 0);
1481
1482         if (want_cookie) {
1483                 tcp_clear_options(&tp);
1484                 tp.saw_tstamp = 0;
1485         }
1486
1487         if (tp.saw_tstamp && !tp.rcv_tsval) {
1488                 /* Some OSes (unknown ones, but seen on a web server that
1489                  * carries information of interest only to Windows users)
1490                  * do not send their timestamp in the SYN. It is the easy
1491                  * case: we simply do not advertise TS support.
1492                  */
1493                 tp.saw_tstamp = 0;
1494                 tp.tstamp_ok  = 0;
1495         }
1496         tp.tstamp_ok = tp.saw_tstamp;
1497
1498         tcp_openreq_init(req, &tp, skb);
1499 #ifdef CONFIG_ACCEPT_QUEUES
1500         req->acceptq_class = class;
1501         req->acceptq_time_stamp = jiffies;
1502 #endif
1503         req->af.v4_req.loc_addr = daddr;
1504         req->af.v4_req.rmt_addr = saddr;
1505         req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1506         req->class = &or_ipv4;
1507         if (!want_cookie)
1508                 TCP_ECN_create_request(req, skb->h.th);
1509
1510         if (want_cookie) {
1511 #ifdef CONFIG_SYN_COOKIES
1512                 syn_flood_warning(skb);
1513 #endif
1514                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1515         } else if (!isn) {
1516                 struct inet_peer *peer = NULL;
1517
1518                 /* VJ's idea. We save the last timestamp seen from
1519                  * the destination in the peer table when entering
1520                  * TIME-WAIT state, and check against it before
1521                  * accepting a new connection request.
1522                  *
1523                  * If "isn" is not zero, this request hit a live
1524                  * timewait bucket, so all the necessary checks
1525                  * are made in the function processing timewait state.
1526                  */
1527                 if (tp.saw_tstamp &&
1528                     sysctl_tcp_tw_recycle &&
1529                     (dst = tcp_v4_route_req(sk, req)) != NULL &&
1530                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1531                     peer->v4daddr == saddr) {
1532                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1533                             (s32)(peer->tcp_ts - req->ts_recent) >
1534                                                         TCP_PAWS_WINDOW) {
1535                                 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1536                                 dst_release(dst);
1537                                 goto drop_and_free;
1538                         }
1539                 }
1540                 /* Kill the following clause if you dislike this approach. */
1541                 else if (!sysctl_tcp_syncookies &&
1542                          (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1543                           (sysctl_max_syn_backlog >> 2)) &&
1544                          (!peer || !peer->tcp_ts_stamp) &&
1545                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1546                         /* Without syncookies the last quarter of
1547                          * the backlog is reserved for destinations
1548                          * proven to be alive.
1549                          * This means we keep communicating with
1550                          * destinations already remembered at the
1551                          * moment the synflood started.
1552                          */
1553                         NETDEBUG(if (net_ratelimit()) \
1554                                         printk(KERN_DEBUG "TCP: drop open "
1555                                                           "request from %u.%u."
1556                                                           "%u.%u/%u\n", \
1557                                                NIPQUAD(saddr),
1558                                                ntohs(skb->h.th->source)));
1559                         dst_release(dst);
1560                         goto drop_and_free;
1561                 }
1562
1563                 isn = tcp_v4_init_sequence(sk, skb);
1564         }
1565         req->snt_isn = isn;
1566
1567         if (tcp_v4_send_synack(sk, req, dst))
1568                 goto drop_and_free;
1569
1570         if (want_cookie) {
1571                 tcp_openreq_free(req);
1572         } else {
1573                 tcp_v4_synq_add(sk, req);
1574         }
1575         return 0;
1576
1577 drop_and_free:
1578         tcp_openreq_free(req);
1579 drop:
1580         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1581         return 0;
1582 }
1583
1584
1585 /*
1586  * The three-way handshake has completed - we received a valid final
1587  * ACK - so now create the new socket.
1588  */
1589 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1590                                   struct open_request *req,
1591                                   struct dst_entry *dst)
1592 {
1593         struct inet_opt *newinet;
1594         struct tcp_opt *newtp;
1595         struct sock *newsk;
1596
1597 #ifdef CONFIG_ACCEPT_QUEUES
1598         if (sk_acceptq_is_full(sk, req->acceptq_class))
1599 #else
1600         if (sk_acceptq_is_full(sk))
1601 #endif
1602                 goto exit_overflow;
1603
1604         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1605                 goto exit;
1606
1607         newsk = tcp_create_openreq_child(sk, req, skb);
1608         if (!newsk)
1609                 goto exit;
1610
1611         newsk->sk_dst_cache = dst;
1612         tcp_v4_setup_caps(newsk, dst);
1613
1614         newtp                 = tcp_sk(newsk);
1615         newinet               = inet_sk(newsk);
1616         newinet->daddr        = req->af.v4_req.rmt_addr;
1617         newinet->rcv_saddr    = req->af.v4_req.loc_addr;
1618         newinet->saddr        = req->af.v4_req.loc_addr;
1619         newinet->opt          = req->af.v4_req.opt;
1620         req->af.v4_req.opt    = NULL;
1621         newinet->mc_index     = tcp_v4_iif(skb);
1622         newinet->mc_ttl       = skb->nh.iph->ttl;
1623         newtp->ext_header_len = 0;
1624         if (newinet->opt)
1625                 newtp->ext_header_len = newinet->opt->optlen;
1626         newtp->ext2_header_len = dst->header_len;
1627         newinet->id = newtp->write_seq ^ jiffies;
1628
1629         tcp_sync_mss(newsk, dst_pmtu(dst));
1630         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1631         tcp_initialize_rcv_mss(newsk);
1632
1633         __tcp_v4_hash(newsk, 0);
1634         __tcp_inherit_port(sk, newsk);
1635
1636         return newsk;
1637
1638 exit_overflow:
1639         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1640 exit:
1641         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1642         dst_release(dst);
1643         return NULL;
1644 }
1645
1646 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1647 {
1648         struct tcphdr *th = skb->h.th;
1649         struct iphdr *iph = skb->nh.iph;
1650         struct tcp_opt *tp = tcp_sk(sk);
1651         struct sock *nsk;
1652         struct open_request **prev;
1653         /* Find possible connection requests. */
1654         struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1655                                                      iph->saddr, iph->daddr);
1656         if (req)
1657                 return tcp_check_req(sk, skb, req, prev);
1658
1659         nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1660                                           th->source,
1661                                           skb->nh.iph->daddr,
1662                                           ntohs(th->dest),
1663                                           tcp_v4_iif(skb));
1664
1665         if (nsk) {
1666                 if (nsk->sk_state != TCP_TIME_WAIT) {
1667                         bh_lock_sock(nsk);
1668                         return nsk;
1669                 }
1670                 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1671                 return NULL;
1672         }
1673
1674 #ifdef CONFIG_SYN_COOKIES
1675         if (!th->rst && !th->syn && th->ack)
1676                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1677 #endif
1678         return sk;
1679 }
1680
1681 static int tcp_v4_checksum_init(struct sk_buff *skb)
1682 {
1683         if (skb->ip_summed == CHECKSUM_HW) {
1684                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1685                 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1686                                   skb->nh.iph->daddr, skb->csum))
1687                         return 0;
1688
1689                 NETDEBUG(if (net_ratelimit())
1690                                 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1691                 skb->ip_summed = CHECKSUM_NONE;
1692         }
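        /* For a short segment it is cheap to verify the full checksum right
         * away; for anything longer we only stash the pseudo-header sum in
         * skb->csum here and leave the final verification to the deferred
         * tcp_checksum_complete() / checksum-and-copy paths.
         */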
1693         if (skb->len <= 76) {
1694                 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1695                                  skb->nh.iph->daddr,
1696                                  skb_checksum(skb, 0, skb->len, 0)))
1697                         return -1;
1698                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1699         } else {
1700                 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1701                                           skb->nh.iph->saddr,
1702                                           skb->nh.iph->daddr, 0);
1703         }
1704         return 0;
1705 }
1706
1707
1708 /* The socket must have its spinlock held when we get
1709  * here.
1710  *
1711  * We have a potential double-lock case here, so even when
1712  * doing backlog processing we use the BH locking scheme.
1713  * This is because we cannot sleep with the original spinlock
1714  * held.
1715  */
1716 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1717 {
1718         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1719                 TCP_CHECK_TIMER(sk);
1720                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1721                         goto reset;
1722                 TCP_CHECK_TIMER(sk);
1723                 return 0;
1724         }
1725
1726         if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1727                 goto csum_err;
1728
1729         if (sk->sk_state == TCP_LISTEN) {
1730                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1731                 if (!nsk)
1732                         goto discard;
1733
1734                 if (nsk != sk) {
1735                         if (tcp_child_process(sk, nsk, skb))
1736                                 goto reset;
1737                         return 0;
1738                 }
1739         }
1740
1741         TCP_CHECK_TIMER(sk);
1742         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1743                 goto reset;
1744         TCP_CHECK_TIMER(sk);
1745         return 0;
1746
1747 reset:
1748         tcp_v4_send_reset(skb);
1749 discard:
1750         kfree_skb(skb);
1751         /* Be careful here. If this function gets more complicated and
1752          * gcc suffers from register pressure on the x86, sk (in %ebx)
1753          * might be destroyed here. This current version compiles correctly,
1754          * but you have been warned.
1755          */
1756         return 0;
1757
1758 csum_err:
1759         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1760         goto discard;
1761 }
1762
1763 /*
1764  *      From tcp_input.c
1765  */
1766
1767 int tcp_v4_rcv(struct sk_buff *skb)
1768 {
1769         struct tcphdr *th;
1770         struct sock *sk;
1771         int ret;
1772
1773         if (skb->pkt_type != PACKET_HOST)
1774                 goto discard_it;
1775
1776         /* Count it even if it's bad */
1777         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1778
1779         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1780                 goto discard_it;
1781
1782         th = skb->h.th;
1783
1784         if (th->doff < sizeof(struct tcphdr) / 4)
1785                 goto bad_packet;
1786         if (!pskb_may_pull(skb, th->doff * 4))
1787                 goto discard_it;
1788
1789         /* An explanation is required here, I think.
1790          * Packet length and doff are validated by header prediction,
1791          * provided the case of th->doff == 0 is eliminated.
1792          * So, we defer the checks. */
1793         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1794              tcp_v4_checksum_init(skb) < 0))
1795                 goto bad_packet;
1796
1797         th = skb->h.th;
1798         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1799         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1800                                     skb->len - th->doff * 4);
1801         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1802         TCP_SKB_CB(skb)->when    = 0;
1803         TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
1804         TCP_SKB_CB(skb)->sacked  = 0;
1805
1806         sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1807                              skb->nh.iph->daddr, ntohs(th->dest),
1808                              tcp_v4_iif(skb));
1809
1810         if (!sk)
1811                 goto no_tcp_socket;
1812
1813 process:
1814         if (sk->sk_state == TCP_TIME_WAIT)
1815                 goto do_time_wait;
1816
1817         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1818                 goto discard_and_relse;
1819
1820         if (sk_filter(sk, skb, 0))
1821                 goto discard_and_relse;
1822
1823         skb->dev = NULL;
1824
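        /* Three delivery paths from here: if the socket is not owned by a
         * user context we first try the prequeue (so a reader blocked in
         * tcp_recvmsg() can process the segment, unless tcp_low_latency
         * disables that) and otherwise handle it directly in
         * tcp_v4_do_rcv(); if the socket is owned, the skb is parked on the
         * backlog and drained through backlog_rcv (== tcp_v4_do_rcv) when
         * the owner calls release_sock().
         */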
1825         bh_lock_sock(sk);
1826         ret = 0;
1827         if (!sock_owned_by_user(sk)) {
1828                 if (!tcp_prequeue(sk, skb))
1829                         ret = tcp_v4_do_rcv(sk, skb);
1830         } else
1831                 sk_add_backlog(sk, skb);
1832         bh_unlock_sock(sk);
1833
1834         sock_put(sk);
1835
1836         return ret;
1837
1838 no_tcp_socket:
1839         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1840                 goto discard_it;
1841
1842         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1843 bad_packet:
1844                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1845         } else {
1846                 tcp_v4_send_reset(skb);
1847         }
1848
1849 discard_it:
1850         /* Discard frame. */
1851         kfree_skb(skb);
1852         return 0;
1853
1854 discard_and_relse:
1855         sock_put(sk);
1856         goto discard_it;
1857
1858 do_time_wait:
1859         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1860                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1861                 goto discard_it;
1862         }
1863
1864         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1865                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1866                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1867                 goto discard_it;
1868         }
1869         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1870                                            skb, th, skb->len)) {
1871         case TCP_TW_SYN: {
1872                 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1873                                                           ntohs(th->dest),
1874                                                           tcp_v4_iif(skb));
1875                 if (sk2) {
1876                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1877                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1878                         sk = sk2;
1879                         goto process;
1880                 }
1881                 /* Fall through to ACK */
1882         }
1883         case TCP_TW_ACK:
1884                 tcp_v4_timewait_ack(sk, skb);
1885                 break;
1886         case TCP_TW_RST:
1887                 goto no_tcp_socket;
1888         case TCP_TW_SUCCESS:;
1889         }
1890         goto discard_it;
1891 }
1892
1893 /* With per-bucket locks this operation is not atomic, so
1894  * this version is no worse.
1895  */
1896 static void __tcp_v4_rehash(struct sock *sk)
1897 {
1898         sk->sk_prot->unhash(sk);
1899         sk->sk_prot->hash(sk);
1900 }
1901
1902 static int tcp_v4_reselect_saddr(struct sock *sk)
1903 {
1904         struct inet_opt *inet = inet_sk(sk);
1905         int err;
1906         struct rtable *rt;
1907         __u32 old_saddr = inet->saddr;
1908         __u32 new_saddr;
1909         __u32 daddr = inet->daddr;
1910
1911         if (inet->opt && inet->opt->srr)
1912                 daddr = inet->opt->faddr;
1913
1914         /* Query new route. */
1915         err = ip_route_connect(&rt, daddr, 0,
1916                                RT_TOS(inet->tos) | sk->sk_localroute,
1917                                sk->sk_bound_dev_if,
1918                                IPPROTO_TCP,
1919                                inet->sport, inet->dport, sk);
1920         if (err)
1921                 return err;
1922
1923         __sk_dst_set(sk, &rt->u.dst);
1924         tcp_v4_setup_caps(sk, &rt->u.dst);
1925         tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1926
1927         new_saddr = rt->rt_src;
1928
1929         if (new_saddr == old_saddr)
1930                 return 0;
1931
1932         if (sysctl_ip_dynaddr > 1) {
1933                 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1934                                  "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1935                        NIPQUAD(old_saddr),
1936                        NIPQUAD(new_saddr));
1937         }
1938
1939         inet->saddr = new_saddr;
1940         inet->rcv_saddr = new_saddr;
1941
1942         /* XXX The only ugly spot where we need to
1943          * XXX really change the socket's identity after
1944          * XXX it has entered the hashes. -DaveM
1945          *
1946          * Besides that, it does not check for connection
1947          * uniqueness. Expect trouble.
1948          */
1949         __tcp_v4_rehash(sk);
1950         return 0;
1951 }
1952
1953 int tcp_v4_rebuild_header(struct sock *sk)
1954 {
1955         struct inet_opt *inet = inet_sk(sk);
1956         struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1957         u32 daddr;
1958         int err;
1959
1960         /* Route is OK, nothing to do. */
1961         if (rt)
1962                 return 0;
1963
1964         /* Reroute. */
1965         daddr = inet->daddr;
1966         if (inet->opt && inet->opt->srr)
1967                 daddr = inet->opt->faddr;
1968
1969         {
1970                 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1971                                     .nl_u = { .ip4_u =
1972                                               { .daddr = daddr,
1973                                                 .saddr = inet->saddr,
1974                                                 .tos = RT_CONN_FLAGS(sk) } },
1975                                     .proto = IPPROTO_TCP,
1976                                     .uli_u = { .ports =
1977                                                { .sport = inet->sport,
1978                                                  .dport = inet->dport } } };
1979                                                 
1980                 err = ip_route_output_flow(&rt, &fl, sk, 0);
1981         }
1982         if (!err) {
1983                 __sk_dst_set(sk, &rt->u.dst);
1984                 tcp_v4_setup_caps(sk, &rt->u.dst);
1985                 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1986                 return 0;
1987         }
1988
1989         /* Routing failed... */
1990         sk->sk_route_caps = 0;
1991
1992         if (!sysctl_ip_dynaddr ||
1993             sk->sk_state != TCP_SYN_SENT ||
1994             (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1995             (err = tcp_v4_reselect_saddr(sk)) != 0)
1996                 sk->sk_err_soft = -err;
1997
1998         return err;
1999 }
2000
2001 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2002 {
2003         struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2004         struct inet_opt *inet = inet_sk(sk);
2005
2006         sin->sin_family         = AF_INET;
2007         sin->sin_addr.s_addr    = inet->daddr;
2008         sin->sin_port           = inet->dport;
2009 }
2010
2011 /* VJ's idea. Save the last timestamp seen from this destination
2012  * and hold it at least for the normal timewait interval, to use for
2013  * duplicate segment detection in subsequent connections before they
2014  * enter the synchronized state.
2015  */
2016
2017 int tcp_v4_remember_stamp(struct sock *sk)
2018 {
2019         struct inet_opt *inet = inet_sk(sk);
2020         struct tcp_opt *tp = tcp_sk(sk);
2021         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2022         struct inet_peer *peer = NULL;
2023         int release_it = 0;
2024
2025         if (!rt || rt->rt_dst != inet->daddr) {
2026                 peer = inet_getpeer(inet->daddr, 1);
2027                 release_it = 1;
2028         } else {
2029                 if (!rt->peer)
2030                         rt_bind_peer(rt, 1);
2031                 peer = rt->peer;
2032         }
2033
2034         if (peer) {
2035                 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2036                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2037                      peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2038                         peer->tcp_ts_stamp = tp->ts_recent_stamp;
2039                         peer->tcp_ts = tp->ts_recent;
2040                 }
2041                 if (release_it)
2042                         inet_putpeer(peer);
2043                 return 1;
2044         }
2045
2046         return 0;
2047 }
2048
2049 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2050 {
2051         struct inet_peer *peer = NULL;
2052
2053         peer = inet_getpeer(tw->tw_daddr, 1);
2054
2055         if (peer) {
2056                 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2057                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2058                      peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2059                         peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2060                         peer->tcp_ts = tw->tw_ts_recent;
2061                 }
2062                 inet_putpeer(peer);
2063                 return 1;
2064         }
2065
2066         return 0;
2067 }
2068
2069 struct tcp_func ipv4_specific = {
2070         .queue_xmit     =       ip_queue_xmit,
2071         .send_check     =       tcp_v4_send_check,
2072         .rebuild_header =       tcp_v4_rebuild_header,
2073         .conn_request   =       tcp_v4_conn_request,
2074         .syn_recv_sock  =       tcp_v4_syn_recv_sock,
2075         .remember_stamp =       tcp_v4_remember_stamp,
2076         .net_header_len =       sizeof(struct iphdr),
2077         .setsockopt     =       ip_setsockopt,
2078         .getsockopt     =       ip_getsockopt,
2079         .addr2sockaddr  =       v4_addr2sockaddr,
2080         .sockaddr_len   =       sizeof(struct sockaddr_in),
2081 };
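/*
 * This ops table is what keeps the protocol-independent TCP code
 * family-agnostic: tcp_v4_init_sock() below plugs it into tp->af_specific,
 * and the generic paths then go through these hooks (queue_xmit,
 * rebuild_header, conn_request, ...) instead of calling IPv4 routines
 * directly.  The IPv6 code installs its own struct tcp_func in the same
 * way.
 */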
2082
2083 /* NOTE: A lot of things are set to zero explicitly by the call to
2084  *       sk_alloc(), so they need not be done here.
2085  */
2086 static int tcp_v4_init_sock(struct sock *sk)
2087 {
2088         struct tcp_opt *tp = tcp_sk(sk);
2089
2090         skb_queue_head_init(&tp->out_of_order_queue);
2091         tcp_init_xmit_timers(sk);
2092         tcp_prequeue_init(tp);
2093
2094         tp->rto  = TCP_TIMEOUT_INIT;
2095         tp->mdev = TCP_TIMEOUT_INIT;
2096
2097         /* So many TCP implementations out there (incorrectly) count the
2098          * initial SYN frame in their delayed-ACK and congestion control
2099          * algorithms that we must have the following bandaid to talk
2100          * efficiently to them.  -DaveM
2101          */
2102         tp->snd_cwnd = 2;
2103
2104         /* See draft-stevens-tcpca-spec-01 for discussion of the
2105          * initialization of these values.
2106          */
2107         tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
2108         tp->snd_cwnd_clamp = ~0;
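        /* 536 is the classic default MSS from RFC 879: the 576-byte minimum
         * datagram size minus 40 bytes of IP and TCP headers.
         */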
2109         tp->mss_cache_std = tp->mss_cache = 536;
2110
2111         tp->reordering = sysctl_tcp_reordering;
2112
2113         sk->sk_state = TCP_CLOSE;
2114
2115         sk->sk_write_space = sk_stream_write_space;
2116         sk->sk_use_write_queue = 1;
2117
2118         tp->af_specific = &ipv4_specific;
2119
2120         sk->sk_sndbuf = sysctl_tcp_wmem[1];
2121         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2122
2123         atomic_inc(&tcp_sockets_allocated);
2124
2125         return 0;
2126 }
2127
2128 int tcp_v4_destroy_sock(struct sock *sk)
2129 {
2130         struct tcp_opt *tp = tcp_sk(sk);
2131
2132         tcp_clear_xmit_timers(sk);
2133
2134         /* Clean up the write buffer. */
2135         sk_stream_writequeue_purge(sk);
2136
2137         /* Cleans up our, hopefully empty, out_of_order_queue. */
2138         __skb_queue_purge(&tp->out_of_order_queue);
2139
2140         /* Clean up the prequeue; it really should be empty. */
2141         __skb_queue_purge(&tp->ucopy.prequeue);
2142
2143         /* Clean up a referenced TCP bind bucket. */
2144         if (tp->bind_hash)
2145                 tcp_put_port(sk);
2146
2147         /*
2148          * If a cached sendmsg page exists, toss it.
2149          */
2150         if (sk->sk_sndmsg_page) {
2151                 __free_page(sk->sk_sndmsg_page);
2152                 sk->sk_sndmsg_page = NULL;
2153         }
2154
2155         atomic_dec(&tcp_sockets_allocated);
2156
2157         return 0;
2158 }
2159
2160 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2161
2162 #ifdef CONFIG_PROC_FS
2163 /* Proc filesystem TCP sock list dumping. */
2164
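/*
 * The seq_file walk below runs in three stages, tracked by st->state:
 * first the listening hash (descending into each listener's SYN queue as
 * TCP_SEQ_STATE_OPENREQ), then the established hash, and finally the
 * time-wait half of the same table.  tcp_seq_stop() relies on the same
 * state to know which lock is currently held and must be dropped.
 */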
2165 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2166 {
2167         return hlist_empty(head) ? NULL :
2168                 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2169 }
2170
2171 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2172 {
2173         return tw->tw_node.next ?
2174                 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2175 }
2176
2177 static void *listening_get_next(struct seq_file *seq, void *cur)
2178 {
2179         struct tcp_opt *tp;
2180         struct hlist_node *node;
2181         struct sock *sk = cur;
2182         struct tcp_iter_state* st = seq->private;
2183
2184         if (!sk) {
2185                 st->bucket = 0;
2186                 sk = sk_head(&tcp_listening_hash[0]);
2187                 goto get_sk;
2188         }
2189
2190         ++st->num;
2191
2192         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2193                 struct open_request *req = cur;
2194
2195                 tp = tcp_sk(st->syn_wait_sk);
2196                 req = req->dl_next;
2197                 while (1) {
2198                         while (req) {
2199                                 if (req->class->family == st->family) {
2200                                         cur = req;
2201                                         goto out;
2202                                 }
2203                                 req = req->dl_next;
2204                         }
2205                         if (++st->sbucket >= TCP_SYNQ_HSIZE)
2206                                 break;
2207 get_req:
2208                         req = tp->listen_opt->syn_table[st->sbucket];
2209                 }
2210                 sk        = sk_next(st->syn_wait_sk);
2211                 st->state = TCP_SEQ_STATE_LISTENING;
2212                 read_unlock_bh(&tp->syn_wait_lock);
2213         } else {
2214                 tp = tcp_sk(sk);
2215                 read_lock_bh(&tp->syn_wait_lock);
2216                 if (tp->listen_opt && tp->listen_opt->qlen)
2217                         goto start_req;
2218                 read_unlock_bh(&tp->syn_wait_lock);
2219                 sk = sk_next(sk);
2220         }
2221 get_sk:
2222         sk_for_each_from(sk, node) {
2223                 if (sk->sk_family == st->family) {
2224                         cur = sk;
2225                         goto out;
2226                 }
2227                 tp = tcp_sk(sk);
2228                 read_lock_bh(&tp->syn_wait_lock);
2229                 if (tp->listen_opt && tp->listen_opt->qlen) {
2230 start_req:
2231                         st->uid         = sock_i_uid(sk);
2232                         st->syn_wait_sk = sk;
2233                         st->state       = TCP_SEQ_STATE_OPENREQ;
2234                         st->sbucket     = 0;
2235                         goto get_req;
2236                 }
2237                 read_unlock_bh(&tp->syn_wait_lock);
2238         }
2239         if (++st->bucket < TCP_LHTABLE_SIZE) {
2240                 sk = sk_head(&tcp_listening_hash[st->bucket]);
2241                 goto get_sk;
2242         }
2243         cur = NULL;
2244 out:
2245         return cur;
2246 }
2247
2248 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2249 {
2250         void *rc = listening_get_next(seq, NULL);
2251
2252         while (rc && *pos) {
2253                 rc = listening_get_next(seq, rc);
2254                 --*pos;
2255         }
2256         return rc;
2257 }
2258
2259 static void *established_get_first(struct seq_file *seq)
2260 {
2261         struct tcp_iter_state* st = seq->private;
2262         void *rc = NULL;
2263
2264         for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2265                 struct sock *sk;
2266                 struct hlist_node *node;
2267                 struct tcp_tw_bucket *tw;
2268                
2269                 read_lock(&tcp_ehash[st->bucket].lock);
2270                 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2271                         if (sk->sk_family != st->family) {
2272                                 continue;
2273                         }
2274                         rc = sk;
2275                         goto out;
2276                 }
2277                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2278                 tw_for_each(tw, node,
2279                             &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2280                         if (tw->tw_family != st->family) {
2281                                 continue;
2282                         }
2283                         rc = tw;
2284                         goto out;
2285                 }
2286                 read_unlock(&tcp_ehash[st->bucket].lock);
2287                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2288         }
2289 out:
2290         return rc;
2291 }
2292
2293 static void *established_get_next(struct seq_file *seq, void *cur)
2294 {
2295         struct sock *sk = cur;
2296         struct tcp_tw_bucket *tw;
2297         struct hlist_node *node;
2298         struct tcp_iter_state* st = seq->private;
2299
2300         ++st->num;
2301
2302         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2303                 tw = cur;
2304                 tw = tw_next(tw);
2305 get_tw:
2306                 while (tw && tw->tw_family != st->family) {
2307                         tw = tw_next(tw);
2308                 }
2309                 if (tw) {
2310                         cur = tw;
2311                         goto out;
2312                 }
2313                 read_unlock(&tcp_ehash[st->bucket].lock);
2314                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2315                 if (++st->bucket < tcp_ehash_size) {
2316                         read_lock(&tcp_ehash[st->bucket].lock);
2317                         sk = sk_head(&tcp_ehash[st->bucket].chain);
2318                 } else {
2319                         cur = NULL;
2320                         goto out;
2321                 }
2322         } else
2323                 sk = sk_next(sk);
2324
2325         sk_for_each_from(sk, node) {
2326                 if (sk->sk_family == st->family)
2327                         goto found;
2328         }
2329
2330         st->state = TCP_SEQ_STATE_TIME_WAIT;
2331         tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2332         goto get_tw;
2333 found:
2334         cur = sk;
2335 out:
2336         return cur;
2337 }
2338
2339 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2340 {
2341         void *rc = established_get_first(seq);
2342
2343         while (rc && pos) {
2344                 rc = established_get_next(seq, rc);
2345                 --pos;
2346         }               
2347         return rc;
2348 }
2349
2350 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2351 {
2352         void *rc;
2353         struct tcp_iter_state* st = seq->private;
2354
2355         tcp_listen_lock();
2356         st->state = TCP_SEQ_STATE_LISTENING;
2357         rc        = listening_get_idx(seq, &pos);
2358
2359         if (!rc) {
2360                 tcp_listen_unlock();
2361                 local_bh_disable();
2362                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2363                 rc        = established_get_idx(seq, pos);
2364         }
2365
2366         return rc;
2367 }
2368
2369 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2370 {
2371         struct tcp_iter_state* st = seq->private;
2372         st->state = TCP_SEQ_STATE_LISTENING;
2373         st->num = 0;
2374         return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2375 }
2376
2377 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2378 {
2379         void *rc = NULL;
2380         struct tcp_iter_state* st;
2381
2382         if (v == SEQ_START_TOKEN) {
2383                 rc = tcp_get_idx(seq, 0);
2384                 goto out;
2385         }
2386         st = seq->private;
2387
2388         switch (st->state) {
2389         case TCP_SEQ_STATE_OPENREQ:
2390         case TCP_SEQ_STATE_LISTENING:
2391                 rc = listening_get_next(seq, v);
2392                 if (!rc) {
2393                         tcp_listen_unlock();
2394                         local_bh_disable();
2395                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2396                         rc        = established_get_first(seq);
2397                 }
2398                 break;
2399         case TCP_SEQ_STATE_ESTABLISHED:
2400         case TCP_SEQ_STATE_TIME_WAIT:
2401                 rc = established_get_next(seq, v);
2402                 break;
2403         }
2404 out:
2405         ++*pos;
2406         return rc;
2407 }
2408
2409 static void tcp_seq_stop(struct seq_file *seq, void *v)
2410 {
2411         struct tcp_iter_state* st = seq->private;
2412
2413         switch (st->state) {
2414         case TCP_SEQ_STATE_OPENREQ:
2415                 if (v) {
2416                         struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2417                         read_unlock_bh(&tp->syn_wait_lock);
2418                 }
2419         case TCP_SEQ_STATE_LISTENING:
2420                 if (v != SEQ_START_TOKEN)
2421                         tcp_listen_unlock();
2422                 break;
2423         case TCP_SEQ_STATE_TIME_WAIT:
2424         case TCP_SEQ_STATE_ESTABLISHED:
2425                 if (v)
2426                         read_unlock(&tcp_ehash[st->bucket].lock);
2427                 local_bh_enable();
2428                 break;
2429         }
2430 }
2431
2432 static int tcp_seq_open(struct inode *inode, struct file *file)
2433 {
2434         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2435         struct seq_file *seq;
2436         struct tcp_iter_state *s;
2437         int rc;
2438
2439         if (unlikely(afinfo == NULL))
2440                 return -EINVAL;
2441
2442         s = kmalloc(sizeof(*s), GFP_KERNEL);
2443         if (!s)
2444                 return -ENOMEM;
2445         memset(s, 0, sizeof(*s));
2446         s->family               = afinfo->family;
2447         s->seq_ops.start        = tcp_seq_start;
2448         s->seq_ops.next         = tcp_seq_next;
2449         s->seq_ops.show         = afinfo->seq_show;
2450         s->seq_ops.stop         = tcp_seq_stop;
2451
2452         rc = seq_open(file, &s->seq_ops);
2453         if (rc)
2454                 goto out_kfree;
2455         seq          = file->private_data;
2456         seq->private = s;
2457 out:
2458         return rc;
2459 out_kfree:
2460         kfree(s);
2461         goto out;
2462 }
2463
2464 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2465 {
2466         int rc = 0;
2467         struct proc_dir_entry *p;
2468
2469         if (!afinfo)
2470                 return -EINVAL;
2471         afinfo->seq_fops->owner         = afinfo->owner;
2472         afinfo->seq_fops->open          = tcp_seq_open;
2473         afinfo->seq_fops->read          = seq_read;
2474         afinfo->seq_fops->llseek        = seq_lseek;
2475         afinfo->seq_fops->release       = seq_release_private;
2476         
2477         p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2478         if (p)
2479                 p->data = afinfo;
2480         else
2481                 rc = -ENOMEM;
2482         return rc;
2483 }
2484
2485 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2486 {
2487         if (!afinfo)
2488                 return;
2489         proc_net_remove(afinfo->name);
2490         memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
2491 }
2492
2493 static void get_openreq4(struct sock *sk, struct open_request *req,
2494                          char *tmpbuf, int i, int uid)
2495 {
2496         int ttd = req->expires - jiffies;
2497
2498         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2499                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2500                 i,
2501                 req->af.v4_req.loc_addr,
2502                 ntohs(inet_sk(sk)->sport),
2503                 req->af.v4_req.rmt_addr,
2504                 ntohs(req->rmt_port),
2505                 TCP_SYN_RECV,
2506                 0, 0, /* could print option size, but that is af dependent. */
2507                 1,    /* timers active (only the expire timer) */
2508                 jiffies_to_clock_t(ttd),
2509                 req->retrans,
2510                 uid,
2511                 0,  /* non standard timer */
2512                 0, /* open_requests have no inode */
2513                 atomic_read(&sk->sk_refcnt),
2514                 req);
2515 }
2516
2517 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2518 {
2519         int timer_active;
2520         unsigned long timer_expires;
2521         struct tcp_opt *tp = tcp_sk(sp);
2522         struct inet_opt *inet = inet_sk(sp);
2523         unsigned int dest = inet->daddr;
2524         unsigned int src = inet->rcv_saddr;
2525         __u16 destp = ntohs(inet->dport);
2526         __u16 srcp = ntohs(inet->sport);
2527
2528         if (tp->pending == TCP_TIME_RETRANS) {
2529                 timer_active    = 1;
2530                 timer_expires   = tp->timeout;
2531         } else if (tp->pending == TCP_TIME_PROBE0) {
2532                 timer_active    = 4;
2533                 timer_expires   = tp->timeout;
2534         } else if (timer_pending(&sp->sk_timer)) {
2535                 timer_active    = 2;
2536                 timer_expires   = sp->sk_timer.expires;
2537         } else {
2538                 timer_active    = 0;
2539                 timer_expires = jiffies;
2540         }
2541
2542         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2543                         "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2544                 i, src, srcp, dest, destp, sp->sk_state,
2545                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2546                 timer_active,
2547                 jiffies_to_clock_t(timer_expires - jiffies),
2548                 tp->retransmits,
2549                 sock_i_uid(sp),
2550                 tp->probes_out,
2551                 sock_i_ino(sp),
2552                 atomic_read(&sp->sk_refcnt), sp,
2553                 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2554                 tp->snd_cwnd,
2555                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2556 }
2557
2558 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2559 {
2560         unsigned int dest, src;
2561         __u16 destp, srcp;
2562         int ttd = tw->tw_ttd - jiffies;
2563
2564         if (ttd < 0)
2565                 ttd = 0;
2566
2567         dest  = tw->tw_daddr;
2568         src   = tw->tw_rcv_saddr;
2569         destp = ntohs(tw->tw_dport);
2570         srcp  = ntohs(tw->tw_sport);
2571
2572         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2573                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2574                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2575                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2576                 atomic_read(&tw->tw_refcnt), tw);
2577 }
2578
2579 #define TMPSZ 150
2580
2581 static int tcp4_seq_show(struct seq_file *seq, void *v)
2582 {
2583         struct tcp_iter_state* st;
2584         char tmpbuf[TMPSZ + 1];
2585
2586         if (v == SEQ_START_TOKEN) {
2587                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2588                            "  sl  local_address rem_address   st tx_queue "
2589                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2590                            "inode");
2591                 goto out;
2592         }
2593         st = seq->private;
2594
2595         switch (st->state) {
2596         case TCP_SEQ_STATE_LISTENING:
2597         case TCP_SEQ_STATE_ESTABLISHED:
2598                 get_tcp4_sock(v, tmpbuf, st->num);
2599                 break;
2600         case TCP_SEQ_STATE_OPENREQ:
2601                 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2602                 break;
2603         case TCP_SEQ_STATE_TIME_WAIT:
2604                 get_timewait4_sock(v, tmpbuf, st->num);
2605                 break;
2606         }
2607         seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2608 out:
2609         return 0;
2610 }
2611
2612 static struct file_operations tcp4_seq_fops;
2613 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2614         .owner          = THIS_MODULE,
2615         .name           = "tcp",
2616         .family         = AF_INET,
2617         .seq_show       = tcp4_seq_show,
2618         .seq_fops       = &tcp4_seq_fops,
2619 };
2620
2621 int __init tcp4_proc_init(void)
2622 {
2623         return tcp_proc_register(&tcp4_seq_afinfo);
2624 }
2625
2626 void tcp4_proc_exit(void)
2627 {
2628         tcp_proc_unregister(&tcp4_seq_afinfo);
2629 }
2630 #endif /* CONFIG_PROC_FS */
2631
2632 struct proto tcp_prot = {
2633         .name                   = "TCP",
2634         .owner                  = THIS_MODULE,
2635         .close                  = tcp_close,
2636         .connect                = tcp_v4_connect,
2637         .disconnect             = tcp_disconnect,
2638         .accept                 = tcp_accept,
2639         .ioctl                  = tcp_ioctl,
2640         .init                   = tcp_v4_init_sock,
2641         .destroy                = tcp_v4_destroy_sock,
2642         .shutdown               = tcp_shutdown,
2643         .setsockopt             = tcp_setsockopt,
2644         .getsockopt             = tcp_getsockopt,
2645         .sendmsg                = tcp_sendmsg,
2646         .recvmsg                = tcp_recvmsg,
2647         .backlog_rcv            = tcp_v4_do_rcv,
2648         .hash                   = tcp_v4_hash,
2649         .unhash                 = tcp_unhash,
2650         .get_port               = tcp_v4_get_port,
2651         .enter_memory_pressure  = tcp_enter_memory_pressure,
2652         .sockets_allocated      = &tcp_sockets_allocated,
2653         .memory_allocated       = &tcp_memory_allocated,
2654         .memory_pressure        = &tcp_memory_pressure,
2655         .sysctl_mem             = sysctl_tcp_mem,
2656         .sysctl_wmem            = sysctl_tcp_wmem,
2657         .sysctl_rmem            = sysctl_tcp_rmem,
2658         .max_header             = MAX_TCP_HEADER,
2659         .slab_obj_size          = sizeof(struct tcp_sock),
2660 };
2661
2662
2663
2664 void __init tcp_v4_init(struct net_proto_family *ops)
2665 {
2666         int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2667         if (err < 0)
2668                 panic("Failed to create the TCP control socket.\n");
2669         tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
2670         inet_sk(tcp_socket->sk)->uc_ttl = -1;
2671
2672         /* Unhash it so that IP input processing does not even
2673          * see it; we do not wish this socket to see incoming
2674          * packets.
2675          */
2676         tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2677 }
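/*
 * The control socket created above is kernel-internal and unhashed; it is
 * presumably what the stateless transmit paths (e.g. sending RSTs and
 * timewait ACKs for packets that have no full socket) use for output.
 */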
2678
2679 EXPORT_SYMBOL(ipv4_specific);
2680 EXPORT_SYMBOL(tcp_bind_hash);
2681 EXPORT_SYMBOL(tcp_bucket_create);
2682 EXPORT_SYMBOL(tcp_hashinfo);
2683 EXPORT_SYMBOL(tcp_inherit_port);
2684 EXPORT_SYMBOL(tcp_listen_wlock);
2685 EXPORT_SYMBOL(tcp_port_rover);
2686 EXPORT_SYMBOL(tcp_prot);
2687 EXPORT_SYMBOL(tcp_put_port);
2688 EXPORT_SYMBOL(tcp_unhash);
2689 EXPORT_SYMBOL(tcp_v4_conn_request);
2690 EXPORT_SYMBOL(tcp_v4_lookup_listener);
2691 EXPORT_SYMBOL(tcp_v4_connect);
2692 EXPORT_SYMBOL(tcp_v4_do_rcv);
2693 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2694 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2695 EXPORT_SYMBOL(tcp_v4_send_check);
2696 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2697
2698 #ifdef CONFIG_PROC_FS
2699 EXPORT_SYMBOL(tcp_proc_register);
2700 EXPORT_SYMBOL(tcp_proc_unregister);
2701 #endif
2702 EXPORT_SYMBOL(sysctl_local_port_range);
2703 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2704 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2705