fedora core 2.6.10-1.12-FC2
[linux-2.6.git] / net/ipv4/tcp_ipv4.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:     $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *              IPv4 specific functions
 *
 *
 *              code split from:
 *              linux/ipv4/tcp.c
 *              linux/ipv4/tcp_input.c
 *              linux/ipv4/tcp_output.c
 *
 *              See tcp.c for author information
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *              David S. Miller :       New socket lookup architecture.
 *                                      This code is dedicated to John Dyson.
 *              David S. Miller :       Change semantics of established hash,
 *                                      half is devoted to TIME_WAIT sockets
 *                                      and the rest go in the other half.
 *              Andi Kleen :            Add support for syncookies and fixed
 *                                      some bugs: ip options weren't passed to
 *                                      the TCP layer, missed a check for an
 *                                      ACK bit.
 *              Andi Kleen :            Implemented fast path mtu discovery.
 *                                      Fixed many serious bugs in the
 *                                      open_request handling and moved
 *                                      most of it into the af independent code.
 *                                      Added tail drop and some other bugfixes.
 *                                      Added new listen semantics.
 *              Mike McLagan    :       Routing by source
 *      Juan Jose Ciarlante:            ip_dynaddr bits
 *              Andi Kleen:             various fixes.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after a
 *                                      year-long coma.
 *      Andi Kleen              :       Fix new listen.
 *      Andi Kleen              :       Fix accept error reporting.
 *      YOSHIFUJI Hideaki @USAGI and:   Support the IPV6_V6ONLY socket option,
 *      Alexey Kuznetsov                which allows both IPv4 and IPv6 sockets
 *                                      to bind a single port at the same time.
 */

#include <linux/config.h>

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

extern int sysctl_ip_dynaddr;
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
                       struct sk_buff *skb);

struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
        .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
        .__tcp_lhash_users      =       ATOMIC_INIT(0),
        .__tcp_lhash_wait
          = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
        .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
};

/*
 * This array holds the first and last local port number.
 * For high-usage systems, use sysctl to change this to
 * 32768-61000
 */
int sysctl_local_port_range[2] = { 1024, 4999 };
int tcp_port_rover = 1024 - 1;

static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
                                 __u32 faddr, __u16 fport)
{
        int h = (laddr ^ lport) ^ (faddr ^ fport);
        h ^= h >> 16;
        h ^= h >> 8;
        return h & (tcp_ehash_size - 1);
}
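
/*
 * Illustrative sketch (not part of the original file): how the XOR
 * fold above maps a connection 4-tuple to an ehash bucket.  The
 * addresses and ports below are hypothetical.
 */
static __inline__ int tcp_hashfn_example(void)
{
        /* hypothetical connection: 192.168.0.1:80 <-> 10.0.0.2:54321 */
        __u32 laddr = 0xc0a80001;
        __u16 lport = 80;
        __u32 faddr = 0x0a000002;
        __u16 fport = 54321;

        /* after the two shift-XOR folds every input bit can reach the
         * low bits, so the final mask picks a well-mixed bucket */
        return tcp_hashfn(laddr, lport, faddr, fport);
}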

static __inline__ int tcp_sk_hashfn(struct sock *sk)
{
        struct inet_opt *inet = inet_sk(sk);
        __u32 laddr = inet->rcv_saddr;
        __u16 lport = inet->num;
        __u32 faddr = inet->daddr;
        __u16 fport = inet->dport;

        return tcp_hashfn(laddr, lport, faddr, fport);
}

/* Allocate and initialize a new TCP local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
                                          unsigned short snum)
{
        struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
                                                      SLAB_ATOMIC);
        if (tb) {
                tb->port = snum;
                tb->fastreuse = 0;
                INIT_HLIST_HEAD(&tb->owners);
                hlist_add_head(&tb->node, &head->chain);
        }
        return tb;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
{
        if (hlist_empty(&tb->owners)) {
                __hlist_del(&tb->node);
                kmem_cache_free(tcp_bucket_cachep, tb);
        }
}
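
/*
 * Illustrative sketch (not part of the original file): the
 * lookup-or-create pattern that callers of tcp_bucket_create() follow
 * below.  Assumes the caller already did local_bh_disable() and holds
 * head->lock, as tcp_bucket_create() requires.
 */
static __inline__ struct tcp_bind_bucket *
tcp_bucket_find_or_create_example(struct tcp_bind_hashbucket *head,
                                  unsigned short snum)
{
        struct tcp_bind_bucket *tb;
        struct hlist_node *node;

        tb_for_each(tb, node, &head->chain)
                if (tb->port == snum)
                        return tb;              /* reuse existing bucket */
        return tcp_bucket_create(head, snum);   /* may return NULL */
}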

/* Caller must disable local BH processing. */
static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
{
        struct tcp_bind_hashbucket *head =
                                &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
        struct tcp_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = tcp_sk(sk)->bind_hash;
        sk_add_bind_node(child, &tb->owners);
        tcp_sk(child)->bind_hash = tb;
        spin_unlock(&head->lock);
}

inline void tcp_inherit_port(struct sock *sk, struct sock *child)
{
        local_bh_disable();
        __tcp_inherit_port(sk, child);
        local_bh_enable();
}

void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
                   unsigned short snum)
{
        inet_sk(sk)->num = snum;
        sk_add_bind_node(sk, &tb->owners);
        tcp_sk(sk)->bind_hash = tb;
}

static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
{
        const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
        struct sock *sk2;
        struct hlist_node *node;
        int reuse = sk->sk_reuse;

        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
                    !tcp_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if (!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) {
                                const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
                                if (!sk2_rcv_saddr || !sk_rcv_saddr ||
                                    sk2_rcv_saddr == sk_rcv_saddr)
                                        break;
                        }
                }
        }
        return node != NULL;
}

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
        struct tcp_bind_hashbucket *head;
        struct hlist_node *node;
        struct tcp_bind_bucket *tb;
        int ret;

        local_bh_disable();
        if (!snum) {
                int low = sysctl_local_port_range[0];
                int high = sysctl_local_port_range[1];
                int remaining = (high - low) + 1;
                int rover;

                spin_lock(&tcp_portalloc_lock);
                rover = tcp_port_rover;
                do {
                        rover++;
                        if (rover < low || rover > high)
                                rover = low;
                        head = &tcp_bhash[tcp_bhashfn(rover)];
                        spin_lock(&head->lock);
                        tb_for_each(tb, node, &head->chain)
                                if (tb->port == rover)
                                        goto next;
                        break;
                next:
                        spin_unlock(&head->lock);
                } while (--remaining > 0);
                tcp_port_rover = rover;
                spin_unlock(&tcp_portalloc_lock);

                /* Exhausted local port range during search? */
                ret = 1;
                if (remaining <= 0)
                        goto fail;

                /* OK, here is the one we will use.  HEAD is
                 * non-NULL and we hold its mutex.
                 */
                snum = rover;
        } else {
                head = &tcp_bhash[tcp_bhashfn(snum)];
                spin_lock(&head->lock);
                tb_for_each(tb, node, &head->chain)
                        if (tb->port == snum)
                                goto tb_found;
        }
        tb = NULL;
        goto tb_not_found;
tb_found:
        if (!hlist_empty(&tb->owners)) {
                if (sk->sk_reuse > 1)
                        goto success;
                if (tb->fastreuse > 0 &&
                    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
                        goto success;
                } else {
                        ret = 1;
                        if (tcp_bind_conflict(sk, tb))
                                goto fail_unlock;
                }
        }
tb_not_found:
        ret = 1;
        if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                        tb->fastreuse = 1;
                else
                        tb->fastreuse = 0;
        } else if (tb->fastreuse &&
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;
success:
        if (!tcp_sk(sk)->bind_hash)
                tcp_bind_hash(sk, tb, snum);
        BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
        ret = 0;

fail_unlock:
        spin_unlock(&head->lock);
fail:
        local_bh_enable();
        return ret;
}
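
/*
 * Illustrative sketch (not part of the original file): the rover
 * arithmetic used by the search above, with the default sysctl range.
 * The rover wraps to `low' both when it overruns `high' and on first
 * use (tcp_port_rover starts at 1024 - 1).
 */
static __inline__ int tcp_rover_next_example(int rover)
{
        int low = sysctl_local_port_range[0];   /* 1024 by default */
        int high = sysctl_local_port_range[1];  /* 4999 by default */

        rover++;
        if (rover < low || rover > high)
                rover = low;
        return rover;
}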

/* Get rid of any references to a local port held by the
 * given sock.
 */
static void __tcp_put_port(struct sock *sk)
{
        struct inet_opt *inet = inet_sk(sk);
        struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
        struct tcp_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = tcp_sk(sk)->bind_hash;
        __sk_del_bind_node(sk);
        tcp_sk(sk)->bind_hash = NULL;
        inet->num = 0;
        tcp_bucket_destroy(tb);
        spin_unlock(&head->lock);
}

void tcp_put_port(struct sock *sk)
{
        local_bh_disable();
        __tcp_put_port(sk);
        local_bh_enable();
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look: when several writers sleep and a reader wakes them up, all but one
 * immediately hit the write lock and grab all the CPUs. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (a wake-up on each
 * exclusive lock release). It really should be ifdef'd.
 */

void tcp_listen_wlock(void)
{
        write_lock(&tcp_lhash_lock);

        if (atomic_read(&tcp_lhash_users)) {
                DEFINE_WAIT(wait);

                for (;;) {
                        prepare_to_wait_exclusive(&tcp_lhash_wait,
                                                &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&tcp_lhash_users))
                                break;
                        write_unlock_bh(&tcp_lhash_lock);
                        schedule();
                        write_lock_bh(&tcp_lhash_lock);
                }

                finish_wait(&tcp_lhash_wait, &wait);
        }
}

static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
{
        struct hlist_head *list;
        rwlock_t *lock;

        BUG_TRAP(sk_unhashed(sk));
        if (listen_possible && sk->sk_state == TCP_LISTEN) {
                list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
                lock = &tcp_lhash_lock;
                tcp_listen_wlock();
        } else {
                list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
                lock = &tcp_ehash[sk->sk_hashent].lock;
                write_lock(lock);
        }
        __sk_add_node(sk, list);
        sock_prot_inc_use(sk->sk_prot);
        write_unlock(lock);
        if (listen_possible && sk->sk_state == TCP_LISTEN)
                wake_up(&tcp_lhash_wait);
}

static void tcp_v4_hash(struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
                __tcp_v4_hash(sk, 1);
                local_bh_enable();
        }
}

void tcp_unhash(struct sock *sk)
{
        rwlock_t *lock;

        if (sk_unhashed(sk))
                goto ende;

        if (sk->sk_state == TCP_LISTEN) {
                local_bh_disable();
                tcp_listen_wlock();
                lock = &tcp_lhash_lock;
        } else {
                struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
                lock = &head->lock;
                write_lock_bh(&head->lock);
        }

        if (__sk_del_node_init(sk))
                sock_prot_dec_use(sk->sk_prot);
        write_unlock_bh(lock);

 ende:
        if (sk->sk_state == TCP_LISTEN)
                wake_up(&tcp_lhash_wait);
}

/* Don't inline this cruft.  There are some nice properties to
 * exploit here.  The BSD API does not allow a listening TCP
 * to specify the remote port nor the remote address for the
 * connection.  So always assume those are both wildcarded
 * during the search since they can never be otherwise.
 */
static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
                                             unsigned short hnum, int dif)
{
        struct sock *result = NULL, *sk;
        struct hlist_node *node;
        int score, hiscore;

        hiscore = -1;
        sk_for_each(sk, node, head) {
                struct inet_opt *inet = inet_sk(sk);

                if (inet->num == hnum && !ipv6_only_sock(sk)) {
                        __u32 rcv_saddr = inet->rcv_saddr;

                        score = (sk->sk_family == PF_INET ? 1 : 0);
                        if (rcv_saddr) {
                                if (rcv_saddr != daddr)
                                        continue;
                                score += 2;
                        }
                        if (sk->sk_bound_dev_if) {
                                if (sk->sk_bound_dev_if != dif)
                                        continue;
                                score += 2;
                        }
                        if (score == 5)
                                return sk;
                        if (score > hiscore) {
                                hiscore = score;
                                result = sk;
                        }
                }
        }
        return result;
}
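
/*
 * Illustrative sketch (not part of the original file): the scores the
 * loop above can produce.  AF_INET family scores 1, a matching bound
 * address 2 more, a matching bound device 2 more; 5 is therefore a
 * perfect match and ends the search early.  Returns -1 for the
 * `continue' cases.
 */
static __inline__ int tcp_listener_score_example(int is_af_inet,
                                                 int bound_addr, int addr_match,
                                                 int bound_dev, int dev_match)
{
        int score = is_af_inet ? 1 : 0;

        if (bound_addr) {
                if (!addr_match)
                        return -1;      /* skipped, like `continue' above */
                score += 2;
        }
        if (bound_dev) {
                if (!dev_match)
                        return -1;
                score += 2;
        }
        return score;                   /* 5 == exact match */
}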

/* Optimize the common listener case. */
struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif)
{
        struct sock *sk = NULL;
        struct hlist_head *head;

        read_lock(&tcp_lhash_lock);
        head = &tcp_listening_hash[tcp_lhashfn(hnum)];
        if (!hlist_empty(head)) {
                struct inet_opt *inet = inet_sk((sk = __sk_head(head)));

                if (inet->num == hnum && !sk->sk_node.next &&
                    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
                    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
                    !sk->sk_bound_dev_if)
                        goto sherry_cache;
                sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
        }
        if (sk) {
sherry_cache:
                sock_hold(sk);
        }
        read_unlock(&tcp_lhash_lock);
        return sk;
}

EXPORT_SYMBOL_GPL(tcp_v4_lookup_listener);

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */

static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
                                                       u32 daddr, u16 hnum,
                                                       int dif)
{
        struct tcp_ehash_bucket *head;
        TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
        __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        struct hlist_node *node;
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyway.
         */
        int hash = tcp_hashfn(daddr, hnum, saddr, sport);
        head = &tcp_ehash[hash];
        read_lock(&head->lock);
        sk_for_each(sk, node, &head->chain) {
                if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
                        goto hit; /* You sunk my battleship! */
        }

        /* Must check for a TIME_WAIT'er before going to listener hash. */
        sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
                if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
                        goto hit;
        }
        sk = NULL;
out:
        read_unlock(&head->lock);
        return sk;
hit:
        sock_hold(sk);
        goto out;
}

static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
                                           u32 daddr, u16 hnum, int dif)
{
        struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
                                                      daddr, hnum, dif);

        return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
}

inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
                                  u16 dport, int dif)
{
        struct sock *sk;

        local_bh_disable();
        sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
        local_bh_enable();

        return sk;
}
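
/*
 * Illustrative sketch (not part of the original file): how an
 * input-path caller might resolve the socket for a received segment.
 * Note tcp_v4_lookup() takes dport in network byte order and converts
 * internally, while the source port is passed through to the hash
 * as-is.
 */
static inline struct sock *tcp_v4_lookup_skb_example(struct sk_buff *skb)
{
        struct iphdr *iph = skb->nh.iph;
        struct tcphdr *th = skb->h.th;
        int dif = ((struct rtable *)skb->dst)->rt_iif;

        return tcp_v4_lookup(iph->saddr, th->source,
                             iph->daddr, th->dest, dif);
}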

EXPORT_SYMBOL_GPL(tcp_v4_lookup);

static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
        return secure_tcp_sequence_number(skb->nh.iph->daddr,
                                          skb->nh.iph->saddr,
                                          skb->h.th->dest,
                                          skb->h.th->source);
}

/* called with local bh disabled */
static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
                                      struct tcp_tw_bucket **twp)
{
        struct inet_opt *inet = inet_sk(sk);
        u32 daddr = inet->rcv_saddr;
        u32 saddr = inet->daddr;
        int dif = sk->sk_bound_dev_if;
        TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
        __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
        int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
        struct tcp_ehash_bucket *head = &tcp_ehash[hash];
        struct sock *sk2;
        struct hlist_node *node;
        struct tcp_tw_bucket *tw;

        write_lock(&head->lock);

        /* Check TIME-WAIT sockets first. */
        sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
                tw = (struct tcp_tw_bucket *)sk2;

                if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
                        struct tcp_opt *tp = tcp_sk(sk);

                        /* With PAWS, it is safe from the viewpoint
                           of data integrity. Even without PAWS it
                           is safe provided sequence spaces do not
                           overlap, i.e. at data rates <= 80Mbit/sec.

                           Actually, the idea is close to VJ's one,
                           only the timestamp cache is held not per host,
                           but per port pair, and the TW bucket is used
                           as state holder.

                           If the TW bucket has already been destroyed we
                           fall back to VJ's scheme and use the initial
                           timestamp retrieved from the peer table.
                         */
                        if (tw->tw_ts_recent_stamp &&
                            (!twp || (sysctl_tcp_tw_reuse &&
                                      xtime.tv_sec -
                                      tw->tw_ts_recent_stamp > 1))) {
                                if ((tp->write_seq =
                                                tw->tw_snd_nxt + 65535 + 2) == 0)
                                        tp->write_seq = 1;
                                tp->ts_recent       = tw->tw_ts_recent;
                                tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
                                sock_hold(sk2);
                                goto unique;
                        } else
                                goto not_unique;
                }
        }
        tw = NULL;

        /* And established part... */
        sk_for_each(sk2, node, &head->chain) {
                if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
                        goto not_unique;
        }

unique:
        /* Must record num and sport now. Otherwise we will see
         * a socket with a funny identity in the hash table. */
        inet->num = lport;
        inet->sport = htons(lport);
        sk->sk_hashent = hash;
        BUG_TRAP(sk_unhashed(sk));
        __sk_add_node(sk, &head->chain);
        sock_prot_inc_use(sk->sk_prot);
        write_unlock(&head->lock);

        if (twp) {
                *twp = tw;
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                tcp_tw_deschedule(tw);
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

                tcp_tw_put(tw);
        }

        return 0;

not_unique:
        write_unlock(&head->lock);
        return -EADDRNOTAVAIL;
}
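
/*
 * Illustrative sketch (not part of the original file): the
 * sequence-number arithmetic used when recycling the TIME-WAIT socket
 * above.  The new connection starts 65535 + 2 bytes past the old
 * snd_nxt, so nothing it sends can land inside the old receiver's
 * window; 0 is avoided because write_seq == 0 means "pick a fresh
 * secure ISN" in tcp_v4_connect().
 */
static __inline__ u32 tcp_tw_reuse_isn_example(u32 tw_snd_nxt)
{
        u32 seq = tw_snd_nxt + 65535 + 2;
        return seq ? : 1;
}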

/*
 * Bind a port for a connect operation and hash it.
 */
static int tcp_v4_hash_connect(struct sock *sk)
{
        unsigned short snum = inet_sk(sk)->num;
        struct tcp_bind_hashbucket *head;
        struct tcp_bind_bucket *tb;
        int ret;

        if (!snum) {
                int rover;
                int low = sysctl_local_port_range[0];
                int high = sysctl_local_port_range[1];
                int remaining = (high - low) + 1;
                struct hlist_node *node;
                struct tcp_tw_bucket *tw = NULL;

                local_bh_disable();

                /* TODO. Actually it is not such a bad idea to remove
                 * tcp_portalloc_lock before the next submission to Linus.
                 * As soon as we touch this place at all it is time to think.
                 *
                 * Now it protects a single _advisory_ variable, tcp_port_rover,
                 * hence it is mostly useless.
                 * The code will work nicely if we just delete it, but
                 * I am afraid that in the contended case it will work no
                 * better or even worse: another cpu will just hit the same
                 * bucket and spin there.
                 * So some cpu salt could remove both the contention and the
                 * memory pingpong. Any ideas how to do this in a nice way?
                 */
                spin_lock(&tcp_portalloc_lock);
                rover = tcp_port_rover;

                do {
                        rover++;
                        if ((rover < low) || (rover > high))
                                rover = low;
                        head = &tcp_bhash[tcp_bhashfn(rover)];
                        spin_lock(&head->lock);

                        /* Does not bother with rcv_saddr checks,
                         * because the established check is already
                         * unique enough.
                         */
                        tb_for_each(tb, node, &head->chain) {
                                if (tb->port == rover) {
                                        BUG_TRAP(!hlist_empty(&tb->owners));
                                        if (tb->fastreuse >= 0)
                                                goto next_port;
                                        if (!__tcp_v4_check_established(sk,
                                                                        rover,
                                                                        &tw))
                                                goto ok;
                                        goto next_port;
                                }
                        }

                        tb = tcp_bucket_create(head, rover);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                break;
                        }
                        tb->fastreuse = -1;
                        goto ok;

                next_port:
                        spin_unlock(&head->lock);
                } while (--remaining > 0);
                tcp_port_rover = rover;
                spin_unlock(&tcp_portalloc_lock);

                local_bh_enable();

                return -EADDRNOTAVAIL;

ok:
                /* All locks still held and bhs disabled */
                tcp_port_rover = rover;
                spin_unlock(&tcp_portalloc_lock);

                tcp_bind_hash(sk, tb, rover);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->sport = htons(rover);
                        __tcp_v4_hash(sk, 0);
                }
                spin_unlock(&head->lock);

                if (tw) {
                        tcp_tw_deschedule(tw);
                        tcp_tw_put(tw);
                }

                ret = 0;
                goto out;
        }

        head  = &tcp_bhash[tcp_bhashfn(snum)];
        tb  = tcp_sk(sk)->bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                __tcp_v4_hash(sk, 0);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
                spin_unlock(&head->lock);
                /* No definite answer... Walk to established hash table */
                ret = __tcp_v4_check_established(sk, snum, NULL);
out:
                local_bh_enable();
                return ret;
        }
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_opt *inet = inet_sk(sk);
        struct tcp_opt *tp = tcp_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct rtable *rt;
        u32 daddr, nexthop;
        int tmp;
        int err;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        if (inet->opt && inet->opt->srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet->opt->faddr;
        }

        tmp = ip_route_connect(&rt, nexthop, inet->saddr,
                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                               IPPROTO_TCP,
                               inet->sport, usin->sin_port, sk);
        if (tmp < 0)
                return tmp;

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet->opt || !inet->opt->srr)
                daddr = rt->rt_dst;

        if (!inet->saddr)
                inet->saddr = rt->rt_src;
        inet->rcv_saddr = inet->saddr;

        if (tp->ts_recent_stamp && inet->daddr != daddr) {
                /* Reset inherited state */
                tp->ts_recent       = 0;
                tp->ts_recent_stamp = 0;
                tp->write_seq       = 0;
        }

        if (sysctl_tcp_tw_recycle &&
            !tp->ts_recent_stamp && rt->rt_dst == daddr) {
                struct inet_peer *peer = rt_get_peer(rt);

                /* VJ's idea. We save the last timestamp seen from
                 * the destination in the peer table when entering TIME-WAIT
                 * state, and initialize ts_recent from it when trying a new
                 * connection.
                 */

                if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
                        tp->ts_recent_stamp = peer->tcp_ts_stamp;
                        tp->ts_recent = peer->tcp_ts;
                }
        }

        inet->dport = usin->sin_port;
        inet->daddr = daddr;

        tp->ext_header_len = 0;
        if (inet->opt)
                tp->ext_header_len = inet->opt->optlen;

        tp->mss_clamp = 536;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the socket
         * lock, select a source port, enter ourselves into the hash tables
         * and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = tcp_v4_hash_connect(sk);
        if (err)
                goto failure;

        err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
        if (err)
                goto failure;

        /* OK, now commit destination to socket.  */
        __sk_dst_set(sk, &rt->u.dst);
        tcp_v4_setup_caps(sk, &rt->u.dst);
        tp->ext2_header_len = rt->u.dst.header_len;

        if (!tp->write_seq)
                tp->write_seq = secure_tcp_sequence_number(inet->saddr,
                                                           inet->daddr,
                                                           inet->sport,
                                                           usin->sin_port);

        inet->id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);
        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /* This unhashes the socket and releases the local port, if necessary. */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->dport = 0;
        return err;
}
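
/*
 * Illustrative sketch (not part of the original file): the argument
 * shape tcp_v4_connect() expects.  The address and port below are
 * hypothetical; both must be in network byte order.
 */
static inline int tcp_v4_connect_example(struct sock *sk)
{
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(80);                       /* hypothetical */
        sin.sin_addr.s_addr = htonl(0x0a000002);        /* 10.0.0.2 */

        return tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
}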

static __inline__ int tcp_v4_iif(struct sk_buff *skb)
{
        return ((struct rtable *)skb->dst)->rt_iif;
}

static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
{
        return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
}

static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
                                              struct open_request ***prevp,
                                              __u16 rport,
                                              __u32 raddr, __u32 laddr)
{
        struct tcp_listen_opt *lopt = tp->listen_opt;
        struct open_request *req, **prev;

        for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                if (req->rmt_port == rport &&
                    req->af.v4_req.rmt_addr == raddr &&
                    req->af.v4_req.loc_addr == laddr &&
                    TCP_INET_FAMILY(req->class->family)) {
                        BUG_TRAP(!req->sk);
                        *prevp = prev;
                        break;
                }
        }

        return req;
}

static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
{
        struct tcp_opt *tp = tcp_sk(sk);
        struct tcp_listen_opt *lopt = tp->listen_opt;
        u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);

        req->expires = jiffies + TCP_TIMEOUT_INIT;
        req->retrans = 0;
        req->sk = NULL;
        req->dl_next = lopt->syn_table[h];

        write_lock(&tp->syn_wait_lock);
        lopt->syn_table[h] = req;
        write_unlock(&tp->syn_wait_lock);

        tcp_synq_added(sk);
}


/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
                                     u32 mtu)
{
        struct dst_entry *dst;
        struct inet_opt *inet = inet_sk(sk);
        struct tcp_opt *tp = tcp_sk(sk);

        /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
         * sent out by Linux are always < 576 bytes, so they should go through
         * unfragmented).
         */
        if (sk->sk_state == TCP_LISTEN)
                return;

        /* We don't check in the dst entry if pmtu discovery is forbidden
         * on this route. We just assume that no packet-too-big packets
         * are sent back when pmtu discovery is not active.
         * There is a small race when the user changes this flag in the
         * route, but I think that's acceptable.
         */
        if ((dst = __sk_dst_check(sk, 0)) == NULL)
                return;

        dst->ops->update_pmtu(dst, mtu);

        /* Something is about to go wrong... Remember the soft error
         * for the case that this connection will not be able to recover.
         */
        if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_pmtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            tp->pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
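
/*
 * Illustrative sketch (not part of the original file): the decision
 * above with concrete, hypothetical numbers.  If our cached path MTU
 * (tp->pmtu_cookie) is 1500 and the ICMP reports 1400, we shrink the
 * MSS and retransmit immediately instead of waiting for the timer.
 */
static inline void do_pmtu_example(struct sock *sk)
{
        u32 reported_mtu = 1400;        /* hypothetical ICMP value */

        if (tcp_sk(sk)->pmtu_cookie > reported_mtu) {
                tcp_sync_mss(sk, reported_mtu);
                tcp_simple_retransmit(sk);
        }
}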

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
        struct tcp_opt *tp;
        struct inet_opt *inet;
        int type = skb->h.icmph->type;
        int code = skb->h.icmph->code;
        struct sock *sk;
        __u32 seq;
        int err;

        if (skb->len < (iph->ihl << 2) + 8) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }

        sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
                           th->source, tcp_v4_iif(skb));
        if (!sk) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                tcp_tw_put((struct tcp_tw_bucket *)sk);
                return;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        if (!sock_owned_by_user(sk))
                                do_pmtu_discovery(sk, iph, info);
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
                struct open_request *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = tcp_v4_search_req(tp, &prev, th->dest,
                                        iph->daddr, iph->saddr);
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                   an established socket here.
                 */
                BUG_TRAP(!req->sk);

                if (seq != req->snt_isn) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                tcp_synq_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can, f.e., if SYNs crossed.
                             */
                if (!sock_owned_by_user(sk)) {
                        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
         * to be considered hard errors (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note that in the modern internet, where routing is unreliable
         * and in each dark corner broken firewalls sit, sending random
         * errors ordered by their masters, even these two messages finally
         * lose their original sense (even Linux sends invalid PORT_UNREACHs)
         *
         * Now we are in compliance with RFCs.
         *                                                      --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else  { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
                       struct sk_buff *skb)
{
        struct inet_opt *inet = inet_sk(sk);

        if (skb->ip_summed == CHECKSUM_HW) {
                th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
                skb->csum = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
                                         csum_partial((char *)th,
                                                      th->doff << 2,
                                                      skb->csum));
        }
}
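
/*
 * Illustrative sketch (not part of the original file): the software
 * checksum path for a header-only segment.  The checksum field must
 * be zeroed before summing; tcp_v4_check() then folds in the IPv4
 * pseudo-header.
 */
static inline void tcp_v4_checksum_example(struct sock *sk, struct tcphdr *th)
{
        struct inet_opt *inet = inet_sk(sk);
        int len = th->doff << 2;        /* header only, no payload */

        th->check = 0;
        th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
                                 csum_partial((char *)th, len, 0));
}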

/*
 *      This routine will send an RST to the other tcp.
 *
 *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *                    for the reset.
 *      Answer: if a packet caused the RST, it is not for a socket
 *              existing in our system; if it is matched to a socket,
 *              it is just a duplicate segment or a bug in the other
 *              side's TCP.
 *              So we build the reply based only on the parameters that
 *              arrived with the segment.
 *      Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sk_buff *skb)
{
        struct tcphdr *th = skb->h.th;
        struct tcphdr rth;
        struct ip_reply_arg arg;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rth, 0, sizeof(struct tcphdr));
        rth.dest   = th->source;
        rth.source = th->dest;
        rth.doff   = sizeof(struct tcphdr) / 4;
        rth.rst    = 1;

        if (th->ack) {
                rth.seq = th->ack_seq;
        } else {
                rth.ack = 1;
                rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                    skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof arg);
        arg.iov[0].iov_base = (unsigned char *)&rth;
        arg.iov[0].iov_len  = sizeof rth;
        arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
                                      skb->nh.iph->saddr, /*XXX*/
                                      sizeof(struct tcphdr), IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;

        ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

        TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}
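
/*
 * Illustrative sketch (not part of the original file): the ack_seq
 * arithmetic above with concrete, hypothetical numbers.  An RST
 * answering a bare SYN with seq = 1000 must ack 1001: SYN and FIN
 * each occupy one unit of sequence space in addition to the payload.
 */
static __inline__ u32 tcp_rst_ack_seq_example(u32 seq, int syn, int fin,
                                              u32 payload_len)
{
        return seq + syn + fin + payload_len;   /* e.g. 1000+1+0+0 = 1001 */
}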

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts)
{
        struct tcphdr *th = skb->h.th;
        struct {
                struct tcphdr th;
                u32 tsopt[3];
        } rep;
        struct ip_reply_arg arg;

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof arg);

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (ts) {
                rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                     (TCPOPT_TIMESTAMP << 8) |
                                     TCPOLEN_TIMESTAMP);
                rep.tsopt[1] = htonl(tcp_time_stamp);
                rep.tsopt[2] = htonl(ts);
                arg.iov[0].iov_len = sizeof(rep);
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

        arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
                                      skb->nh.iph->saddr, /*XXX*/
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;

        ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

        tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
                        tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);

        tcp_tw_put(tw);
}

static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
{
        tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
                        req->ts_recent);
}

static struct dst_entry* tcp_v4_route_req(struct sock *sk,
                                          struct open_request *req)
{
        struct rtable *rt;
        struct ip_options *opt = req->af.v4_req.opt;
        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                            .nl_u = { .ip4_u =
                                      { .daddr = ((opt && opt->srr) ?
                                                  opt->faddr :
                                                  req->af.v4_req.rmt_addr),
                                        .saddr = req->af.v4_req.loc_addr,
                                        .tos = RT_CONN_FLAGS(sk) } },
                            .proto = IPPROTO_TCP,
                            .uli_u = { .ports =
                                       { .sport = inet_sk(sk)->sport,
                                         .dport = req->rmt_port } } };

        if (ip_route_output_flow(&rt, &fl, sk, 0)) {
                IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
                ip_rt_put(rt);
                IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
        return &rt->u.dst;
}

/*
 *      Send a SYN-ACK after having received a SYN.
 *      This still operates on an open_request only, not on a big
 *      socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
                              struct dst_entry *dst)
{
        int err = -1;
        struct sk_buff * skb;

        /* First, grab a route. */
        if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
                goto out;

        skb = tcp_make_synack(sk, dst, req);

        if (skb) {
                struct tcphdr *th = skb->h.th;

                th->check = tcp_v4_check(th, skb->len,
                                         req->af.v4_req.loc_addr,
                                         req->af.v4_req.rmt_addr,
                                         csum_partial((char *)th, skb->len,
                                                      skb->csum));

                err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
                                            req->af.v4_req.rmt_addr,
                                            req->af.v4_req.opt);
                if (err == NET_XMIT_CN)
                        err = 0;
        }

out:
        dst_release(dst);
        return err;
}

/*
 *      IPv4 open_request destructor.
 */
static void tcp_v4_or_free(struct open_request *req)
{
        if (req->af.v4_req.opt)
                kfree(req->af.v4_req.opt);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
        static unsigned long warntime;

        if (time_after(jiffies, (warntime + HZ * 60))) {
                warntime = jiffies;
                printk(KERN_INFO
                       "possible SYN flooding on port %d. Sending cookies.\n",
                       ntohs(skb->h.th->dest));
        }
}

/*
 * Save and compile IPv4 options into the open_request if needed.
 */
static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
                                                     struct sk_buff *skb)
{
        struct ip_options *opt = &(IPCB(skb)->opt);
        struct ip_options *dopt = NULL;

        if (opt && opt->optlen) {
                int opt_size = optlength(opt);
                dopt = kmalloc(opt_size, GFP_ATOMIC);
                if (dopt) {
                        if (ip_options_echo(dopt, skb)) {
                                kfree(dopt);
                                dopt = NULL;
                        }
                }
        }
        return dopt;
}

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * It was 128 by default. Experiments with real servers show that
 * it is absolutely not enough even at 100conn/sec. 256 cures most
 * of the problems. This value is adjusted to 128 for very small machines
 * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
 * Increasing it further requires changing the hash table size.
 */
int sysctl_max_syn_backlog = 256;

struct or_calltable or_ipv4 = {
        .family         =       PF_INET,
        .rtx_syn_ack    =       tcp_v4_send_synack,
        .send_ack       =       tcp_v4_or_send_ack,
        .destructor     =       tcp_v4_or_free,
        .send_reset     =       tcp_v4_send_reset,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_opt tp;
        struct open_request *req;
        __u32 saddr = skb->nh.iph->saddr;
        __u32 daddr = skb->nh.iph->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

        /* Never answer SYNs sent to broadcast or multicast addresses */
        if (((struct rtable *)skb->dst)->rt_flags &
            (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        /* TW buckets are converted to open requests without
         * limitations, they conserve resources and the peer is
         * evidently a real one.
         */
        if (tcp_synq_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies) {
                        want_cookie = 1;
                } else
#endif
                goto drop;
        }

        /* Accept backlog is full. If we have already queued enough
         * warm entries in the syn queue, drop the request. It is better
         * than clogging the syn queue with openreqs with exponentially
         * increasing timeout.
         */
        if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
                goto drop;

        req = tcp_openreq_alloc();
        if (!req)
                goto drop;

        tcp_clear_options(&tp);
        tp.mss_clamp = 536;
        tp.user_mss  = tcp_sk(sk)->user_mss;

        tcp_parse_options(skb, &tp, 0);

        if (want_cookie) {
                tcp_clear_options(&tp);
                tp.saw_tstamp = 0;
        }

1462         if (tp.saw_tstamp && !tp.rcv_tsval) {
1463                 /* Some OSes (unknown ones, but seen on web servers serving
1464                  * content of interest only to Windows users) do not send
1465                  * their stamp in the SYN. That is the easy case: we simply
1466                  * do not advertise TS support.
1467                  */
1468                 tp.saw_tstamp = 0;
1469                 tp.tstamp_ok  = 0;
1470         }
1471         tp.tstamp_ok = tp.saw_tstamp;
1472
1473         tcp_openreq_init(req, &tp, skb);
1474
1475         req->af.v4_req.loc_addr = daddr;
1476         req->af.v4_req.rmt_addr = saddr;
1477         req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1478         req->class = &or_ipv4;
1479         if (!want_cookie)
1480                 TCP_ECN_create_request(req, skb->h.th);
1481
1482         if (want_cookie) {
1483 #ifdef CONFIG_SYN_COOKIES
1484                 syn_flood_warning(skb);
1485 #endif
1486                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1487         } else if (!isn) {
1488                 struct inet_peer *peer = NULL;
1489
1490                 /* VJ's idea. We save the last timestamp seen
1491                  * from the destination in the peer table when entering
1492                  * TIME-WAIT state, and check against it before
1493                  * accepting a new connection request.
1494                  *
1495                  * If "isn" is not zero, this request hit a live
1496                  * timewait bucket, so all the necessary checks
1497                  * were made by the code processing the timewait state.
1498                  */
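                /* A sketch of the check below: the SYN is rejected only
                 * when the peer's record is younger than TCP_PAWS_MSL
                 * seconds AND the timestamp it carries lags the
                 * remembered one by more than TCP_PAWS_WINDOW, i.e. it
                 * looks like an old duplicate rather than a genuinely
                 * new connection.
                 */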
1499                 if (tp.saw_tstamp &&
1500                     sysctl_tcp_tw_recycle &&
1501                     (dst = tcp_v4_route_req(sk, req)) != NULL &&
1502                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1503                     peer->v4daddr == saddr) {
1504                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1505                             (s32)(peer->tcp_ts - req->ts_recent) >
1506                                                         TCP_PAWS_WINDOW) {
1507                                 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1508                                 dst_release(dst);
1509                                 goto drop_and_free;
1510                         }
1511                 }
1512                 /* Kill the following clause if you dislike this approach. */
1513                 else if (!sysctl_tcp_syncookies &&
1514                          (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1515                           (sysctl_max_syn_backlog >> 2)) &&
1516                          (!peer || !peer->tcp_ts_stamp) &&
1517                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1518                         /* Without syncookies the last quarter of the
1519                          * backlog is reserved for destinations proven
1520                          * to be alive.
1521                          * It means that we keep communicating with
1522                          * destinations that were already remembered
1523                          * by the time the synflood started.
1524                          */
1525                         NETDEBUG(if (net_ratelimit()) \
1526                                         printk(KERN_DEBUG "TCP: drop open "
1527                                                           "request from %u.%u."
1528                                                           "%u.%u/%u\n", \
1529                                                NIPQUAD(saddr),
1530                                                ntohs(skb->h.th->source)));
1531                         dst_release(dst);
1532                         goto drop_and_free;
1533                 }
1534
1535                 isn = tcp_v4_init_sequence(sk, skb);
1536         }
1537         req->snt_isn = isn;
1538
1539         if (tcp_v4_send_synack(sk, req, dst))
1540                 goto drop_and_free;
1541
1542         if (want_cookie) {
1543                 tcp_openreq_free(req);
1544         } else {
1545                 tcp_v4_synq_add(sk, req);
1546         }
1547         return 0;
1548
1549 drop_and_free:
1550         tcp_openreq_free(req);
1551 drop:
1552         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1553         return 0;
1554 }
1555
1556
1557 /*
1558  * The three-way handshake has completed - we got a valid ACK for our
1559  * SYN-ACK - now create the new socket.
1560  */
1561 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1562                                   struct open_request *req,
1563                                   struct dst_entry *dst)
1564 {
1565         struct inet_opt *newinet;
1566         struct tcp_opt *newtp;
1567         struct sock *newsk;
1568
1569         if (sk_acceptq_is_full(sk))
1570                 goto exit_overflow;
1571
1572         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1573                 goto exit;
1574
1575         newsk = tcp_create_openreq_child(sk, req, skb);
1576         if (!newsk)
1577                 goto exit;
1578
1579         newsk->sk_dst_cache = dst;
1580         tcp_v4_setup_caps(newsk, dst);
1581
1582         newtp                 = tcp_sk(newsk);
1583         newinet               = inet_sk(newsk);
1584         newinet->daddr        = req->af.v4_req.rmt_addr;
1585         newinet->rcv_saddr    = req->af.v4_req.loc_addr;
1586         newinet->saddr        = req->af.v4_req.loc_addr;
1587         newinet->opt          = req->af.v4_req.opt;
1588         req->af.v4_req.opt    = NULL;
1589         newinet->mc_index     = tcp_v4_iif(skb);
1590         newinet->mc_ttl       = skb->nh.iph->ttl;
1591         newtp->ext_header_len = 0;
1592         if (newinet->opt)
1593                 newtp->ext_header_len = newinet->opt->optlen;
1594         newtp->ext2_header_len = dst->header_len;
1595         newinet->id = newtp->write_seq ^ jiffies;
1596
1597         tcp_sync_mss(newsk, dst_pmtu(dst));
1598         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1599         tcp_initialize_rcv_mss(newsk);
1600
1601         __tcp_v4_hash(newsk, 0);
1602         __tcp_inherit_port(sk, newsk);
1603
1604         return newsk;
1605
1606 exit_overflow:
1607         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1608 exit:
1609         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1610         dst_release(dst);
1611         return NULL;
1612 }
1613
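/* Given a segment arriving on a listening socket, decide who handles
 * it: a pending open_request in the listener's SYN table, an already
 * established child socket, or (with syncookies enabled) a bare ACK
 * validated against a previously emitted cookie. Returns the listener
 * itself when the segment should be treated as a fresh SYN.
 */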
1614 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1615 {
1616         struct tcphdr *th = skb->h.th;
1617         struct iphdr *iph = skb->nh.iph;
1618         struct tcp_opt *tp = tcp_sk(sk);
1619         struct sock *nsk;
1620         struct open_request **prev;
1621         /* Find possible connection requests. */
1622         struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1623                                                      iph->saddr, iph->daddr);
1624         if (req)
1625                 return tcp_check_req(sk, skb, req, prev);
1626
1627         nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1628                                           th->source,
1629                                           skb->nh.iph->daddr,
1630                                           ntohs(th->dest),
1631                                           tcp_v4_iif(skb));
1632
1633         if (nsk) {
1634                 if (nsk->sk_state != TCP_TIME_WAIT) {
1635                         bh_lock_sock(nsk);
1636                         return nsk;
1637                 }
1638                 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1639                 return NULL;
1640         }
1641
1642 #ifdef CONFIG_SYN_COOKIES
1643         if (!th->rst && !th->syn && th->ack)
1644                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1645 #endif
1646         return sk;
1647 }
1648
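/* Validate or prepare the TCP checksum of an incoming segment. If the
 * NIC already computed a checksum (CHECKSUM_HW), fold it against the
 * pseudo-header here; tcp_v4_check() is the IPv4 pseudo-header fold,
 * essentially csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base).
 * Short segments (<= 76 bytes) are cheap enough to verify in software
 * right away; for anything larger we only seed skb->csum with the
 * inverted pseudo-header sum and let the copy-to-user path complete
 * the verification later.
 */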
1649 static int tcp_v4_checksum_init(struct sk_buff *skb)
1650 {
1651         if (skb->ip_summed == CHECKSUM_HW) {
1652                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1653                 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1654                                   skb->nh.iph->daddr, skb->csum))
1655                         return 0;
1656
1657                 NETDEBUG(if (net_ratelimit())
1658                                 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1659                 skb->ip_summed = CHECKSUM_NONE;
1660         }
1661         if (skb->len <= 76) {
1662                 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1663                                  skb->nh.iph->daddr,
1664                                  skb_checksum(skb, 0, skb->len, 0)))
1665                         return -1;
1666                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1667         } else {
1668                 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1669                                           skb->nh.iph->saddr,
1670                                           skb->nh.iph->daddr, 0);
1671         }
1672         return 0;
1673 }
1674
1675
1676 /* The socket must have its spinlock held when we get
1677  * here.
1678  *
1679  * We have a potential double-lock case here, so even when
1680  * doing backlog processing we use the BH locking scheme.
1681  * This is because we cannot sleep with the original spinlock
1682  * held.
1683  */
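/* The caller's side of this contract is visible in tcp_v4_rcv() below:
 * it takes bh_lock_sock(), and if the socket is owned by a user context
 * the skb is queued to the backlog instead, to be replayed through this
 * function (wired up as .backlog_rcv in tcp_prot) when the lock is
 * released.
 */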
1684 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1685 {
1686         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1687                 TCP_CHECK_TIMER(sk);
1688                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1689                         goto reset;
1690                 TCP_CHECK_TIMER(sk);
1691                 return 0;
1692         }
1693
1694         if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1695                 goto csum_err;
1696
1697         if (sk->sk_state == TCP_LISTEN) {
1698                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1699                 if (!nsk)
1700                         goto discard;
1701
1702                 if (nsk != sk) {
1703                         if (tcp_child_process(sk, nsk, skb))
1704                                 goto reset;
1705                         return 0;
1706                 }
1707         }
1708
1709         TCP_CHECK_TIMER(sk);
1710         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1711                 goto reset;
1712         TCP_CHECK_TIMER(sk);
1713         return 0;
1714
1715 reset:
1716         tcp_v4_send_reset(skb);
1717 discard:
1718         kfree_skb(skb);
1719         /* Be careful here. If this function gets more complicated and
1720          * gcc suffers from register pressure on the x86, sk (in %ebx)
1721          * might be destroyed here. The current version compiles correctly,
1722          * but you have been warned.
1723          */
1724         return 0;
1725
1726 csum_err:
1727         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1728         goto discard;
1729 }
1730
1731 /*
1732  *      From tcp_input.c
1733  */
1734
1735 int tcp_v4_rcv(struct sk_buff *skb)
1736 {
1737         struct tcphdr *th;
1738         struct sock *sk;
1739         int ret;
1740
1741         if (skb->pkt_type != PACKET_HOST)
1742                 goto discard_it;
1743
1744         /* Count it even if it's bad */
1745         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1746
1747         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1748                 goto discard_it;
1749
1750         th = skb->h.th;
1751
1752         if (th->doff < sizeof(struct tcphdr) / 4)
1753                 goto bad_packet;
1754         if (!pskb_may_pull(skb, th->doff * 4))
1755                 goto discard_it;
1756
1757         /* An explanation is required here, I think.
1758          * Packet length and doff are validated by header prediction,
1759          * provided the case of th->doff == 0 is eliminated.
1760          * So, we defer the checks. */
1761         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1762              tcp_v4_checksum_init(skb) < 0))
1763                 goto bad_packet;
1764
1765         th = skb->h.th;
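        /* Note: SYN and FIN each occupy one unit of sequence space,
         * hence the th->syn + th->fin terms in end_seq below.
         */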
1766         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1767         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1768                                     skb->len - th->doff * 4);
1769         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1770         TCP_SKB_CB(skb)->when    = 0;
1771         TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
1772         TCP_SKB_CB(skb)->sacked  = 0;
1773
1774         sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1775                              skb->nh.iph->daddr, ntohs(th->dest),
1776                              tcp_v4_iif(skb));
1777
1778         if (!sk)
1779                 goto no_tcp_socket;
1780
1781 process:
1782         if (sk->sk_state == TCP_TIME_WAIT)
1783                 goto do_time_wait;
1784
1785         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1786                 goto discard_and_relse;
1787
1788         if (sk_filter(sk, skb, 0))
1789                 goto discard_and_relse;
1790
1791         skb->dev = NULL;
1792
1793         bh_lock_sock(sk);
1794         ret = 0;
1795         if (!sock_owned_by_user(sk)) {
1796                 if (!tcp_prequeue(sk, skb))
1797                         ret = tcp_v4_do_rcv(sk, skb);
1798         } else
1799                 sk_add_backlog(sk, skb);
1800         bh_unlock_sock(sk);
1801
1802         sock_put(sk);
1803
1804         return ret;
1805
1806 no_tcp_socket:
1807         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1808                 goto discard_it;
1809
1810         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1811 bad_packet:
1812                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1813         } else {
1814                 tcp_v4_send_reset(skb);
1815         }
1816
1817 discard_it:
1818         /* Discard frame. */
1819         kfree_skb(skb);
1820         return 0;
1821
1822 discard_and_relse:
1823         sock_put(sk);
1824         goto discard_it;
1825
1826 do_time_wait:
1827         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1828                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1829                 goto discard_it;
1830         }
1831
1832         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1833                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1834                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1835                 goto discard_it;
1836         }
1837         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1838                                            skb, th, skb->len)) {
1839         case TCP_TW_SYN: {
1840                 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1841                                                           ntohs(th->dest),
1842                                                           tcp_v4_iif(skb));
1843                 if (sk2) {
1844                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1845                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1846                         sk = sk2;
1847                         goto process;
1848                 }
1849                 /* Fall through to ACK */
1850         }
1851         case TCP_TW_ACK:
1852                 tcp_v4_timewait_ack(sk, skb);
1853                 break;
1854         case TCP_TW_RST:
1855                 goto no_tcp_socket;
1856         case TCP_TW_SUCCESS:;
1857         }
1858         goto discard_it;
1859 }
1860
1861 /* With per-bucket locks this operation is not atomic, so
1862  * this version is no worse.
1863  */
1864 static void __tcp_v4_rehash(struct sock *sk)
1865 {
1866         sk->sk_prot->unhash(sk);
1867         sk->sk_prot->hash(sk);
1868 }
1869
1870 static int tcp_v4_reselect_saddr(struct sock *sk)
1871 {
1872         struct inet_opt *inet = inet_sk(sk);
1873         int err;
1874         struct rtable *rt;
1875         __u32 old_saddr = inet->saddr;
1876         __u32 new_saddr;
1877         __u32 daddr = inet->daddr;
1878
1879         if (inet->opt && inet->opt->srr)
1880                 daddr = inet->opt->faddr;
1881
1882         /* Query new route. */
1883         err = ip_route_connect(&rt, daddr, 0,
1884                                RT_TOS(inet->tos) | sk->sk_localroute,
1885                                sk->sk_bound_dev_if,
1886                                IPPROTO_TCP,
1887                                inet->sport, inet->dport, sk);
1888         if (err)
1889                 return err;
1890
1891         __sk_dst_set(sk, &rt->u.dst);
1892         tcp_v4_setup_caps(sk, &rt->u.dst);
1893         tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1894
1895         new_saddr = rt->rt_src;
1896
1897         if (new_saddr == old_saddr)
1898                 return 0;
1899
1900         if (sysctl_ip_dynaddr > 1) {
1901                 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1902                                  "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1903                        NIPQUAD(old_saddr),
1904                        NIPQUAD(new_saddr));
1905         }
1906
1907         inet->saddr = new_saddr;
1908         inet->rcv_saddr = new_saddr;
1909
1910         /* XXX The one ugly spot where we really need to
1911          * XXX change the socket's identity after
1912          * XXX it has entered the hashes. -DaveM
1913          *
1914          * Besides that, it does not check for connection
1915          * uniqueness. Expect trouble.
1916          */
1917         __tcp_v4_rehash(sk);
1918         return 0;
1919 }
1920
1921 int tcp_v4_rebuild_header(struct sock *sk)
1922 {
1923         struct inet_opt *inet = inet_sk(sk);
1924         struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1925         u32 daddr;
1926         int err;
1927
1928         /* Route is OK, nothing to do. */
1929         if (rt)
1930                 return 0;
1931
1932         /* Reroute. */
1933         daddr = inet->daddr;
1934         if (inet->opt && inet->opt->srr)
1935                 daddr = inet->opt->faddr;
1936
1937         {
1938                 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1939                                     .nl_u = { .ip4_u =
1940                                               { .daddr = daddr,
1941                                                 .saddr = inet->saddr,
1942                                                 .tos = RT_CONN_FLAGS(sk) } },
1943                                     .proto = IPPROTO_TCP,
1944                                     .uli_u = { .ports =
1945                                                { .sport = inet->sport,
1946                                                  .dport = inet->dport } } };
1947
1948                 err = ip_route_output_flow(&rt, &fl, sk, 0);
1949         }
1950         if (!err) {
1951                 __sk_dst_set(sk, &rt->u.dst);
1952                 tcp_v4_setup_caps(sk, &rt->u.dst);
1953                 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1954                 return 0;
1955         }
1956
1957         /* Routing failed... */
1958         sk->sk_route_caps = 0;
1959
1960         if (!sysctl_ip_dynaddr ||
1961             sk->sk_state != TCP_SYN_SENT ||
1962             (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1963             (err = tcp_v4_reselect_saddr(sk)) != 0)
1964                 sk->sk_err_soft = -err;
1965
1966         return err;
1967 }
1968
1969 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1970 {
1971         struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
1972         struct inet_opt *inet = inet_sk(sk);
1973
1974         sin->sin_family         = AF_INET;
1975         sin->sin_addr.s_addr    = inet->daddr;
1976         sin->sin_port           = inet->dport;
1977 }
1978
1979 /* VJ's idea. Save the last timestamp seen from this destination
1980  * and hold it at least for the normal timewait interval, to use for
1981  * duplicate segment detection in subsequent connections before they
1982  * enter the synchronized state.
1983  */
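/* Both helpers below apply the same update rule (a sketch): overwrite
 * the cached (tcp_ts_stamp, tcp_ts) pair only if our ts_recent is at
 * least as new as the cached timestamp, or if the cached record is
 * more than TCP_PAWS_MSL seconds old and was taken no later than ours,
 * so a stale record can never shadow a fresher one.
 */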
1984
1985 int tcp_v4_remember_stamp(struct sock *sk)
1986 {
1987         struct inet_opt *inet = inet_sk(sk);
1988         struct tcp_opt *tp = tcp_sk(sk);
1989         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1990         struct inet_peer *peer = NULL;
1991         int release_it = 0;
1992
1993         if (!rt || rt->rt_dst != inet->daddr) {
1994                 peer = inet_getpeer(inet->daddr, 1);
1995                 release_it = 1;
1996         } else {
1997                 if (!rt->peer)
1998                         rt_bind_peer(rt, 1);
1999                 peer = rt->peer;
2000         }
2001
2002         if (peer) {
2003                 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2004                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2005                      peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2006                         peer->tcp_ts_stamp = tp->ts_recent_stamp;
2007                         peer->tcp_ts = tp->ts_recent;
2008                 }
2009                 if (release_it)
2010                         inet_putpeer(peer);
2011                 return 1;
2012         }
2013
2014         return 0;
2015 }
2016
2017 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2018 {
2019         struct inet_peer *peer = NULL;
2020
2021         peer = inet_getpeer(tw->tw_daddr, 1);
2022
2023         if (peer) {
2024                 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2025                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2026                      peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2027                         peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2028                         peer->tcp_ts = tw->tw_ts_recent;
2029                 }
2030                 inet_putpeer(peer);
2031                 return 1;
2032         }
2033
2034         return 0;
2035 }
2036
2037 struct tcp_func ipv4_specific = {
2038         .queue_xmit     =       ip_queue_xmit,
2039         .send_check     =       tcp_v4_send_check,
2040         .rebuild_header =       tcp_v4_rebuild_header,
2041         .conn_request   =       tcp_v4_conn_request,
2042         .syn_recv_sock  =       tcp_v4_syn_recv_sock,
2043         .remember_stamp =       tcp_v4_remember_stamp,
2044         .net_header_len =       sizeof(struct iphdr),
2045         .setsockopt     =       ip_setsockopt,
2046         .getsockopt     =       ip_getsockopt,
2047         .addr2sockaddr  =       v4_addr2sockaddr,
2048         .sockaddr_len   =       sizeof(struct sockaddr_in),
2049 };
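
/* This table is installed per socket in tcp_v4_init_sock() below via
 * tp->af_specific = &ipv4_specific; the IPv6 code provides a parallel
 * table, so the af-independent TCP core never tests the family
 * directly.
 */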
2050
2051 /* NOTE: A lot of things are set to zero explicitly by the call to
2052  *       sk_alloc(), so they need not be done here.
2053  */
2054 static int tcp_v4_init_sock(struct sock *sk)
2055 {
2056         struct tcp_opt *tp = tcp_sk(sk);
2057
2058         skb_queue_head_init(&tp->out_of_order_queue);
2059         tcp_init_xmit_timers(sk);
2060         tcp_prequeue_init(tp);
2061
2062         tp->rto  = TCP_TIMEOUT_INIT;
2063         tp->mdev = TCP_TIMEOUT_INIT;
2064
2065         /* So many TCP implementations out there (incorrectly) count the
2066          * initial SYN frame in their delayed-ACK and congestion control
2067          * algorithms that we must have the following bandaid to talk
2068          * efficiently to them.  -DaveM
2069          */
2070         tp->snd_cwnd = 2;
2071
2072         /* See draft-stevens-tcpca-spec-01 for discussion of the
2073          * initialization of these values.
2074          */
2075         tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
2076         tp->snd_cwnd_clamp = ~0;
2077         tp->mss_cache_std = tp->mss_cache = 536;
2078
2079         tp->reordering = sysctl_tcp_reordering;
2080
2081         sk->sk_state = TCP_CLOSE;
2082
2083         sk->sk_write_space = sk_stream_write_space;
2084         sk->sk_use_write_queue = 1;
2085
2086         tp->af_specific = &ipv4_specific;
2087
2088         sk->sk_sndbuf = sysctl_tcp_wmem[1];
2089         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2090
2091         atomic_inc(&tcp_sockets_allocated);
2092
2093         return 0;
2094 }
2095
2096 int tcp_v4_destroy_sock(struct sock *sk)
2097 {
2098         struct tcp_opt *tp = tcp_sk(sk);
2099
2100         tcp_clear_xmit_timers(sk);
2101
2102         /* Clean up the write buffer. */
2103         sk_stream_writequeue_purge(sk);
2104
2105         /* Clean up our, hopefully empty, out_of_order_queue. */
2106         __skb_queue_purge(&tp->out_of_order_queue);
2107
2108         /* Clean up the prequeue; it really must be empty. */
2109         __skb_queue_purge(&tp->ucopy.prequeue);
2110
2111         /* Clean up a referenced TCP bind bucket. */
2112         if (tp->bind_hash)
2113                 tcp_put_port(sk);
2114
2115         /*
2116          * If sendmsg cached page exists, toss it.
2117          */
2118         if (sk->sk_sndmsg_page) {
2119                 __free_page(sk->sk_sndmsg_page);
2120                 sk->sk_sndmsg_page = NULL;
2121         }
2122
2123         atomic_dec(&tcp_sockets_allocated);
2124
2125         return 0;
2126 }
2127
2128 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2129
2130 #ifdef CONFIG_PROC_FS
2131 /* Proc filesystem TCP sock list dumping. */
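
/* Iteration order for the dump: listening sockets (including the
 * embryonic open_requests hanging off each listener's SYN table), then
 * the established hash, then the TIME_WAIT half of the same table,
 * which lives tcp_ehash_size buckets above the established half.
 */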
2132
2133 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2134 {
2135         return hlist_empty(head) ? NULL :
2136                 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2137 }
2138
2139 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2140 {
2141         return tw->tw_node.next ?
2142                 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2143 }
2144
2145 static void *listening_get_next(struct seq_file *seq, void *cur)
2146 {
2147         struct tcp_opt *tp;
2148         struct hlist_node *node;
2149         struct sock *sk = cur;
2150         struct tcp_iter_state* st = seq->private;
2151
2152         if (!sk) {
2153                 st->bucket = 0;
2154                 sk = sk_head(&tcp_listening_hash[0]);
2155                 goto get_sk;
2156         }
2157
2158         ++st->num;
2159
2160         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2161                 struct open_request *req = cur;
2162
2163                 tp = tcp_sk(st->syn_wait_sk);
2164                 req = req->dl_next;
2165                 while (1) {
2166                         while (req) {
2167                                 if (req->class->family == st->family) {
2168                                         cur = req;
2169                                         goto out;
2170                                 }
2171                                 req = req->dl_next;
2172                         }
2173                         if (++st->sbucket >= TCP_SYNQ_HSIZE)
2174                                 break;
2175 get_req:
2176                         req = tp->listen_opt->syn_table[st->sbucket];
2177                 }
2178                 sk        = sk_next(st->syn_wait_sk);
2179                 st->state = TCP_SEQ_STATE_LISTENING;
2180                 read_unlock_bh(&tp->syn_wait_lock);
2181         } else {
2182                 tp = tcp_sk(sk);
2183                 read_lock_bh(&tp->syn_wait_lock);
2184                 if (tp->listen_opt && tp->listen_opt->qlen)
2185                         goto start_req;
2186                 read_unlock_bh(&tp->syn_wait_lock);
2187                 sk = sk_next(sk);
2188         }
2189 get_sk:
2190         sk_for_each_from(sk, node) {
2191                 if (sk->sk_family == st->family) {
2192                         cur = sk;
2193                         goto out;
2194                 }
2195                 tp = tcp_sk(sk);
2196                 read_lock_bh(&tp->syn_wait_lock);
2197                 if (tp->listen_opt && tp->listen_opt->qlen) {
2198 start_req:
2199                         st->uid         = sock_i_uid(sk);
2200                         st->syn_wait_sk = sk;
2201                         st->state       = TCP_SEQ_STATE_OPENREQ;
2202                         st->sbucket     = 0;
2203                         goto get_req;
2204                 }
2205                 read_unlock_bh(&tp->syn_wait_lock);
2206         }
2207         if (++st->bucket < TCP_LHTABLE_SIZE) {
2208                 sk = sk_head(&tcp_listening_hash[st->bucket]);
2209                 goto get_sk;
2210         }
2211         cur = NULL;
2212 out:
2213         return cur;
2214 }
2215
2216 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2217 {
2218         void *rc = listening_get_next(seq, NULL);
2219
2220         while (rc && *pos) {
2221                 rc = listening_get_next(seq, rc);
2222                 --*pos;
2223         }
2224         return rc;
2225 }
2226
2227 static void *established_get_first(struct seq_file *seq)
2228 {
2229         struct tcp_iter_state* st = seq->private;
2230         void *rc = NULL;
2231
2232         for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2233                 struct sock *sk;
2234                 struct hlist_node *node;
2235                 struct tcp_tw_bucket *tw;
2236                
2237                 read_lock(&tcp_ehash[st->bucket].lock);
2238                 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2239                         if (sk->sk_family != st->family) {
2240                                 continue;
2241                         }
2242                         rc = sk;
2243                         goto out;
2244                 }
2245                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2246                 tw_for_each(tw, node,
2247                             &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2248                         if (tw->tw_family != st->family) {
2249                                 continue;
2250                         }
2251                         rc = tw;
2252                         goto out;
2253                 }
2254                 read_unlock(&tcp_ehash[st->bucket].lock);
2255                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2256         }
2257 out:
2258         return rc;
2259 }
2260
2261 static void *established_get_next(struct seq_file *seq, void *cur)
2262 {
2263         struct sock *sk = cur;
2264         struct tcp_tw_bucket *tw;
2265         struct hlist_node *node;
2266         struct tcp_iter_state* st = seq->private;
2267
2268         ++st->num;
2269
2270         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2271                 tw = cur;
2272                 tw = tw_next(tw);
2273 get_tw:
2274                 while (tw && tw->tw_family != st->family) {
2275                         tw = tw_next(tw);
2276                 }
2277                 if (tw) {
2278                         cur = tw;
2279                         goto out;
2280                 }
2281                 read_unlock(&tcp_ehash[st->bucket].lock);
2282                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2283                 if (++st->bucket < tcp_ehash_size) {
2284                         read_lock(&tcp_ehash[st->bucket].lock);
2285                         sk = sk_head(&tcp_ehash[st->bucket].chain);
2286                 } else {
2287                         cur = NULL;
2288                         goto out;
2289                 }
2290         } else
2291                 sk = sk_next(sk);
2292
2293         sk_for_each_from(sk, node) {
2294                 if (sk->sk_family == st->family)
2295                         goto found;
2296         }
2297
2298         st->state = TCP_SEQ_STATE_TIME_WAIT;
2299         tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2300         goto get_tw;
2301 found:
2302         cur = sk;
2303 out:
2304         return cur;
2305 }
2306
2307 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2308 {
2309         void *rc = established_get_first(seq);
2310
2311         while (rc && pos) {
2312                 rc = established_get_next(seq, rc);
2313                 --pos;
2314         }               
2315         return rc;
2316 }
2317
2318 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2319 {
2320         void *rc;
2321         struct tcp_iter_state* st = seq->private;
2322
2323         tcp_listen_lock();
2324         st->state = TCP_SEQ_STATE_LISTENING;
2325         rc        = listening_get_idx(seq, &pos);
2326
2327         if (!rc) {
2328                 tcp_listen_unlock();
2329                 local_bh_disable();
2330                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2331                 rc        = established_get_idx(seq, pos);
2332         }
2333
2334         return rc;
2335 }
2336
2337 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2338 {
2339         struct tcp_iter_state* st = seq->private;
2340         st->state = TCP_SEQ_STATE_LISTENING;
2341         st->num = 0;
2342         return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2343 }
2344
2345 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2346 {
2347         void *rc = NULL;
2348         struct tcp_iter_state* st;
2349
2350         if (v == SEQ_START_TOKEN) {
2351                 rc = tcp_get_idx(seq, 0);
2352                 goto out;
2353         }
2354         st = seq->private;
2355
2356         switch (st->state) {
2357         case TCP_SEQ_STATE_OPENREQ:
2358         case TCP_SEQ_STATE_LISTENING:
2359                 rc = listening_get_next(seq, v);
2360                 if (!rc) {
2361                         tcp_listen_unlock();
2362                         local_bh_disable();
2363                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2364                         rc        = established_get_first(seq);
2365                 }
2366                 break;
2367         case TCP_SEQ_STATE_ESTABLISHED:
2368         case TCP_SEQ_STATE_TIME_WAIT:
2369                 rc = established_get_next(seq, v);
2370                 break;
2371         }
2372 out:
2373         ++*pos;
2374         return rc;
2375 }
2376
2377 static void tcp_seq_stop(struct seq_file *seq, void *v)
2378 {
2379         struct tcp_iter_state* st = seq->private;
2380
2381         switch (st->state) {
2382         case TCP_SEQ_STATE_OPENREQ:
2383                 if (v) {
2384                         struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2385                         read_unlock_bh(&tp->syn_wait_lock);
2386                 }
2387         case TCP_SEQ_STATE_LISTENING:
2388                 if (v != SEQ_START_TOKEN)
2389                         tcp_listen_unlock();
2390                 break;
2391         case TCP_SEQ_STATE_TIME_WAIT:
2392         case TCP_SEQ_STATE_ESTABLISHED:
2393                 if (v)
2394                         read_unlock(&tcp_ehash[st->bucket].lock);
2395                 local_bh_enable();
2396                 break;
2397         }
2398 }
2399
2400 static int tcp_seq_open(struct inode *inode, struct file *file)
2401 {
2402         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2403         struct seq_file *seq;
2404         struct tcp_iter_state *s;
2405         int rc;
2406
2407         if (unlikely(afinfo == NULL))
2408                 return -EINVAL;
2409
2410         s = kmalloc(sizeof(*s), GFP_KERNEL);
2411         if (!s)
2412                 return -ENOMEM;
2413         memset(s, 0, sizeof(*s));
2414         s->family               = afinfo->family;
2415         s->seq_ops.start        = tcp_seq_start;
2416         s->seq_ops.next         = tcp_seq_next;
2417         s->seq_ops.show         = afinfo->seq_show;
2418         s->seq_ops.stop         = tcp_seq_stop;
2419
2420         rc = seq_open(file, &s->seq_ops);
2421         if (rc)
2422                 goto out_kfree;
2423         seq          = file->private_data;
2424         seq->private = s;
2425 out:
2426         return rc;
2427 out_kfree:
2428         kfree(s);
2429         goto out;
2430 }
2431
2432 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2433 {
2434         int rc = 0;
2435         struct proc_dir_entry *p;
2436
2437         if (!afinfo)
2438                 return -EINVAL;
2439         afinfo->seq_fops->owner         = afinfo->owner;
2440         afinfo->seq_fops->open          = tcp_seq_open;
2441         afinfo->seq_fops->read          = seq_read;
2442         afinfo->seq_fops->llseek        = seq_lseek;
2443         afinfo->seq_fops->release       = seq_release_private;
2444         
2445         p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2446         if (p)
2447                 p->data = afinfo;
2448         else
2449                 rc = -ENOMEM;
2450         return rc;
2451 }
2452
2453 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2454 {
2455         if (!afinfo)
2456                 return;
2457         proc_net_remove(afinfo->name);
2458         memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
2459 }
2460
2461 static void get_openreq4(struct sock *sk, struct open_request *req,
2462                          char *tmpbuf, int i, int uid)
2463 {
2464         int ttd = req->expires - jiffies;
2465
2466         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2467                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2468                 i,
2469                 req->af.v4_req.loc_addr,
2470                 ntohs(inet_sk(sk)->sport),
2471                 req->af.v4_req.rmt_addr,
2472                 ntohs(req->rmt_port),
2473                 TCP_SYN_RECV,
2474                 0, 0, /* could print option size, but that is af dependent. */
2475                 1,    /* timers active (only the expire timer) */
2476                 jiffies_to_clock_t(ttd),
2477                 req->retrans,
2478                 uid,
2479                 0,  /* non standard timer */
2480                 0, /* open_requests have no inode */
2481                 atomic_read(&sk->sk_refcnt),
2482                 req);
2483 }
2484
2485 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2486 {
2487         int timer_active;
2488         unsigned long timer_expires;
2489         struct tcp_opt *tp = tcp_sk(sp);
2490         struct inet_opt *inet = inet_sk(sp);
2491         unsigned int dest = inet->daddr;
2492         unsigned int src = inet->rcv_saddr;
2493         __u16 destp = ntohs(inet->dport);
2494         __u16 srcp = ntohs(inet->sport);
2495
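        /* Timer codes reported in the "tr" column of /proc/net/tcp:
         * 0 = no timer pending, 1 = retransmit, 2 = sk_timer (keepalive),
         * 3 = TIME_WAIT (see get_timewait4_sock() below), 4 = zero
         * window probe.
         */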
2496         if (tp->pending == TCP_TIME_RETRANS) {
2497                 timer_active    = 1;
2498                 timer_expires   = tp->timeout;
2499         } else if (tp->pending == TCP_TIME_PROBE0) {
2500                 timer_active    = 4;
2501                 timer_expires   = tp->timeout;
2502         } else if (timer_pending(&sp->sk_timer)) {
2503                 timer_active    = 2;
2504                 timer_expires   = sp->sk_timer.expires;
2505         } else {
2506                 timer_active    = 0;
2507                 timer_expires = jiffies;
2508         }
2509
2510         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2511                         "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2512                 i, src, srcp, dest, destp, sp->sk_state,
2513                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2514                 timer_active,
2515                 jiffies_to_clock_t(timer_expires - jiffies),
2516                 tp->retransmits,
2517                 sock_i_uid(sp),
2518                 tp->probes_out,
2519                 sock_i_ino(sp),
2520                 atomic_read(&sp->sk_refcnt), sp,
2521                 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2522                 tp->snd_cwnd,
2523                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2524 }
2525
2526 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2527 {
2528         unsigned int dest, src;
2529         __u16 destp, srcp;
2530         int ttd = tw->tw_ttd - jiffies;
2531
2532         if (ttd < 0)
2533                 ttd = 0;
2534
2535         dest  = tw->tw_daddr;
2536         src   = tw->tw_rcv_saddr;
2537         destp = ntohs(tw->tw_dport);
2538         srcp  = ntohs(tw->tw_sport);
2539
2540         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2541                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2542                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2543                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2544                 atomic_read(&tw->tw_refcnt), tw);
2545 }
2546
2547 #define TMPSZ 150
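
/* TMPSZ bounds one formatted row. An established-socket row looks
 * roughly like this, wrapped here for the comment (illustrative values
 * only):
 *
 *    0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000
 *    00000000     0        0 1234 1 c0d1e2f3 300 0 0 2 -1
 */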
2548
2549 static int tcp4_seq_show(struct seq_file *seq, void *v)
2550 {
2551         struct tcp_iter_state* st;
2552         char tmpbuf[TMPSZ + 1];
2553
2554         if (v == SEQ_START_TOKEN) {
2555                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2556                            "  sl  local_address rem_address   st tx_queue "
2557                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2558                            "inode");
2559                 goto out;
2560         }
2561         st = seq->private;
2562
2563         switch (st->state) {
2564         case TCP_SEQ_STATE_LISTENING:
2565         case TCP_SEQ_STATE_ESTABLISHED:
2566                 get_tcp4_sock(v, tmpbuf, st->num);
2567                 break;
2568         case TCP_SEQ_STATE_OPENREQ:
2569                 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2570                 break;
2571         case TCP_SEQ_STATE_TIME_WAIT:
2572                 get_timewait4_sock(v, tmpbuf, st->num);
2573                 break;
2574         }
2575         seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2576 out:
2577         return 0;
2578 }
2579
2580 static struct file_operations tcp4_seq_fops;
2581 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2582         .owner          = THIS_MODULE,
2583         .name           = "tcp",
2584         .family         = AF_INET,
2585         .seq_show       = tcp4_seq_show,
2586         .seq_fops       = &tcp4_seq_fops,
2587 };
2588
2589 int __init tcp4_proc_init(void)
2590 {
2591         return tcp_proc_register(&tcp4_seq_afinfo);
2592 }
2593
2594 void tcp4_proc_exit(void)
2595 {
2596         tcp_proc_unregister(&tcp4_seq_afinfo);
2597 }
2598 #endif /* CONFIG_PROC_FS */
2599
2600 struct proto tcp_prot = {
2601         .name                   = "TCP",
2602         .owner                  = THIS_MODULE,
2603         .close                  = tcp_close,
2604         .connect                = tcp_v4_connect,
2605         .disconnect             = tcp_disconnect,
2606         .accept                 = tcp_accept,
2607         .ioctl                  = tcp_ioctl,
2608         .init                   = tcp_v4_init_sock,
2609         .destroy                = tcp_v4_destroy_sock,
2610         .shutdown               = tcp_shutdown,
2611         .setsockopt             = tcp_setsockopt,
2612         .getsockopt             = tcp_getsockopt,
2613         .sendmsg                = tcp_sendmsg,
2614         .recvmsg                = tcp_recvmsg,
2615         .backlog_rcv            = tcp_v4_do_rcv,
2616         .hash                   = tcp_v4_hash,
2617         .unhash                 = tcp_unhash,
2618         .get_port               = tcp_v4_get_port,
2619         .enter_memory_pressure  = tcp_enter_memory_pressure,
2620         .sockets_allocated      = &tcp_sockets_allocated,
2621         .memory_allocated       = &tcp_memory_allocated,
2622         .memory_pressure        = &tcp_memory_pressure,
2623         .sysctl_mem             = sysctl_tcp_mem,
2624         .sysctl_wmem            = sysctl_tcp_wmem,
2625         .sysctl_rmem            = sysctl_tcp_rmem,
2626         .max_header             = MAX_TCP_HEADER,
2627         .slab_obj_size          = sizeof(struct tcp_sock),
2628 };
2629
2630
2631
2632 void __init tcp_v4_init(struct net_proto_family *ops)
2633 {
2634         int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2635         if (err < 0)
2636                 panic("Failed to create the TCP control socket.\n");
2637         tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
2638         inet_sk(tcp_socket->sk)->uc_ttl = -1;
2639
2640         /* Unhash it so that IP input processing does not even
2641          * see it; we do not wish this socket to see incoming
2642          * packets.
2643          */
2644         tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2645 }
2646
2647 EXPORT_SYMBOL(ipv4_specific);
2648 EXPORT_SYMBOL(tcp_bind_hash);
2649 EXPORT_SYMBOL(tcp_bucket_create);
2650 EXPORT_SYMBOL(tcp_hashinfo);
2651 EXPORT_SYMBOL(tcp_inherit_port);
2652 EXPORT_SYMBOL(tcp_listen_wlock);
2653 EXPORT_SYMBOL(tcp_port_rover);
2654 EXPORT_SYMBOL(tcp_prot);
2655 EXPORT_SYMBOL(tcp_put_port);
2656 EXPORT_SYMBOL(tcp_unhash);
2657 EXPORT_SYMBOL(tcp_v4_conn_request);
2658 EXPORT_SYMBOL(tcp_v4_connect);
2659 EXPORT_SYMBOL(tcp_v4_do_rcv);
2660 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2661 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2662 EXPORT_SYMBOL(tcp_v4_send_check);
2663 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2664
2665 #ifdef CONFIG_PROC_FS
2666 EXPORT_SYMBOL(tcp_proc_register);
2667 EXPORT_SYMBOL(tcp_proc_unregister);
2668 #endif
2669 EXPORT_SYMBOL(sysctl_local_port_range);
2670 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2671 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2672