linux-2.6.git (vserver 2.0 rc7) / net/ipv4/tcp_ipv4.c
1 /*
2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
3  *              operating system.  INET is implemented using the  BSD Socket
4  *              interface as the means of communication with the user level.
5  *
6  *              Implementation of the Transmission Control Protocol(TCP).
7  *
8  * Version:     $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
9  *
10  *              IPv4 specific functions
11  *
12  *
13  *              code split from:
14  *              linux/ipv4/tcp.c
15  *              linux/ipv4/tcp_input.c
16  *              linux/ipv4/tcp_output.c
17  *
18  *              See tcp.c for author information
19  *
20  *      This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25
26 /*
27  * Changes:
28  *              David S. Miller :       New socket lookup architecture.
29  *                                      This code is dedicated to John Dyson.
30  *              David S. Miller :       Change semantics of established hash,
31  *                                      half is devoted to TIME_WAIT sockets
32  *                                      and the rest go in the other half.
33  *              Andi Kleen :            Add support for syncookies and fixed
34  *                                      some bugs: ip options weren't passed to
35  *                                      the TCP layer, missed a check for an
36  *                                      ACK bit.
37  *              Andi Kleen :            Implemented fast path mtu discovery.
38  *                                      Fixed many serious bugs in the
39  *                                      open_request handling and moved
40  *                                      most of it into the af independent code.
41  *                                      Added tail drop and some other bugfixes.
42  *                                      Added new listen semantics.
43  *              Mike McLagan    :       Routing by source
44  *      Juan Jose Ciarlante:            ip_dynaddr bits
45  *              Andi Kleen:             various fixes.
46  *      Vitaly E. Lavrov        :       Transparent proxy revived after a
47  *                                      year-long coma.
48  *      Andi Kleen              :       Fix new listen.
49  *      Andi Kleen              :       Fix accept error reporting.
50  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
51  *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
52  *                                      a single port at the same time.
53  */
54
55 #include <linux/config.h>
56
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
65
66 #include <net/icmp.h>
67 #include <net/tcp.h>
68 #include <net/ipv6.h>
69 #include <net/inet_common.h>
70 #include <net/xfrm.h>
71
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/vserver/debug.h>
78
79 extern int sysctl_ip_dynaddr;
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
82
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
85
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
88
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
90                        struct sk_buff *skb);
91
92 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
93         .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
94         .__tcp_lhash_users      =       ATOMIC_INIT(0),
95         .__tcp_lhash_wait
96           = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
97         .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
98 };
99
100 /*
101  * This array holds the first and last local port number.
102  * For high-usage systems, use sysctl to change this to
103  * 32768-61000
104  */
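/* This range is the one adjusted at run time by the net.ipv4.ip_local_port_range
 * sysctl (/proc/sys/net/ipv4/ip_local_port_range).
 */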
105 int sysctl_local_port_range[2] = { 1024, 4999 };
106 int tcp_port_rover = 1024 - 1;
107
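/* Compute the established-hash bucket for a connection by XOR-folding the
 * (local address, local port, remote address, remote port) tuple and masking
 * with tcp_ehash_size - 1, which assumes the table size is a power of two.
 */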
108 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
109                                  __u32 faddr, __u16 fport)
110 {
111         int h = (laddr ^ lport) ^ (faddr ^ fport);
112         h ^= h >> 16;
113         h ^= h >> 8;
114         return h & (tcp_ehash_size - 1);
115 }
116
117 static __inline__ int tcp_sk_hashfn(struct sock *sk)
118 {
119         struct inet_sock *inet = inet_sk(sk);
120         __u32 laddr = inet->rcv_saddr;
121         __u16 lport = inet->num;
122         __u32 faddr = inet->daddr;
123         __u16 fport = inet->dport;
124
125         return tcp_hashfn(laddr, lport, faddr, fport);
126 }
127
128 /* Allocate and initialize a new TCP local port bind bucket.
129  * The bindhash mutex for snum's hash chain must be held here.
130  */
131 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
132                                           unsigned short snum)
133 {
134         struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
135                                                       SLAB_ATOMIC);
136         if (tb) {
137                 tb->port = snum;
138                 tb->fastreuse = 0;
139                 INIT_HLIST_HEAD(&tb->owners);
140                 hlist_add_head(&tb->node, &head->chain);
141         }
142         return tb;
143 }
144
145 /* Caller must hold hashbucket lock for this tb with local BH disabled */
146 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
147 {
148         if (hlist_empty(&tb->owners)) {
149                 __hlist_del(&tb->node);
150                 kmem_cache_free(tcp_bucket_cachep, tb);
151         }
152 }
153
154 /* Caller must disable local BH processing. */
155 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
156 {
157         struct tcp_bind_hashbucket *head =
158                                 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
159         struct tcp_bind_bucket *tb;
160
161         spin_lock(&head->lock);
162         tb = tcp_sk(sk)->bind_hash;
163         sk_add_bind_node(child, &tb->owners);
164         tcp_sk(child)->bind_hash = tb;
165         spin_unlock(&head->lock);
166 }
167
168 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
169 {
170         local_bh_disable();
171         __tcp_inherit_port(sk, child);
172         local_bh_enable();
173 }
174
175 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
176                    unsigned short snum)
177 {
178         inet_sk(sk)->num = snum;
179         sk_add_bind_node(sk, &tb->owners);
180         tcp_sk(sk)->bind_hash = tb;
181 }
182
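/* Walk the owners of a bind bucket and report whether binding sk to the same
 * port would conflict with an existing socket, taking SO_REUSEADDR, the bound
 * device (if any) and the vserver nx_info address overlap check
 * (nx_addr_conflict) into account.
 */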
183 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
184 {
185         struct sock *sk2;
186         struct hlist_node *node;
187         int reuse = sk->sk_reuse;
188
189         sk_for_each_bound(sk2, node, &tb->owners) {
190                 if (sk != sk2 &&
191                     !tcp_v6_ipv6only(sk2) &&
192                     (!sk->sk_bound_dev_if ||
193                      !sk2->sk_bound_dev_if ||
194                      sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
195                         if (!reuse || !sk2->sk_reuse ||
196                             sk2->sk_state == TCP_LISTEN) {
197                                 if (nx_addr_conflict(sk->sk_nx_info,
198                                         tcp_v4_rcv_saddr(sk), sk2))
199                                         break;
200                         }
201                 }
202         }
203         return node != NULL;
204 }
205
206 /* Obtain a reference to a local port for the given sock;
207  * if snum is zero it means select any available local port.
208  */
209 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
210 {
211         struct tcp_bind_hashbucket *head;
212         struct hlist_node *node;
213         struct tcp_bind_bucket *tb;
214         int ret;
215
216         local_bh_disable();
217         if (!snum) {
218                 int low = sysctl_local_port_range[0];
219                 int high = sysctl_local_port_range[1];
220                 int remaining = (high - low) + 1;
221                 int rover;
222
223                 spin_lock(&tcp_portalloc_lock);
224                 if (tcp_port_rover < low)
225                         rover = low;
226                 else
227                         rover = tcp_port_rover;
228                 do {
229                         rover++;
230                         if (rover > high)
231                                 rover = low;
232                         head = &tcp_bhash[tcp_bhashfn(rover)];
233                         spin_lock(&head->lock);
234                         tb_for_each(tb, node, &head->chain)
235                                 if (tb->port == rover)
236                                         goto next;
237                         break;
238                 next:
239                         spin_unlock(&head->lock);
240                 } while (--remaining > 0);
241                 tcp_port_rover = rover;
242                 spin_unlock(&tcp_portalloc_lock);
243
244                 /* Exhausted local port range during search? */
245                 ret = 1;
246                 if (remaining <= 0)
247                         goto fail;
248
249                 /* OK, here is the one we will use.  HEAD is
250                  * non-NULL and we hold its lock.
251                  */
252                 snum = rover;
253         } else {
254                 head = &tcp_bhash[tcp_bhashfn(snum)];
255                 spin_lock(&head->lock);
256                 tb_for_each(tb, node, &head->chain)
257                         if (tb->port == snum)
258                                 goto tb_found;
259         }
260         tb = NULL;
261         goto tb_not_found;
262 tb_found:
263         if (!hlist_empty(&tb->owners)) {
264                 if (sk->sk_reuse > 1)
265                         goto success;
266                 if (tb->fastreuse > 0 &&
267                     sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
268                         goto success;
269                 } else {
270                         ret = 1;
271                         if (tcp_bind_conflict(sk, tb))
272                                 goto fail_unlock;
273                 }
274         }
275 tb_not_found:
276         ret = 1;
277         if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
278                 goto fail_unlock;
279         if (hlist_empty(&tb->owners)) {
280                 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
281                         tb->fastreuse = 1;
282                 else
283                         tb->fastreuse = 0;
284         } else if (tb->fastreuse &&
285                    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
286                 tb->fastreuse = 0;
287 success:
288         if (!tcp_sk(sk)->bind_hash)
289                 tcp_bind_hash(sk, tb, snum);
290         BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
291         ret = 0;
292
293 fail_unlock:
294         spin_unlock(&head->lock);
295 fail:
296         local_bh_enable();
297         return ret;
298 }
299
300 /* Get rid of any references to a local port held by the
301  * given sock.
302  */
303 static void __tcp_put_port(struct sock *sk)
304 {
305         struct inet_sock *inet = inet_sk(sk);
306         struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
307         struct tcp_bind_bucket *tb;
308
309         spin_lock(&head->lock);
310         tb = tcp_sk(sk)->bind_hash;
311         __sk_del_bind_node(sk);
312         tcp_sk(sk)->bind_hash = NULL;
313         inet->num = 0;
314         tcp_bucket_destroy(tb);
315         spin_unlock(&head->lock);
316 }
317
318 void tcp_put_port(struct sock *sk)
319 {
320         local_bh_disable();
321         __tcp_put_port(sk);
322         local_bh_enable();
323 }
324
325 /* Waiting without WQ_FLAG_EXCLUSIVE is fine on UP but can be very bad on SMP.
326  * Look: when several writers sleep and the reader wakes them up, all but one
327  * immediately hit the write lock and grab all the CPUs. Exclusive sleep solves
328  * this, _but_ remember that it adds useless work on UP machines (a wake-up on
329  * each exclusive lock release). It should really be ifdefed.
330  */
331
332 void tcp_listen_wlock(void)
333 {
334         write_lock(&tcp_lhash_lock);
335
336         if (atomic_read(&tcp_lhash_users)) {
337                 DEFINE_WAIT(wait);
338
339                 for (;;) {
340                         prepare_to_wait_exclusive(&tcp_lhash_wait,
341                                                 &wait, TASK_UNINTERRUPTIBLE);
342                         if (!atomic_read(&tcp_lhash_users))
343                                 break;
344                         write_unlock_bh(&tcp_lhash_lock);
345                         schedule();
346                         write_lock_bh(&tcp_lhash_lock);
347                 }
348
349                 finish_wait(&tcp_lhash_wait, &wait);
350         }
351 }
352
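/* Insert sk into the listening hash or the established hash, depending on its
 * state, and bump the protocol use count.  Callers run with local BH disabled.
 */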
353 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
354 {
355         struct hlist_head *list;
356         rwlock_t *lock;
357
358         BUG_TRAP(sk_unhashed(sk));
359         if (listen_possible && sk->sk_state == TCP_LISTEN) {
360                 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
361                 lock = &tcp_lhash_lock;
362                 tcp_listen_wlock();
363         } else {
364                 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
365                 lock = &tcp_ehash[sk->sk_hashent].lock;
366                 write_lock(lock);
367         }
368         __sk_add_node(sk, list);
369         sock_prot_inc_use(sk->sk_prot);
370         write_unlock(lock);
371         if (listen_possible && sk->sk_state == TCP_LISTEN)
372                 wake_up(&tcp_lhash_wait);
373 }
374
375 static void tcp_v4_hash(struct sock *sk)
376 {
377         if (sk->sk_state != TCP_CLOSE) {
378                 local_bh_disable();
379                 __tcp_v4_hash(sk, 1);
380                 local_bh_enable();
381         }
382 }
383
384 void tcp_unhash(struct sock *sk)
385 {
386         rwlock_t *lock;
387
388         if (sk_unhashed(sk))
389                 goto ende;
390
391         if (sk->sk_state == TCP_LISTEN) {
392                 local_bh_disable();
393                 tcp_listen_wlock();
394                 lock = &tcp_lhash_lock;
395         } else {
396                 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
397                 lock = &head->lock;
398                 write_lock_bh(&head->lock);
399         }
400
401         if (__sk_del_node_init(sk))
402                 sock_prot_dec_use(sk->sk_prot);
403         write_unlock_bh(lock);
404
405  ende:
406         if (sk->sk_state == TCP_LISTEN)
407                 wake_up(&tcp_lhash_wait);
408 }
409
410
411 /*
412  *      Check whether a given address matches for a tcp socket
413  *
414  *      nxi:    the socket's nx_info, if any
415  *      addr:   the address to be verified
416  *      saddr:  the socket's bound address
417  */
418 static inline int tcp_addr_match (
419         struct nx_info *nxi,
420         uint32_t addr,
421         uint32_t saddr)
422 {
423         if (addr && (saddr == addr))
424                 return 1;
425         if (!saddr)
426                 return addr_in_nx_info(nxi, addr);
427         return 0;
428 }
429
430 /* Don't inline this cruft.  There are some nice properties to
431  * exploit here.  The BSD API does not allow a listening TCP
432  * to specify the remote port nor the remote address for the
433  * connection.  So always assume those are both wildcarded
434  * during the search since they can never be otherwise.
435  */
436 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
437                                              unsigned short hnum, int dif)
438 {
439         struct sock *result = NULL, *sk;
440         struct hlist_node *node;
441         int score, hiscore;
442
443         hiscore=-1;
444         sk_for_each(sk, node, head) {
445                 struct inet_sock *inet = inet_sk(sk);
446
447                 if (inet->num == hnum && !ipv6_only_sock(sk)) {
448                         __u32 rcv_saddr = inet->rcv_saddr;
449
450                         score = (sk->sk_family == PF_INET ? 1 : 0);
451                         if (tcp_addr_match(sk->sk_nx_info, daddr, rcv_saddr))
452                                 score+=2;
453                         else
454                                 continue;
455                         if (sk->sk_bound_dev_if) {
456                                 if (sk->sk_bound_dev_if != dif)
457                                         continue;
458                                 score+=2;
459                         }
460                         if (score == 5)
461                                 return sk;
462                         if (score > hiscore) {
463                                 hiscore = score;
464                                 result = sk;
465                         }
466                 }
467         }
468         return result;
469 }
470
471 /* Optimize the common listener case. */
472 static inline struct sock *tcp_v4_lookup_listener(u32 daddr,
473                 unsigned short hnum, int dif)
474 {
475         struct sock *sk = NULL;
476         struct hlist_head *head;
477
478         read_lock(&tcp_lhash_lock);
479         head = &tcp_listening_hash[tcp_lhashfn(hnum)];
480         if (!hlist_empty(head)) {
481                 struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
482
483                 if (inet->num == hnum && !sk->sk_node.next &&
484                     (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
485                     tcp_addr_match(sk->sk_nx_info, daddr, inet->rcv_saddr) &&
486                     !sk->sk_bound_dev_if)
487                         goto sherry_cache;
488                 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
489         }
490         if (sk) {
491 sherry_cache:
492                 sock_hold(sk);
493         }
494         read_unlock(&tcp_lhash_lock);
495         return sk;
496 }
497
498 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
499  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
500  *
501  * Local BH must be disabled here.
502  */
503
504 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
505                                                        u32 daddr, u16 hnum,
506                                                        int dif)
507 {
508         struct tcp_ehash_bucket *head;
509         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
510         __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
511         struct sock *sk;
512         struct hlist_node *node;
513         /* Optimize here for a direct hit; only listening connections can
514          * have wildcards anyway.
515          */
516         int hash = tcp_hashfn(daddr, hnum, saddr, sport);
517         head = &tcp_ehash[hash];
518         read_lock(&head->lock);
519         sk_for_each(sk, node, &head->chain) {
520                 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
521                         goto hit; /* You sunk my battleship! */
522         }
523
524         /* Must check for a TIME_WAIT'er before going to listener hash. */
525         sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
526                 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
527                         goto hit;
528         }
529         sk = NULL;
530 out:
531         read_unlock(&head->lock);
532         return sk;
533 hit:
534         sock_hold(sk);
535         goto out;
536 }
537
538 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
539                                            u32 daddr, u16 hnum, int dif)
540 {
541         struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
542                                                       daddr, hnum, dif);
543
544         return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
545 }
546
547 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
548                                   u16 dport, int dif)
549 {
550         struct sock *sk;
551
552         local_bh_disable();
553         sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
554         local_bh_enable();
555
556         return sk;
557 }
558
559 EXPORT_SYMBOL_GPL(tcp_v4_lookup);
560
561 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
562 {
563         return secure_tcp_sequence_number(skb->nh.iph->daddr,
564                                           skb->nh.iph->saddr,
565                                           skb->h.th->dest,
566                                           skb->h.th->source);
567 }
568
569 /* called with local bh disabled */
570 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
571                                       struct tcp_tw_bucket **twp)
572 {
573         struct inet_sock *inet = inet_sk(sk);
574         u32 daddr = inet->rcv_saddr;
575         u32 saddr = inet->daddr;
576         int dif = sk->sk_bound_dev_if;
577         TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
578         __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
579         int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
580         struct tcp_ehash_bucket *head = &tcp_ehash[hash];
581         struct sock *sk2;
582         struct hlist_node *node;
583         struct tcp_tw_bucket *tw;
584
585         write_lock(&head->lock);
586
587         /* Check TIME-WAIT sockets first. */
588         sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
589                 tw = (struct tcp_tw_bucket *)sk2;
590
591                 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
592                         struct tcp_sock *tp = tcp_sk(sk);
593
594                         /* With PAWS, it is safe from the viewpoint
595                            of data integrity. Even without PAWS it
596                            is safe provided sequence spaces do not
597                            overlap, i.e. at data rates <= 80 Mbit/sec.
598
599                            Actually, the idea is close to VJ's, only
600                            the timestamp cache is held not per host
601                            but per port pair, and the TW bucket is
602                            used as the state holder.
603
604                            If the TW bucket has already been destroyed,
605                            we fall back to VJ's scheme and use the initial
606                            timestamp retrieved from the peer table.
607                          */
608                         if (tw->tw_ts_recent_stamp &&
609                             (!twp || (sysctl_tcp_tw_reuse &&
610                                       xtime.tv_sec -
611                                       tw->tw_ts_recent_stamp > 1))) {
612                                 if ((tp->write_seq =
613                                                 tw->tw_snd_nxt + 65535 + 2) == 0)
614                                         tp->write_seq = 1;
615                                 tp->rx_opt.ts_recent       = tw->tw_ts_recent;
616                                 tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
617                                 sock_hold(sk2);
618                                 goto unique;
619                         } else
620                                 goto not_unique;
621                 }
622         }
623         tw = NULL;
624
625         /* And established part... */
626         sk_for_each(sk2, node, &head->chain) {
627                 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
628                         goto not_unique;
629         }
630
631 unique:
632         /* Must record num and sport now. Otherwise we will see
633          * a socket with a funny identity in the hash table. */
634         inet->num = lport;
635         inet->sport = htons(lport);
636         sk->sk_hashent = hash;
637         BUG_TRAP(sk_unhashed(sk));
638         __sk_add_node(sk, &head->chain);
639         sock_prot_inc_use(sk->sk_prot);
640         write_unlock(&head->lock);
641
642         if (twp) {
643                 *twp = tw;
644                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
645         } else if (tw) {
646                 /* Silly. Should hash-dance instead... */
647                 tcp_tw_deschedule(tw);
648                 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
649
650                 tcp_tw_put(tw);
651         }
652
653         return 0;
654
655 not_unique:
656         write_unlock(&head->lock);
657         return -EADDRNOTAVAIL;
658 }
659
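/* Per-connection starting offset into the ephemeral port space, derived from
 * the connection's addresses and destination port so that different
 * destinations begin their port search at different points.
 */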
660 static inline u32 connect_port_offset(const struct sock *sk)
661 {
662         const struct inet_sock *inet = inet_sk(sk);
663
664         return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr, 
665                                          inet->dport);
666 }
667
668 /*
669  * Bind a port for a connect operation and hash it.
670  */
671 static inline int tcp_v4_hash_connect(struct sock *sk)
672 {
673         unsigned short snum = inet_sk(sk)->num;
674         struct tcp_bind_hashbucket *head;
675         struct tcp_bind_bucket *tb;
676         int ret;
677
678         if (!snum) {
679                 int low = sysctl_local_port_range[0];
680                 int high = sysctl_local_port_range[1];
681                 int range = high - low;
682                 int i;
683                 int port;
684                 static u32 hint;
685                 u32 offset = hint + connect_port_offset(sk);
686                 struct hlist_node *node;
687                 struct tcp_tw_bucket *tw = NULL;
688
689                 local_bh_disable();
690                 for (i = 1; i <= range; i++) {
691                         port = low + (i + offset) % range;
692                         head = &tcp_bhash[tcp_bhashfn(port)];
693                         spin_lock(&head->lock);
694
695                         /* Does not bother with rcv_saddr checks,
696                          * because the established check is already
697                          * unique enough.
698                          */
699                         tb_for_each(tb, node, &head->chain) {
700                                 if (tb->port == port) {
701                                         BUG_TRAP(!hlist_empty(&tb->owners));
702                                         if (tb->fastreuse >= 0)
703                                                 goto next_port;
704                                         if (!__tcp_v4_check_established(sk,
705                                                                         port,
706                                                                         &tw))
707                                                 goto ok;
708                                         goto next_port;
709                                 }
710                         }
711
712                         tb = tcp_bucket_create(head, port);
713                         if (!tb) {
714                                 spin_unlock(&head->lock);
715                                 break;
716                         }
717                         tb->fastreuse = -1;
718                         goto ok;
719
720                 next_port:
721                         spin_unlock(&head->lock);
722                 }
723                 local_bh_enable();
724
725                 return -EADDRNOTAVAIL;
726
727 ok:
728                 hint += i;
729
730                 /* Head lock still held and bh's disabled */
731                 tcp_bind_hash(sk, tb, port);
732                 if (sk_unhashed(sk)) {
733                         inet_sk(sk)->sport = htons(port);
734                         __tcp_v4_hash(sk, 0);
735                 }
736                 spin_unlock(&head->lock);
737
738                 if (tw) {
739                         tcp_tw_deschedule(tw);
740                         tcp_tw_put(tw);
741                 }
742
743                 ret = 0;
744                 goto out;
745         }
746
747         head  = &tcp_bhash[tcp_bhashfn(snum)];
748         tb  = tcp_sk(sk)->bind_hash;
749         spin_lock_bh(&head->lock);
750         if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
751                 __tcp_v4_hash(sk, 0);
752                 spin_unlock_bh(&head->lock);
753                 return 0;
754         } else {
755                 spin_unlock(&head->lock);
756                 /* No definite answer... Walk to established hash table */
757                 ret = __tcp_v4_check_established(sk, snum, NULL);
758 out:
759                 local_bh_enable();
760                 return ret;
761         }
762 }
763
764 /* This will initiate an outgoing connection. */
765 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
766 {
767         struct inet_sock *inet = inet_sk(sk);
768         struct tcp_sock *tp = tcp_sk(sk);
769         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
770         struct rtable *rt;
771         u32 daddr, nexthop;
772         int tmp;
773         int err;
774
775         if (addr_len < sizeof(struct sockaddr_in))
776                 return -EINVAL;
777
778         if (usin->sin_family != AF_INET)
779                 return -EAFNOSUPPORT;
780
781         nexthop = daddr = usin->sin_addr.s_addr;
782         if (inet->opt && inet->opt->srr) {
783                 if (!daddr)
784                         return -EINVAL;
785                 nexthop = inet->opt->faddr;
786         }
787
788         tmp = ip_route_connect(&rt, nexthop, inet->saddr,
789                                RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
790                                IPPROTO_TCP,
791                                inet->sport, usin->sin_port, sk);
792         if (tmp < 0)
793                 return tmp;
794
795         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
796                 ip_rt_put(rt);
797                 return -ENETUNREACH;
798         }
799
800         if (!inet->opt || !inet->opt->srr)
801                 daddr = rt->rt_dst;
802
803         if (!inet->saddr)
804                 inet->saddr = rt->rt_src;
805         inet->rcv_saddr = inet->saddr;
806
807         if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
808                 /* Reset inherited state */
809                 tp->rx_opt.ts_recent       = 0;
810                 tp->rx_opt.ts_recent_stamp = 0;
811                 tp->write_seq              = 0;
812         }
813
814         if (sysctl_tcp_tw_recycle &&
815             !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
816                 struct inet_peer *peer = rt_get_peer(rt);
817
818                 /* VJ's idea. We save the last timestamp seen from
819                  * the destination in the peer table when entering TIME-WAIT state,
820                  * and initialize rx_opt.ts_recent from it when trying a new connection.
821                  */
822
823                 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
824                         tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
825                         tp->rx_opt.ts_recent = peer->tcp_ts;
826                 }
827         }
828
829         inet->dport = usin->sin_port;
830         inet->daddr = daddr;
831
832         tp->ext_header_len = 0;
833         if (inet->opt)
834                 tp->ext_header_len = inet->opt->optlen;
835
836         tp->rx_opt.mss_clamp = 536;
837
838         /* Socket identity is still unknown (sport may be zero).
839          * However, we set the state to SYN-SENT and, without releasing the
840          * socket lock, select a source port, enter ourselves into the hash
841          * tables and complete initialization after this.
842          */
843         tcp_set_state(sk, TCP_SYN_SENT);
844         err = tcp_v4_hash_connect(sk);
845         if (err)
846                 goto failure;
847
848         err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
849         if (err)
850                 goto failure;
851
852         /* OK, now commit destination to socket.  */
853         __sk_dst_set(sk, &rt->u.dst);
854         tcp_v4_setup_caps(sk, &rt->u.dst);
855
856         if (!tp->write_seq)
857                 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
858                                                            inet->daddr,
859                                                            inet->sport,
860                                                            usin->sin_port);
861
862         inet->id = tp->write_seq ^ jiffies;
863
864         err = tcp_connect(sk);
865         rt = NULL;
866         if (err)
867                 goto failure;
868
869         return 0;
870
871 failure:
872         /* This unhashes the socket and releases the local port, if necessary. */
873         tcp_set_state(sk, TCP_CLOSE);
874         ip_rt_put(rt);
875         sk->sk_route_caps = 0;
876         inet->dport = 0;
877         return err;
878 }
879
880 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
881 {
882         return ((struct rtable *)skb->dst)->rt_iif;
883 }
884
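/* Hash an incoming connection's (remote address, remote port) pair into the
 * per-listener SYN queue table; hash_rnd is the listener's private random
 * value, which makes the bucket choice hard for outsiders to predict.
 */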
885 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
886 {
887         return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
888 }
889
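/* Look up a pending open_request in the listener's SYN table by remote port,
 * remote address and local address.  On success, *prevp is set to the link
 * pointing at the request so the caller can unlink it.
 */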
890 static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
891                                               struct open_request ***prevp,
892                                               __u16 rport,
893                                               __u32 raddr, __u32 laddr)
894 {
895         struct tcp_listen_opt *lopt = tp->listen_opt;
896         struct open_request *req, **prev;
897
898         for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
899              (req = *prev) != NULL;
900              prev = &req->dl_next) {
901                 if (req->rmt_port == rport &&
902                     req->af.v4_req.rmt_addr == raddr &&
903                     req->af.v4_req.loc_addr == laddr &&
904                     TCP_INET_FAMILY(req->class->family)) {
905                         BUG_TRAP(!req->sk);
906                         *prevp = prev;
907                         break;
908                 }
909         }
910
911         return req;
912 }
913
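/* Link a new open_request into the listener's SYN table, set its initial
 * timeout and update the SYN queue accounting via tcp_synq_added().
 */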
914 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
915 {
916         struct tcp_sock *tp = tcp_sk(sk);
917         struct tcp_listen_opt *lopt = tp->listen_opt;
918         u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
919
920         req->expires = jiffies + TCP_TIMEOUT_INIT;
921         req->retrans = 0;
922         req->sk = NULL;
923         req->dl_next = lopt->syn_table[h];
924
925         write_lock(&tp->syn_wait_lock);
926         lopt->syn_table[h] = req;
927         write_unlock(&tp->syn_wait_lock);
928
929         tcp_synq_added(sk);
930 }
931
932
933 /*
934  * This routine does path mtu discovery as defined in RFC1191.
935  */
936 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
937                                      u32 mtu)
938 {
939         struct dst_entry *dst;
940         struct inet_sock *inet = inet_sk(sk);
941         struct tcp_sock *tp = tcp_sk(sk);
942
943         /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
944          * sent out by Linux are always < 576 bytes, so they should go through
945          * unfragmented).
946          */
947         if (sk->sk_state == TCP_LISTEN)
948                 return;
949
950         /* We don't check in the dst entry whether pmtu discovery is forbidden
951          * on this route. We just assume that no packet-too-big packets
952          * are sent back when pmtu discovery is not active.
953          * There is a small race when the user changes this flag in the
954          * route, but I think that's acceptable.
955          */
956         if ((dst = __sk_dst_check(sk, 0)) == NULL)
957                 return;
958
959         dst->ops->update_pmtu(dst, mtu);
960
961         /* Something is about to go wrong... Remember the soft error
962          * in case this connection is not able to recover.
963          */
964         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
965                 sk->sk_err_soft = EMSGSIZE;
966
967         mtu = dst_mtu(dst);
968
969         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
970             tp->pmtu_cookie > mtu) {
971                 tcp_sync_mss(sk, mtu);
972
973                 /* Resend the TCP packet because it's
974                  * clear that the old packet has been
975                  * dropped. This is the new "fast" path mtu
976                  * discovery.
977                  */
978                 tcp_simple_retransmit(sk);
979         } /* else let the usual retransmit timer handle it */
980 }
981
982 /*
983  * This routine is called by the ICMP module when it gets some
984  * sort of error condition.  If err < 0 then the socket should
985  * be closed and the error returned to the user.  If err > 0
986  * it's just the icmp type << 8 | icmp code.  After adjustment
987  * header points to the first 8 bytes of the tcp header.  We need
988  * to find the appropriate port.
989  *
990  * The locking strategy used here is very "optimistic". When
991  * someone else accesses the socket the ICMP is just dropped
992  * and for some paths there is no check at all.
993  * A more general error queue to queue errors for later handling
994  * is probably better.
995  *
996  */
997
998 void tcp_v4_err(struct sk_buff *skb, u32 info)
999 {
1000         struct iphdr *iph = (struct iphdr *)skb->data;
1001         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1002         struct tcp_sock *tp;
1003         struct inet_sock *inet;
1004         int type = skb->h.icmph->type;
1005         int code = skb->h.icmph->code;
1006         struct sock *sk;
1007         __u32 seq;
1008         int err;
1009
1010         if (skb->len < (iph->ihl << 2) + 8) {
1011                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1012                 return;
1013         }
1014
1015         sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1016                            th->source, tcp_v4_iif(skb));
1017         if (!sk) {
1018                 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1019                 return;
1020         }
1021         if (sk->sk_state == TCP_TIME_WAIT) {
1022                 tcp_tw_put((struct tcp_tw_bucket *)sk);
1023                 return;
1024         }
1025
1026         bh_lock_sock(sk);
1027         /* If too many ICMPs get dropped on busy
1028          * servers this needs to be solved differently.
1029          */
1030         if (sock_owned_by_user(sk))
1031                 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1032
1033         if (sk->sk_state == TCP_CLOSE)
1034                 goto out;
1035
1036         tp = tcp_sk(sk);
1037         seq = ntohl(th->seq);
1038         if (sk->sk_state != TCP_LISTEN &&
1039             !between(seq, tp->snd_una, tp->snd_nxt)) {
1040                 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1041                 goto out;
1042         }
1043
1044         switch (type) {
1045         case ICMP_SOURCE_QUENCH:
1046                 /* Just silently ignore these. */
1047                 goto out;
1048         case ICMP_PARAMETERPROB:
1049                 err = EPROTO;
1050                 break;
1051         case ICMP_DEST_UNREACH:
1052                 if (code > NR_ICMP_UNREACH)
1053                         goto out;
1054
1055                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1056                         if (!sock_owned_by_user(sk))
1057                                 do_pmtu_discovery(sk, iph, info);
1058                         goto out;
1059                 }
1060
1061                 err = icmp_err_convert[code].errno;
1062                 break;
1063         case ICMP_TIME_EXCEEDED:
1064                 err = EHOSTUNREACH;
1065                 break;
1066         default:
1067                 goto out;
1068         }
1069
1070         switch (sk->sk_state) {
1071                 struct open_request *req, **prev;
1072         case TCP_LISTEN:
1073                 if (sock_owned_by_user(sk))
1074                         goto out;
1075
1076                 req = tcp_v4_search_req(tp, &prev, th->dest,
1077                                         iph->daddr, iph->saddr);
1078                 if (!req)
1079                         goto out;
1080
1081                 /* ICMPs are not backlogged, hence we cannot get
1082                    an established socket here.
1083                  */
1084                 BUG_TRAP(!req->sk);
1085
1086                 if (seq != req->snt_isn) {
1087                         NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
1088                         goto out;
1089                 }
1090
1091                 /*
1092                  * Still in SYN_RECV, just remove it silently.
1093                  * There is no good way to pass the error to the newly
1094                  * created socket, and POSIX does not want network
1095                  * errors returned from accept().
1096                  */
1097                 tcp_synq_drop(sk, req, prev);
1098                 goto out;
1099
1100         case TCP_SYN_SENT:
1101         case TCP_SYN_RECV:  /* Cannot happen normally.
1102                                It can, e.g., if SYNs crossed.
1103                              */
1104                 if (!sock_owned_by_user(sk)) {
1105                         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1106                         sk->sk_err = err;
1107
1108                         sk->sk_error_report(sk);
1109
1110                         tcp_done(sk);
1111                 } else {
1112                         sk->sk_err_soft = err;
1113                 }
1114                 goto out;
1115         }
1116
1117         /* If we've already connected, we will keep trying
1118          * until we time out or the user gives up.
1119          *
1120          * RFC 1122 4.2.3.9 allows us to treat as hard errors
1121          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
1122          * but it is obsoleted by pmtu discovery).
1123          *
1124          * Note that in the modern internet, where routing is unreliable
1125          * and broken firewalls sit in every dark corner sending random
1126          * errors ordered by their masters, even these two messages finally lose
1127          * their original sense (even Linux sends invalid PORT_UNREACHs).
1128          *
1129          * Now we are in compliance with the RFCs.
1130          *                                                      --ANK (980905)
1131          */
1132
1133         inet = inet_sk(sk);
1134         if (!sock_owned_by_user(sk) && inet->recverr) {
1135                 sk->sk_err = err;
1136                 sk->sk_error_report(sk);
1137         } else  { /* Only an error on timeout */
1138                 sk->sk_err_soft = err;
1139         }
1140
1141 out:
1142         bh_unlock_sock(sk);
1143         sock_put(sk);
1144 }
1145
1146 /* This routine computes an IPv4 TCP checksum. */
1147 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1148                        struct sk_buff *skb)
1149 {
1150         struct inet_sock *inet = inet_sk(sk);
1151
1152         if (skb->ip_summed == CHECKSUM_HW) {
1153                 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1154                 skb->csum = offsetof(struct tcphdr, check);
1155         } else {
1156                 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1157                                          csum_partial((char *)th,
1158                                                       th->doff << 2,
1159                                                       skb->csum));
1160         }
1161 }
1162
1163 /*
1164  *      This routine will send an RST to the other tcp.
1165  *
1166  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
1167  *                    for the reset?
1168  *      Answer: if a packet caused an RST, it is not for a socket
1169  *              existing in our system; if it is matched to a socket,
1170  *              it is just a duplicate segment or a bug in the other side's TCP.
1171  *              So we build the reply based only on the parameters that
1172  *              arrived with the segment.
1173  *      Exception: precedence violation. We do not implement it in any case.
1174  */
1175
1176 static void tcp_v4_send_reset(struct sk_buff *skb)
1177 {
1178         struct tcphdr *th = skb->h.th;
1179         struct tcphdr rth;
1180         struct ip_reply_arg arg;
1181
1182         /* Never send a reset in response to a reset. */
1183         if (th->rst)
1184                 return;
1185
1186         if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1187                 return;
1188
1189         /* Swap the send and the receive. */
1190         memset(&rth, 0, sizeof(struct tcphdr));
1191         rth.dest   = th->source;
1192         rth.source = th->dest;
1193         rth.doff   = sizeof(struct tcphdr) / 4;
1194         rth.rst    = 1;
1195
1196         if (th->ack) {
1197                 rth.seq = th->ack_seq;
1198         } else {
1199                 rth.ack = 1;
1200                 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1201                                     skb->len - (th->doff << 2));
1202         }
1203
1204         memset(&arg, 0, sizeof arg);
1205         arg.iov[0].iov_base = (unsigned char *)&rth;
1206         arg.iov[0].iov_len  = sizeof rth;
1207         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1208                                       skb->nh.iph->saddr, /*XXX*/
1209                                       sizeof(struct tcphdr), IPPROTO_TCP, 0);
1210         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1211
1212         ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1213
1214         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1215         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1216 }
1217
1218 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1219    outside socket context, is certainly ugly. What can I do?
1220  */
1221
1222 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1223                             u32 win, u32 ts)
1224 {
1225         struct tcphdr *th = skb->h.th;
1226         struct {
1227                 struct tcphdr th;
1228                 u32 tsopt[3];
1229         } rep;
1230         struct ip_reply_arg arg;
1231
1232         memset(&rep.th, 0, sizeof(struct tcphdr));
1233         memset(&arg, 0, sizeof arg);
1234
1235         arg.iov[0].iov_base = (unsigned char *)&rep;
1236         arg.iov[0].iov_len  = sizeof(rep.th);
1237         if (ts) {
1238                 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1239                                      (TCPOPT_TIMESTAMP << 8) |
1240                                      TCPOLEN_TIMESTAMP);
1241                 rep.tsopt[1] = htonl(tcp_time_stamp);
1242                 rep.tsopt[2] = htonl(ts);
1243                 arg.iov[0].iov_len = sizeof(rep);
1244         }
1245
1246         /* Swap the send and the receive. */
1247         rep.th.dest    = th->source;
1248         rep.th.source  = th->dest;
1249         rep.th.doff    = arg.iov[0].iov_len / 4;
1250         rep.th.seq     = htonl(seq);
1251         rep.th.ack_seq = htonl(ack);
1252         rep.th.ack     = 1;
1253         rep.th.window  = htons(win);
1254
1255         arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1256                                       skb->nh.iph->saddr, /*XXX*/
1257                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
1258         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1259
1260         ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1261
1262         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1263 }
1264
1265 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1266 {
1267         struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1268
1269         tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1270                         tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1271
1272         tcp_tw_put(tw);
1273 }
1274
1275 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1276 {
1277         tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1278                         req->ts_recent);
1279 }
1280
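/* Build a flow key from the open_request (honouring any source routing option
 * saved from the SYN) and look up a route for the SYN-ACK; returns NULL and
 * bumps OUTNOROUTES on failure.
 */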
1281 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1282                                           struct open_request *req)
1283 {
1284         struct rtable *rt;
1285         struct ip_options *opt = req->af.v4_req.opt;
1286         struct flowi fl = { .oif = sk->sk_bound_dev_if,
1287                             .nl_u = { .ip4_u =
1288                                       { .daddr = ((opt && opt->srr) ?
1289                                                   opt->faddr :
1290                                                   req->af.v4_req.rmt_addr),
1291                                         .saddr = req->af.v4_req.loc_addr,
1292                                         .tos = RT_CONN_FLAGS(sk) } },
1293                             .proto = IPPROTO_TCP,
1294                             .uli_u = { .ports =
1295                                        { .sport = inet_sk(sk)->sport,
1296                                          .dport = req->rmt_port } } };
1297
1298         if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1299                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1300                 return NULL;
1301         }
1302         if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1303                 ip_rt_put(rt);
1304                 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1305                 return NULL;
1306         }
1307         return &rt->u.dst;
1308 }
1309
1310 /*
1311  *      Send a SYN-ACK after having received a SYN.
1312  *      This still operates on an open_request only, not on a big
1313  *      socket.
1314  */
1315 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1316                               struct dst_entry *dst)
1317 {
1318         int err = -1;
1319         struct sk_buff * skb;
1320
1321         /* First, grab a route. */
1322         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1323                 goto out;
1324
1325         skb = tcp_make_synack(sk, dst, req);
1326
1327         if (skb) {
1328                 struct tcphdr *th = skb->h.th;
1329
1330                 th->check = tcp_v4_check(th, skb->len,
1331                                          req->af.v4_req.loc_addr,
1332                                          req->af.v4_req.rmt_addr,
1333                                          csum_partial((char *)th, skb->len,
1334                                                       skb->csum));
1335
1336                 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1337                                             req->af.v4_req.rmt_addr,
1338                                             req->af.v4_req.opt);
1339                 if (err == NET_XMIT_CN)
1340                         err = 0;
1341         }
1342
1343 out:
1344         dst_release(dst);
1345         return err;
1346 }
1347
1348 /*
1349  *      IPv4 open_request destructor.
1350  */
1351 static void tcp_v4_or_free(struct open_request *req)
1352 {
1353         if (req->af.v4_req.opt)
1354                 kfree(req->af.v4_req.opt);
1355 }
1356
1357 static inline void syn_flood_warning(struct sk_buff *skb)
1358 {
1359         static unsigned long warntime;
1360
1361         if (time_after(jiffies, (warntime + HZ * 60))) {
1362                 warntime = jiffies;
1363                 printk(KERN_INFO
1364                        "possible SYN flooding on port %d. Sending cookies.\n",
1365                        ntohs(skb->h.th->dest));
1366         }
1367 }
1368
1369 /*
1370  * Save and compile IPv4 options into the open_request if needed.
1371  */
1372 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1373                                                      struct sk_buff *skb)
1374 {
1375         struct ip_options *opt = &(IPCB(skb)->opt);
1376         struct ip_options *dopt = NULL;
1377
1378         if (opt && opt->optlen) {
1379                 int opt_size = optlength(opt);
1380                 dopt = kmalloc(opt_size, GFP_ATOMIC);
1381                 if (dopt) {
1382                         if (ip_options_echo(dopt, skb)) {
1383                                 kfree(dopt);
1384                                 dopt = NULL;
1385                         }
1386                 }
1387         }
1388         return dopt;
1389 }
1390
1391 /*
1392  * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1393  * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1394  * It would be better to replace it with a global counter for all sockets,
1395  * but then some measure against one socket starving all the other sockets
1396  * would be needed.
1397  *
1398  * It was 128 by default. Experiments with real servers show that
1399  * it is absolutely not enough even at 100 conn/sec. 256 cures most
1400  * of the problems. This value is adjusted to 128 for very small machines
1401  * (<= 32 MB of memory) and to 1024 on normal or better ones (>= 256 MB).
1402  * Increasing it further requires changing the hash table size.
1403  */
1404 int sysctl_max_syn_backlog = 256;
1405
1406 struct or_calltable or_ipv4 = {
1407         .family         =       PF_INET,
1408         .rtx_syn_ack    =       tcp_v4_send_synack,
1409         .send_ack       =       tcp_v4_or_send_ack,
1410         .destructor     =       tcp_v4_or_free,
1411         .send_reset     =       tcp_v4_send_reset,
1412 };
1413
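/* Process an incoming SYN on a listening socket: allocate an open_request,
 * parse the TCP options, fall back to SYN cookies when the SYN queue is full
 * (if enabled), and answer with a SYN-ACK.
 */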
1414 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1415 {
1416         struct tcp_options_received tmp_opt;
1417         struct open_request *req;
1418         __u32 saddr = skb->nh.iph->saddr;
1419         __u32 daddr = skb->nh.iph->daddr;
1420         __u32 isn = TCP_SKB_CB(skb)->when;
1421         struct dst_entry *dst = NULL;
1422 #ifdef CONFIG_SYN_COOKIES
1423         int want_cookie = 0;
1424 #else
1425 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1426 #endif
1427
1428         /* Never answer SYNs sent to broadcast or multicast addresses */
1429         if (((struct rtable *)skb->dst)->rt_flags &
1430             (RTCF_BROADCAST | RTCF_MULTICAST))
1431                 goto drop;
1432
1433         /* TW buckets are converted to open requests without
1434          * limitation; they conserve resources, and the peer is
1435          * evidently a real one.
1436          */
1437         if (tcp_synq_is_full(sk) && !isn) {
1438 #ifdef CONFIG_SYN_COOKIES
1439                 if (sysctl_tcp_syncookies) {
1440                         want_cookie = 1;
1441                 } else
1442 #endif
1443                 goto drop;
1444         }
1445
1446         /* The accept backlog is full. If we have already queued enough
1447          * warm entries in the syn queue, drop the request. That is better
1448          * than clogging the syn queue with openreqs with exponentially
1449          * increasing timeouts.
1450          */
1451         if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1452                 goto drop;
1453
1454         req = tcp_openreq_alloc();
1455         if (!req)
1456                 goto drop;
1457
1458         tcp_clear_options(&tmp_opt);
1459         tmp_opt.mss_clamp = 536;
1460         tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;
1461
1462         tcp_parse_options(skb, &tmp_opt, 0);
1463
1464         if (want_cookie) {
1465                 tcp_clear_options(&tmp_opt);
1466                 tmp_opt.saw_tstamp = 0;
1467         }
1468
1469         if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
1470                 /* Some OSes (unknown ones, but I see them on web servers which
1471                  * contain information interesting only to Windows users)
1472                  * do not send their timestamp in the SYN. It is an easy case:
1473                  * we simply do not advertise TS support.
1474                  */
1475                 tmp_opt.saw_tstamp = 0;
1476                 tmp_opt.tstamp_ok  = 0;
1477         }
1478         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1479
1480         tcp_openreq_init(req, &tmp_opt, skb);
1481
1482         req->af.v4_req.loc_addr = daddr;
1483         req->af.v4_req.rmt_addr = saddr;
1484         req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1485         req->class = &or_ipv4;
1486         if (!want_cookie)
1487                 TCP_ECN_create_request(req, skb->h.th);
1488
1489         if (want_cookie) {
1490 #ifdef CONFIG_SYN_COOKIES
1491                 syn_flood_warning(skb);
1492 #endif
1493                 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1494         } else if (!isn) {
1495                 struct inet_peer *peer = NULL;
1496
1497                 /* VJ's idea. We save the last timestamp seen from the
1498                  * destination in the peer table when entering TIME-WAIT
1499                  * state, and check against it before accepting a new
1500                  * connection request (see the sketch after this function).
1501                  *
1502                  * If "isn" is not zero, this request hit a live
1503                  * timewait bucket, so all the necessary checks are
1504                  * made in the function processing the timewait state.
1505                  */
1506                 if (tmp_opt.saw_tstamp &&
1507                     sysctl_tcp_tw_recycle &&
1508                     (dst = tcp_v4_route_req(sk, req)) != NULL &&
1509                     (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1510                     peer->v4daddr == saddr) {
1511                         if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1512                             (s32)(peer->tcp_ts - req->ts_recent) >
1513                                                         TCP_PAWS_WINDOW) {
1514                                 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1515                                 dst_release(dst);
1516                                 goto drop_and_free;
1517                         }
1518                 }
1519                 /* Kill the following clause if you dislike this heuristic. */
1520                 else if (!sysctl_tcp_syncookies &&
1521                          (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1522                           (sysctl_max_syn_backlog >> 2)) &&
1523                          (!peer || !peer->tcp_ts_stamp) &&
1524                          (!dst || !dst_metric(dst, RTAX_RTT))) {
1525                         /* Without syncookies, the last quarter of the
1526                          * backlog is reserved for destinations proven
1527                          * to be alive.
1528                          * This means we keep talking to destinations
1529                          * already remembered by the time the synflood
1530                          * started.
1531                          */
1532                         NETDEBUG(if (net_ratelimit()) \
1533                                         printk(KERN_DEBUG "TCP: drop open "
1534                                                           "request from %u.%u."
1535                                                           "%u.%u/%u\n", \
1536                                                NIPQUAD(saddr),
1537                                                ntohs(skb->h.th->source)));
1538                         dst_release(dst);
1539                         goto drop_and_free;
1540                 }
1541
1542                 isn = tcp_v4_init_sequence(sk, skb);
1543         }
1544         req->snt_isn = isn;
1545
1546         if (tcp_v4_send_synack(sk, req, dst))
1547                 goto drop_and_free;
1548
1549         if (want_cookie) {
1550                 tcp_openreq_free(req);
1551         } else {
1552                 tcp_v4_synq_add(sk, req);
1553         }
1554         return 0;
1555
1556 drop_and_free:
1557         tcp_openreq_free(req);
1558 drop:
1559         TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1560         return 0;
1561 }
1562
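/*
 * A small self-contained sketch (under #if 0, not built here) of the
 * signed-wraparound comparison used by the PAWS check above, i.e.
 * (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW.  The helper
 * name ts_after() and the sample values are purely illustrative.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* "a is newer than b", valid as long as the two timestamps are less
 * than 2^31 ticks apart, even across 32-bit wraparound. */
static int ts_after(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) > 0;
}

int main(void)
{
        printf("%d\n", ts_after(1000u, 990u));          /* 1: plain case        */
        printf("%d\n", ts_after(5u, 0xFFFFFFF0u));      /* 1: across wraparound */
        printf("%d\n", ts_after(0xFFFFFFF0u, 5u));      /* 0: reverse direction */
        return 0;
}
#endif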
1563
1564 /*
1565  * The three-way handshake has completed - we got a valid ACK back from
1566  * the peer - now create the new socket.
1567  */
1568 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1569                                   struct open_request *req,
1570                                   struct dst_entry *dst)
1571 {
1572         struct inet_sock *newinet;
1573         struct tcp_sock *newtp;
1574         struct sock *newsk;
1575
1576         if (sk_acceptq_is_full(sk))
1577                 goto exit_overflow;
1578
1579         if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1580                 goto exit;
1581
1582         newsk = tcp_create_openreq_child(sk, req, skb);
1583         if (!newsk)
1584                 goto exit;
1585
1586         newsk->sk_dst_cache = dst;
1587         tcp_v4_setup_caps(newsk, dst);
1588
1589         newtp                 = tcp_sk(newsk);
1590         newinet               = inet_sk(newsk);
1591         newinet->daddr        = req->af.v4_req.rmt_addr;
1592         newinet->rcv_saddr    = req->af.v4_req.loc_addr;
1593         newinet->saddr        = req->af.v4_req.loc_addr;
1594         newinet->opt          = req->af.v4_req.opt;
1595         req->af.v4_req.opt    = NULL;
1596         newinet->mc_index     = tcp_v4_iif(skb);
1597         newinet->mc_ttl       = skb->nh.iph->ttl;
1598         newtp->ext_header_len = 0;
1599         if (newinet->opt)
1600                 newtp->ext_header_len = newinet->opt->optlen;
1601         newinet->id = newtp->write_seq ^ jiffies;
1602
1603         tcp_sync_mss(newsk, dst_mtu(dst));
1604         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1605         tcp_initialize_rcv_mss(newsk);
1606
1607         __tcp_v4_hash(newsk, 0);
1608         __tcp_inherit_port(sk, newsk);
1609
1610         return newsk;
1611
1612 exit_overflow:
1613         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1614 exit:
1615         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1616         dst_release(dst);
1617         return NULL;
1618 }
1619
1620 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1621 {
1622         struct tcphdr *th = skb->h.th;
1623         struct iphdr *iph = skb->nh.iph;
1624         struct tcp_sock *tp = tcp_sk(sk);
1625         struct sock *nsk;
1626         struct open_request **prev;
1627         /* Find possible connection requests. */
1628         struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1629                                                      iph->saddr, iph->daddr);
1630         if (req)
1631                 return tcp_check_req(sk, skb, req, prev);
1632
1633         nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1634                                           th->source,
1635                                           skb->nh.iph->daddr,
1636                                           ntohs(th->dest),
1637                                           tcp_v4_iif(skb));
1638
1639         if (nsk) {
1640                 if (nsk->sk_state != TCP_TIME_WAIT) {
1641                         bh_lock_sock(nsk);
1642                         return nsk;
1643                 }
1644                 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1645                 return NULL;
1646         }
1647
1648 #ifdef CONFIG_SYN_COOKIES
1649         if (!th->rst && !th->syn && th->ack)
1650                 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1651 #endif
1652         return sk;
1653 }
1654
1655 static int tcp_v4_checksum_init(struct sk_buff *skb)
1656 {
1657         if (skb->ip_summed == CHECKSUM_HW) {
1658                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1659                 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1660                                   skb->nh.iph->daddr, skb->csum))
1661                         return 0;
1662
1663                 NETDEBUG(if (net_ratelimit())
1664                                 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1665                 skb->ip_summed = CHECKSUM_NONE;
1666         }
1667         if (skb->len <= 76) {
1668                 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1669                                  skb->nh.iph->daddr,
1670                                  skb_checksum(skb, 0, skb->len, 0)))
1671                         return -1;
1672                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1673         } else {
1674                 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1675                                           skb->nh.iph->saddr,
1676                                           skb->nh.iph->daddr, 0);
1677         }
1678         return 0;
1679 }
1680
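/*
 * The checksum verified above is the standard RFC 1071 16-bit
 * one's-complement sum, computed over the segment together with a
 * pseudo-header (addresses, protocol, length) that this sketch omits.
 * Below is a plain reference fold (under #if 0, not built here) - an
 * illustration only, not the kernel's optimized csum_partial() /
 * csum_tcpudp_magic() code.
 */
#if 0
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t rfc1071_csum(const void *data, size_t len)
{
        const uint8_t *p = data;
        uint32_t sum = 0;

        while (len > 1) {                       /* sum 16-bit words */
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)                                /* odd trailing byte */
                sum += (uint32_t)p[0] << 8;
        while (sum >> 16)                       /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        const uint8_t buf[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

        printf("checksum = 0x%04x\n", rfc1071_csum(buf, sizeof(buf)));
        return 0;
}
#endif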
1681
1682 /* The socket must have its spinlock held when we get
1683  * here.
1684  *
1685  * We have a potential double-lock case here, so even when
1686  * doing backlog processing we use the BH locking scheme.
1687  * This is because we cannot sleep with the original spinlock
1688  * held.
1689  */
1690 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1691 {
1692         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1693                 TCP_CHECK_TIMER(sk);
1694                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1695                         goto reset;
1696                 TCP_CHECK_TIMER(sk);
1697                 return 0;
1698         }
1699
1700         if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1701                 goto csum_err;
1702
1703         if (sk->sk_state == TCP_LISTEN) {
1704                 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1705                 if (!nsk)
1706                         goto discard;
1707
1708                 if (nsk != sk) {
1709                         if (tcp_child_process(sk, nsk, skb))
1710                                 goto reset;
1711                         return 0;
1712                 }
1713         }
1714
1715         TCP_CHECK_TIMER(sk);
1716         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1717                 goto reset;
1718         TCP_CHECK_TIMER(sk);
1719         return 0;
1720
1721 reset:
1722         tcp_v4_send_reset(skb);
1723 discard:
1724         kfree_skb(skb);
1725         /* Be careful here. If this function gets more complicated and
1726          * gcc suffers from register pressure on the x86, sk (in %ebx)
1727          * might be destroyed here. This current version compiles correctly,
1728          * but you have been warned.
1729          */
1730         return 0;
1731
1732 csum_err:
1733         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1734         goto discard;
1735 }
1736
1737 /*
1738  *      From tcp_input.c
1739  */
1740
1741 int tcp_v4_rcv(struct sk_buff *skb)
1742 {
1743         struct tcphdr *th;
1744         struct sock *sk;
1745         int ret;
1746
1747         if (skb->pkt_type != PACKET_HOST)
1748                 goto discard_it;
1749
1750         /* Count it even if it's bad */
1751         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1752
1753         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1754                 goto discard_it;
1755
1756         th = skb->h.th;
1757
1758         if (th->doff < sizeof(struct tcphdr) / 4)
1759                 goto bad_packet;
1760         if (!pskb_may_pull(skb, th->doff * 4))
1761                 goto discard_it;
1762
1763         /* An explanation is required here, I think.
1764          * Packet length and doff are validated by header prediction,
1765          * provided the case of th->doff == 0 is eliminated above.
1766          * So we defer the checks. */
1767         if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1768              tcp_v4_checksum_init(skb) < 0))
1769                 goto bad_packet;
1770
1771         th = skb->h.th;
1772         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1773         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1774                                     skb->len - th->doff * 4);
1775         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1776         TCP_SKB_CB(skb)->when    = 0;
1777         TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
1778         TCP_SKB_CB(skb)->sacked  = 0;
1779
1780         sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1781                              skb->nh.iph->daddr, ntohs(th->dest),
1782                              tcp_v4_iif(skb));
1783
1784         if (!sk)
1785                 goto no_tcp_socket;
1786
1787 process:
1788         if (sk->sk_state == TCP_TIME_WAIT)
1789                 goto do_time_wait;
1790
1791         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1792                 goto discard_and_relse;
1793
1794         if (sk_filter(sk, skb, 0))
1795                 goto discard_and_relse;
1796
1797         skb->dev = NULL;
1798
1799         bh_lock_sock(sk);
1800         ret = 0;
1801         if (!sock_owned_by_user(sk)) {
1802                 if (!tcp_prequeue(sk, skb))
1803                         ret = tcp_v4_do_rcv(sk, skb);
1804         } else
1805                 sk_add_backlog(sk, skb);
1806         bh_unlock_sock(sk);
1807
1808         sock_put(sk);
1809
1810         return ret;
1811
1812 no_tcp_socket:
1813         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1814                 goto discard_it;
1815
1816         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1817 bad_packet:
1818                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1819         } else {
1820                 tcp_v4_send_reset(skb);
1821         }
1822
1823 discard_it:
1824         /* Discard frame. */
1825         kfree_skb(skb);
1826         return 0;
1827
1828 discard_and_relse:
1829         sock_put(sk);
1830         goto discard_it;
1831
1832 do_time_wait:
1833         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1834                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1835                 goto discard_it;
1836         }
1837
1838         if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1839                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1840                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1841                 goto discard_it;
1842         }
1843         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1844                                            skb, th, skb->len)) {
1845         case TCP_TW_SYN: {
1846                 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1847                                                           ntohs(th->dest),
1848                                                           tcp_v4_iif(skb));
1849                 if (sk2) {
1850                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1851                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1852                         sk = sk2;
1853                         goto process;
1854                 }
1855                 /* Fall through to ACK */
1856         }
1857         case TCP_TW_ACK:
1858                 tcp_v4_timewait_ack(sk, skb);
1859                 break;
1860         case TCP_TW_RST:
1861                 goto no_tcp_socket;
1862         case TCP_TW_SUCCESS:;
1863         }
1864         goto discard_it;
1865 }
1866
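/*
 * A tiny worked example (under #if 0, not built here) of the end_seq
 * arithmetic in tcp_v4_rcv() above: SYN and FIN each consume one
 * sequence number in addition to the payload bytes.  All input values
 * below are arbitrary.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t seq = 1000;    /* ntohl(th->seq)                    */
        int syn = 1, fin = 0;   /* flag bits from the TCP header     */
        int skb_len = 60;       /* total segment length in bytes     */
        int doff = 10;          /* data offset, in 32-bit words      */
        uint32_t end_seq = seq + syn + fin + skb_len - doff * 4;

        /* 60-byte segment, 40-byte header, SYN set: 1000 + 1 + 20 = 1021 */
        printf("end_seq = %u\n", end_seq);
        return 0;
}
#endif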
1867 /* With per-bucket locks this operation is not atomic, so
1868  * this version is no worse.
1869  */
1870 static void __tcp_v4_rehash(struct sock *sk)
1871 {
1872         sk->sk_prot->unhash(sk);
1873         sk->sk_prot->hash(sk);
1874 }
1875
1876 static int tcp_v4_reselect_saddr(struct sock *sk)
1877 {
1878         struct inet_sock *inet = inet_sk(sk);
1879         int err;
1880         struct rtable *rt;
1881         __u32 old_saddr = inet->saddr;
1882         __u32 new_saddr;
1883         __u32 daddr = inet->daddr;
1884
1885         if (inet->opt && inet->opt->srr)
1886                 daddr = inet->opt->faddr;
1887
1888         /* Query new route. */
1889         err = ip_route_connect(&rt, daddr, 0,
1890                                RT_CONN_FLAGS(sk),
1891                                sk->sk_bound_dev_if,
1892                                IPPROTO_TCP,
1893                                inet->sport, inet->dport, sk);
1894         if (err)
1895                 return err;
1896
1897         __sk_dst_set(sk, &rt->u.dst);
1898         tcp_v4_setup_caps(sk, &rt->u.dst);
1899
1900         new_saddr = rt->rt_src;
1901
1902         if (new_saddr == old_saddr)
1903                 return 0;
1904
1905         if (sysctl_ip_dynaddr > 1) {
1906                 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1907                                  "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1908                        NIPQUAD(old_saddr),
1909                        NIPQUAD(new_saddr));
1910         }
1911
1912         inet->saddr = new_saddr;
1913         inet->rcv_saddr = new_saddr;
1914
1915         /* XXX The only ugly spot where we really need to
1916          * XXX change the socket's identity after it has
1917          * XXX entered the hashes. -DaveM
1918          *
1919          * Besides that, it does not check for connection
1920          * uniqueness. Expect trouble.
1921          */
1922         __tcp_v4_rehash(sk);
1923         return 0;
1924 }
1925
1926 int tcp_v4_rebuild_header(struct sock *sk)
1927 {
1928         struct inet_sock *inet = inet_sk(sk);
1929         struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1930         u32 daddr;
1931         int err;
1932
1933         /* Route is OK, nothing to do. */
1934         if (rt)
1935                 return 0;
1936
1937         /* Reroute. */
1938         daddr = inet->daddr;
1939         if (inet->opt && inet->opt->srr)
1940                 daddr = inet->opt->faddr;
1941
1942         {
1943                 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1944                                     .nl_u = { .ip4_u =
1945                                               { .daddr = daddr,
1946                                                 .saddr = inet->saddr,
1947                                                 .tos = RT_CONN_FLAGS(sk) } },
1948                                     .proto = IPPROTO_TCP,
1949                                     .uli_u = { .ports =
1950                                                { .sport = inet->sport,
1951                                                  .dport = inet->dport } } };
1952
1953                 err = ip_route_output_flow(&rt, &fl, sk, 0);
1954         }
1955         if (!err) {
1956                 __sk_dst_set(sk, &rt->u.dst);
1957                 tcp_v4_setup_caps(sk, &rt->u.dst);
1958                 return 0;
1959         }
1960
1961         /* Routing failed... */
1962         sk->sk_route_caps = 0;
1963
1964         if (!sysctl_ip_dynaddr ||
1965             sk->sk_state != TCP_SYN_SENT ||
1966             (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1967             (err = tcp_v4_reselect_saddr(sk)) != 0)
1968                 sk->sk_err_soft = -err;
1969
1970         return err;
1971 }
1972
1973 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1974 {
1975         struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
1976         struct inet_sock *inet = inet_sk(sk);
1977
1978         sin->sin_family         = AF_INET;
1979         sin->sin_addr.s_addr    = inet->daddr;
1980         sin->sin_port           = inet->dport;
1981 }
1982
1983 /* VJ's idea. Save the last timestamp seen from this destination
1984  * and hold it at least for the normal timewait interval, to use for
1985  * duplicate segment detection in subsequent connections before they
1986  * enter the synchronized state.
1987  */
1988
1989 int tcp_v4_remember_stamp(struct sock *sk)
1990 {
1991         struct inet_sock *inet = inet_sk(sk);
1992         struct tcp_sock *tp = tcp_sk(sk);
1993         struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1994         struct inet_peer *peer = NULL;
1995         int release_it = 0;
1996
1997         if (!rt || rt->rt_dst != inet->daddr) {
1998                 peer = inet_getpeer(inet->daddr, 1);
1999                 release_it = 1;
2000         } else {
2001                 if (!rt->peer)
2002                         rt_bind_peer(rt, 1);
2003                 peer = rt->peer;
2004         }
2005
2006         if (peer) {
2007                 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
2008                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2009                      peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
2010                         peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
2011                         peer->tcp_ts = tp->rx_opt.ts_recent;
2012                 }
2013                 if (release_it)
2014                         inet_putpeer(peer);
2015                 return 1;
2016         }
2017
2018         return 0;
2019 }
2020
2021 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2022 {
2023         struct inet_peer *peer = NULL;
2024
2025         peer = inet_getpeer(tw->tw_daddr, 1);
2026
2027         if (peer) {
2028                 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2029                     (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2030                      peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2031                         peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2032                         peer->tcp_ts = tw->tw_ts_recent;
2033                 }
2034                 inet_putpeer(peer);
2035                 return 1;
2036         }
2037
2038         return 0;
2039 }
2040
2041 struct tcp_func ipv4_specific = {
2042         .queue_xmit     =       ip_queue_xmit,
2043         .send_check     =       tcp_v4_send_check,
2044         .rebuild_header =       tcp_v4_rebuild_header,
2045         .conn_request   =       tcp_v4_conn_request,
2046         .syn_recv_sock  =       tcp_v4_syn_recv_sock,
2047         .remember_stamp =       tcp_v4_remember_stamp,
2048         .net_header_len =       sizeof(struct iphdr),
2049         .setsockopt     =       ip_setsockopt,
2050         .getsockopt     =       ip_getsockopt,
2051         .addr2sockaddr  =       v4_addr2sockaddr,
2052         .sockaddr_len   =       sizeof(struct sockaddr_in),
2053 };
2054
2055 /* NOTE: A lot of things are set to zero explicitly by the call to
2056  *       sk_alloc(), so they need not be done here.
2057  */
2058 static int tcp_v4_init_sock(struct sock *sk)
2059 {
2060         struct tcp_sock *tp = tcp_sk(sk);
2061
2062         skb_queue_head_init(&tp->out_of_order_queue);
2063         tcp_init_xmit_timers(sk);
2064         tcp_prequeue_init(tp);
2065
2066         tp->rto  = TCP_TIMEOUT_INIT;
2067         tp->mdev = TCP_TIMEOUT_INIT;
2068
2069         /* So many TCP implementations out there (incorrectly) count the
2070          * initial SYN frame in their delayed-ACK and congestion control
2071          * algorithms that we must have the following bandaid to talk
2072          * efficiently to them.  -DaveM
2073          */
2074         tp->snd_cwnd = 2;
2075
2076         /* See draft-stevens-tcpca-spec-01 for discussion of the
2077          * initialization of these values.
2078          */
2079         tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
2080         tp->snd_cwnd_clamp = ~0;
2081         tp->mss_cache_std = tp->mss_cache = 536;
2082
2083         tp->reordering = sysctl_tcp_reordering;
2084
2085         sk->sk_state = TCP_CLOSE;
2086
2087         sk->sk_write_space = sk_stream_write_space;
2088         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2089
2090         tp->af_specific = &ipv4_specific;
2091
2092         sk->sk_sndbuf = sysctl_tcp_wmem[1];
2093         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2094
2095         atomic_inc(&tcp_sockets_allocated);
2096
2097         return 0;
2098 }
2099
2100 int tcp_v4_destroy_sock(struct sock *sk)
2101 {
2102         struct tcp_sock *tp = tcp_sk(sk);
2103
2104         tcp_clear_xmit_timers(sk);
2105
2106         /* Clean up the write buffer. */
2107         sk_stream_writequeue_purge(sk);
2108
2109         /* Cleans up our, hopefully empty, out_of_order_queue. */
2110         __skb_queue_purge(&tp->out_of_order_queue);
2111
2112         /* Clean up the prequeue; it really should be empty. */
2113         __skb_queue_purge(&tp->ucopy.prequeue);
2114
2115         /* Clean up a referenced TCP bind bucket. */
2116         if (tp->bind_hash)
2117                 tcp_put_port(sk);
2118
2119         /*
2120          * If sendmsg cached page exists, toss it.
2121          */
2122         if (sk->sk_sndmsg_page) {
2123                 __free_page(sk->sk_sndmsg_page);
2124                 sk->sk_sndmsg_page = NULL;
2125         }
2126
2127         atomic_dec(&tcp_sockets_allocated);
2128
2129         return 0;
2130 }
2131
2132 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2133
2134 #ifdef CONFIG_PROC_FS
2135 /* Proc filesystem TCP sock list dumping. */
2136
2137 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2138 {
2139         return hlist_empty(head) ? NULL :
2140                 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2141 }
2142
2143 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2144 {
2145         return tw->tw_node.next ?
2146                 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2147 }
2148
2149 static void *listening_get_next(struct seq_file *seq, void *cur)
2150 {
2151         struct tcp_sock *tp;
2152         struct hlist_node *node;
2153         struct sock *sk = cur;
2154         struct tcp_iter_state* st = seq->private;
2155
2156         if (!sk) {
2157                 st->bucket = 0;
2158                 sk = sk_head(&tcp_listening_hash[0]);
2159                 goto get_sk;
2160         }
2161
2162         ++st->num;
2163
2164         if (st->state == TCP_SEQ_STATE_OPENREQ) {
2165                 struct open_request *req = cur;
2166
2167                 tp = tcp_sk(st->syn_wait_sk);
2168                 req = req->dl_next;
2169                 while (1) {
2170                         while (req) {
2171                                 vxdprintk(VXD_CBIT(net, 6),
2172                                         "sk,req: %p [#%d] (from %d)", req->sk,
2173                                         (req->sk)?req->sk->sk_xid:0, vx_current_xid());
2174                                 /* skip foreign contexts without looping */
2175                                 if (!(req->sk && !vx_check(req->sk->sk_xid,
2176                                                            VX_IDENT|VX_WATCH)) &&
2177                                     req->class->family == st->family) {
2178                                         cur = req;
2179                                         goto out;
2180                                 }
2181                                 req = req->dl_next;
2182                         }
2183                         if (++st->sbucket >= TCP_SYNQ_HSIZE)
2184                                 break;
2185 get_req:
2186                         req = tp->listen_opt->syn_table[st->sbucket];
2187                 }
2188                 sk        = sk_next(st->syn_wait_sk);
2189                 st->state = TCP_SEQ_STATE_LISTENING;
2190                 read_unlock_bh(&tp->syn_wait_lock);
2191         } else {
2192                 tp = tcp_sk(sk);
2193                 read_lock_bh(&tp->syn_wait_lock);
2194                 if (tp->listen_opt && tp->listen_opt->qlen)
2195                         goto start_req;
2196                 read_unlock_bh(&tp->syn_wait_lock);
2197                 sk = sk_next(sk);
2198         }
2199 get_sk:
2200         sk_for_each_from(sk, node) {
2201                 vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
2202                         sk, sk->sk_xid, vx_current_xid());
2203                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2204                         continue;
2205                 if (sk->sk_family == st->family) {
2206                         cur = sk;
2207                         goto out;
2208                 }
2209                 tp = tcp_sk(sk);
2210                 read_lock_bh(&tp->syn_wait_lock);
2211                 if (tp->listen_opt && tp->listen_opt->qlen) {
2212 start_req:
2213                         st->uid         = sock_i_uid(sk);
2214                         st->syn_wait_sk = sk;
2215                         st->state       = TCP_SEQ_STATE_OPENREQ;
2216                         st->sbucket     = 0;
2217                         goto get_req;
2218                 }
2219                 read_unlock_bh(&tp->syn_wait_lock);
2220         }
2221         if (++st->bucket < TCP_LHTABLE_SIZE) {
2222                 sk = sk_head(&tcp_listening_hash[st->bucket]);
2223                 goto get_sk;
2224         }
2225         cur = NULL;
2226 out:
2227         return cur;
2228 }
2229
2230 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2231 {
2232         void *rc = listening_get_next(seq, NULL);
2233
2234         while (rc && *pos) {
2235                 rc = listening_get_next(seq, rc);
2236                 --*pos;
2237         }
2238         return rc;
2239 }
2240
2241 static void *established_get_first(struct seq_file *seq)
2242 {
2243         struct tcp_iter_state* st = seq->private;
2244         void *rc = NULL;
2245
2246         for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2247                 struct sock *sk;
2248                 struct hlist_node *node;
2249                 struct tcp_tw_bucket *tw;
2250
2251                 /* We can reschedule _before_ having picked the target: */
2252                 cond_resched_softirq();
2253
2254                 read_lock(&tcp_ehash[st->bucket].lock);
2255                 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2256                         vxdprintk(VXD_CBIT(net, 6),
2257                                 "sk,egf: %p [#%d] (from %d)",
2258                                 sk, sk->sk_xid, vx_current_xid());
2259                         if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2260                                 continue;
2261                         if (sk->sk_family != st->family)
2262                                 continue;
2263                         rc = sk;
2264                         goto out;
2265                 }
2266                 st->state = TCP_SEQ_STATE_TIME_WAIT;
2267                 tw_for_each(tw, node,
2268                             &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2269                         vxdprintk(VXD_CBIT(net, 6),
2270                                 "tw: %p [#%d] (from %d)",
2271                                 tw, tw->tw_xid, vx_current_xid());
2272                         if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2273                                 continue;
2274                         if (tw->tw_family != st->family)
2275                                 continue;
2276                         rc = tw;
2277                         goto out;
2278                 }
2279                 read_unlock(&tcp_ehash[st->bucket].lock);
2280                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2281         }
2282 out:
2283         return rc;
2284 }
2285
2286 static void *established_get_next(struct seq_file *seq, void *cur)
2287 {
2288         struct sock *sk = cur;
2289         struct tcp_tw_bucket *tw;
2290         struct hlist_node *node;
2291         struct tcp_iter_state* st = seq->private;
2292
2293         ++st->num;
2294
2295         if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2296                 tw = cur;
2297                 tw = tw_next(tw);
2298 get_tw:
2299                 while (tw && (tw->tw_family != st->family ||
2300                         !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2301                         tw = tw_next(tw);
2302                 }
2303                 if (tw) {
2304                         cur = tw;
2305                         goto out;
2306                 }
2307                 read_unlock(&tcp_ehash[st->bucket].lock);
2308                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2309
2310                 /* We can reschedule between buckets: */
2311                 cond_resched_softirq();
2312
2313                 if (++st->bucket < tcp_ehash_size) {
2314                         read_lock(&tcp_ehash[st->bucket].lock);
2315                         sk = sk_head(&tcp_ehash[st->bucket].chain);
2316                 } else {
2317                         cur = NULL;
2318                         goto out;
2319                 }
2320         } else
2321                 sk = sk_next(sk);
2322
2323         sk_for_each_from(sk, node) {
2324                 vxdprintk(VXD_CBIT(net, 6),
2325                         "sk,egn: %p [#%d] (from %d)",
2326                         sk, sk->sk_xid, vx_current_xid());
2327                 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2328                         continue;
2329                 if (sk->sk_family == st->family)
2330                         goto found;
2331         }
2332
2333         st->state = TCP_SEQ_STATE_TIME_WAIT;
2334         tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2335         goto get_tw;
2336 found:
2337         cur = sk;
2338 out:
2339         return cur;
2340 }
2341
2342 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2343 {
2344         void *rc = established_get_first(seq);
2345
2346         while (rc && pos) {
2347                 rc = established_get_next(seq, rc);
2348                 --pos;
2349         }               
2350         return rc;
2351 }
2352
2353 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2354 {
2355         void *rc;
2356         struct tcp_iter_state* st = seq->private;
2357
2358         tcp_listen_lock();
2359         st->state = TCP_SEQ_STATE_LISTENING;
2360         rc        = listening_get_idx(seq, &pos);
2361
2362         if (!rc) {
2363                 tcp_listen_unlock();
2364                 local_bh_disable();
2365                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2366                 rc        = established_get_idx(seq, pos);
2367         }
2368
2369         return rc;
2370 }
2371
2372 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2373 {
2374         struct tcp_iter_state* st = seq->private;
2375         st->state = TCP_SEQ_STATE_LISTENING;
2376         st->num = 0;
2377         return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2378 }
2379
2380 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2381 {
2382         void *rc = NULL;
2383         struct tcp_iter_state* st;
2384
2385         if (v == SEQ_START_TOKEN) {
2386                 rc = tcp_get_idx(seq, 0);
2387                 goto out;
2388         }
2389         st = seq->private;
2390
2391         switch (st->state) {
2392         case TCP_SEQ_STATE_OPENREQ:
2393         case TCP_SEQ_STATE_LISTENING:
2394                 rc = listening_get_next(seq, v);
2395                 if (!rc) {
2396                         tcp_listen_unlock();
2397                         local_bh_disable();
2398                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2399                         rc        = established_get_first(seq);
2400                 }
2401                 break;
2402         case TCP_SEQ_STATE_ESTABLISHED:
2403         case TCP_SEQ_STATE_TIME_WAIT:
2404                 rc = established_get_next(seq, v);
2405                 break;
2406         }
2407 out:
2408         ++*pos;
2409         return rc;
2410 }
2411
2412 static void tcp_seq_stop(struct seq_file *seq, void *v)
2413 {
2414         struct tcp_iter_state* st = seq->private;
2415
2416         switch (st->state) {
2417         case TCP_SEQ_STATE_OPENREQ:
2418                 if (v) {
2419                         struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
2420                         read_unlock_bh(&tp->syn_wait_lock);
2421                 }
2422         case TCP_SEQ_STATE_LISTENING:
2423                 if (v != SEQ_START_TOKEN)
2424                         tcp_listen_unlock();
2425                 break;
2426         case TCP_SEQ_STATE_TIME_WAIT:
2427         case TCP_SEQ_STATE_ESTABLISHED:
2428                 if (v)
2429                         read_unlock(&tcp_ehash[st->bucket].lock);
2430                 local_bh_enable();
2431                 break;
2432         }
2433 }
2434
2435 static int tcp_seq_open(struct inode *inode, struct file *file)
2436 {
2437         struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2438         struct seq_file *seq;
2439         struct tcp_iter_state *s;
2440         int rc;
2441
2442         if (unlikely(afinfo == NULL))
2443                 return -EINVAL;
2444
2445         s = kmalloc(sizeof(*s), GFP_KERNEL);
2446         if (!s)
2447                 return -ENOMEM;
2448         memset(s, 0, sizeof(*s));
2449         s->family               = afinfo->family;
2450         s->seq_ops.start        = tcp_seq_start;
2451         s->seq_ops.next         = tcp_seq_next;
2452         s->seq_ops.show         = afinfo->seq_show;
2453         s->seq_ops.stop         = tcp_seq_stop;
2454
2455         rc = seq_open(file, &s->seq_ops);
2456         if (rc)
2457                 goto out_kfree;
2458         seq          = file->private_data;
2459         seq->private = s;
2460 out:
2461         return rc;
2462 out_kfree:
2463         kfree(s);
2464         goto out;
2465 }
2466
2467 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2468 {
2469         int rc = 0;
2470         struct proc_dir_entry *p;
2471
2472         if (!afinfo)
2473                 return -EINVAL;
2474         afinfo->seq_fops->owner         = afinfo->owner;
2475         afinfo->seq_fops->open          = tcp_seq_open;
2476         afinfo->seq_fops->read          = seq_read;
2477         afinfo->seq_fops->llseek        = seq_lseek;
2478         afinfo->seq_fops->release       = seq_release_private;
2479         
2480         p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2481         if (p)
2482                 p->data = afinfo;
2483         else
2484                 rc = -ENOMEM;
2485         return rc;
2486 }
2487
2488 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2489 {
2490         if (!afinfo)
2491                 return;
2492         proc_net_remove(afinfo->name);
2493         memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
2494 }
2495
2496 static void get_openreq4(struct sock *sk, struct open_request *req,
2497                          char *tmpbuf, int i, int uid)
2498 {
2499         int ttd = req->expires - jiffies;
2500
2501         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2502                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2503                 i,
2504                 req->af.v4_req.loc_addr,
2505                 ntohs(inet_sk(sk)->sport),
2506                 req->af.v4_req.rmt_addr,
2507                 ntohs(req->rmt_port),
2508                 TCP_SYN_RECV,
2509                 0, 0, /* could print option size, but that is af dependent. */
2510                 1,    /* timers active (only the expire timer) */
2511                 jiffies_to_clock_t(ttd),
2512                 req->retrans,
2513                 uid,
2514                 0,  /* non standard timer */
2515                 0, /* open_requests have no inode */
2516                 atomic_read(&sk->sk_refcnt),
2517                 req);
2518 }
2519
2520 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2521 {
2522         int timer_active;
2523         unsigned long timer_expires;
2524         struct tcp_sock *tp = tcp_sk(sp);
2525         struct inet_sock *inet = inet_sk(sp);
2526         unsigned int dest = inet->daddr;
2527         unsigned int src = inet->rcv_saddr;
2528         __u16 destp = ntohs(inet->dport);
2529         __u16 srcp = ntohs(inet->sport);
2530
2531         if (tp->pending == TCP_TIME_RETRANS) {
2532                 timer_active    = 1;
2533                 timer_expires   = tp->timeout;
2534         } else if (tp->pending == TCP_TIME_PROBE0) {
2535                 timer_active    = 4;
2536                 timer_expires   = tp->timeout;
2537         } else if (timer_pending(&sp->sk_timer)) {
2538                 timer_active    = 2;
2539                 timer_expires   = sp->sk_timer.expires;
2540         } else {
2541                 timer_active    = 0;
2542                 timer_expires = jiffies;
2543         }
2544
2545         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2546                         "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2547                 i, src, srcp, dest, destp, sp->sk_state,
2548                 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2549                 timer_active,
2550                 jiffies_to_clock_t(timer_expires - jiffies),
2551                 tp->retransmits,
2552                 sock_i_uid(sp),
2553                 tp->probes_out,
2554                 sock_i_ino(sp),
2555                 atomic_read(&sp->sk_refcnt), sp,
2556                 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2557                 tp->snd_cwnd,
2558                 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2559 }
2560
2561 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2562 {
2563         unsigned int dest, src;
2564         __u16 destp, srcp;
2565         int ttd = tw->tw_ttd - jiffies;
2566
2567         if (ttd < 0)
2568                 ttd = 0;
2569
2570         dest  = tw->tw_daddr;
2571         src   = tw->tw_rcv_saddr;
2572         destp = ntohs(tw->tw_dport);
2573         srcp  = ntohs(tw->tw_sport);
2574
2575         sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2576                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2577                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2578                 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2579                 atomic_read(&tw->tw_refcnt), tw);
2580 }
2581
2582 #define TMPSZ 150
2583
2584 static int tcp4_seq_show(struct seq_file *seq, void *v)
2585 {
2586         struct tcp_iter_state* st;
2587         char tmpbuf[TMPSZ + 1];
2588
2589         if (v == SEQ_START_TOKEN) {
2590                 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2591                            "  sl  local_address rem_address   st tx_queue "
2592                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2593                            "inode");
2594                 goto out;
2595         }
2596         st = seq->private;
2597
2598         switch (st->state) {
2599         case TCP_SEQ_STATE_LISTENING:
2600         case TCP_SEQ_STATE_ESTABLISHED:
2601                 get_tcp4_sock(v, tmpbuf, st->num);
2602                 break;
2603         case TCP_SEQ_STATE_OPENREQ:
2604                 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2605                 break;
2606         case TCP_SEQ_STATE_TIME_WAIT:
2607                 get_timewait4_sock(v, tmpbuf, st->num);
2608                 break;
2609         }
2610         seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2611 out:
2612         return 0;
2613 }
2614
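/*
 * A hedged userspace sketch (under #if 0, not built here) showing how
 * the /proc/net/tcp lines emitted by tcp4_seq_show() / get_tcp4_sock()
 * above can be read back.  The address fields are the raw 32-bit words
 * printed with %08X, so on the machine that produced them they can be
 * assigned directly to in_addr.s_addr; ports and the state are plain hex.
 */
#if 0
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
        FILE *f = fopen("/proc/net/tcp", "r");
        char line[256];
        unsigned int sl, local, lport, remote, rport, st;

        if (!f) {
                perror("/proc/net/tcp");
                return 1;
        }
        if (!fgets(line, sizeof(line), f)) {    /* skip the header line */
                fclose(f);
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "%u: %X:%X %X:%X %X",
                           &sl, &local, &lport, &remote, &rport, &st) != 6)
                        continue;
                struct in_addr la = { .s_addr = local };
                struct in_addr ra = { .s_addr = remote };

                printf("%s:%u -> ", inet_ntoa(la), lport);
                printf("%s:%u st=%02X\n", inet_ntoa(ra), rport, st);
        }
        fclose(f);
        return 0;
}
#endif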
2615 static struct file_operations tcp4_seq_fops;
2616 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2617         .owner          = THIS_MODULE,
2618         .name           = "tcp",
2619         .family         = AF_INET,
2620         .seq_show       = tcp4_seq_show,
2621         .seq_fops       = &tcp4_seq_fops,
2622 };
2623
2624 int __init tcp4_proc_init(void)
2625 {
2626         return tcp_proc_register(&tcp4_seq_afinfo);
2627 }
2628
2629 void tcp4_proc_exit(void)
2630 {
2631         tcp_proc_unregister(&tcp4_seq_afinfo);
2632 }
2633 #endif /* CONFIG_PROC_FS */
2634
2635 struct proto tcp_prot = {
2636         .name                   = "TCP",
2637         .owner                  = THIS_MODULE,
2638         .close                  = tcp_close,
2639         .connect                = tcp_v4_connect,
2640         .disconnect             = tcp_disconnect,
2641         .accept                 = tcp_accept,
2642         .ioctl                  = tcp_ioctl,
2643         .init                   = tcp_v4_init_sock,
2644         .destroy                = tcp_v4_destroy_sock,
2645         .shutdown               = tcp_shutdown,
2646         .setsockopt             = tcp_setsockopt,
2647         .getsockopt             = tcp_getsockopt,
2648         .sendmsg                = tcp_sendmsg,
2649         .recvmsg                = tcp_recvmsg,
2650         .backlog_rcv            = tcp_v4_do_rcv,
2651         .hash                   = tcp_v4_hash,
2652         .unhash                 = tcp_unhash,
2653         .get_port               = tcp_v4_get_port,
2654         .enter_memory_pressure  = tcp_enter_memory_pressure,
2655         .sockets_allocated      = &tcp_sockets_allocated,
2656         .memory_allocated       = &tcp_memory_allocated,
2657         .memory_pressure        = &tcp_memory_pressure,
2658         .sysctl_mem             = sysctl_tcp_mem,
2659         .sysctl_wmem            = sysctl_tcp_wmem,
2660         .sysctl_rmem            = sysctl_tcp_rmem,
2661         .max_header             = MAX_TCP_HEADER,
2662         .obj_size               = sizeof(struct tcp_sock),
2663 };
2664
2665
2666
2667 void __init tcp_v4_init(struct net_proto_family *ops)
2668 {
2669         int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2670         if (err < 0)
2671                 panic("Failed to create the TCP control socket.\n");
2672         tcp_socket->sk->sk_allocation   = GFP_ATOMIC;
2673         inet_sk(tcp_socket->sk)->uc_ttl = -1;
2674
2675         /* Unhash it so that IP input processing does not even
2676          * see it; we do not wish this socket to see incoming
2677          * packets.
2678          */
2679         tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2680 }
2681
2682 EXPORT_SYMBOL(ipv4_specific);
2683 EXPORT_SYMBOL(tcp_bind_hash);
2684 EXPORT_SYMBOL(tcp_bucket_create);
2685 EXPORT_SYMBOL(tcp_hashinfo);
2686 EXPORT_SYMBOL(tcp_inherit_port);
2687 EXPORT_SYMBOL(tcp_listen_wlock);
2688 EXPORT_SYMBOL(tcp_port_rover);
2689 EXPORT_SYMBOL(tcp_prot);
2690 EXPORT_SYMBOL(tcp_put_port);
2691 EXPORT_SYMBOL(tcp_unhash);
2692 EXPORT_SYMBOL(tcp_v4_conn_request);
2693 EXPORT_SYMBOL(tcp_v4_connect);
2694 EXPORT_SYMBOL(tcp_v4_do_rcv);
2695 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2696 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2697 EXPORT_SYMBOL(tcp_v4_send_check);
2698 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2699
2700 #ifdef CONFIG_PROC_FS
2701 EXPORT_SYMBOL(tcp_proc_register);
2702 EXPORT_SYMBOL(tcp_proc_unregister);
2703 #endif
2704 EXPORT_SYMBOL(sysctl_local_port_range);
2705 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2706 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2707 EXPORT_SYMBOL(sysctl_tcp_tw_reuse);
2708