2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * open_request handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after year
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
51 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
69 #include <net/inet_common.h>
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
78 extern int sysctl_ip_dynaddr;
79 int sysctl_tcp_tw_reuse;
80 int sysctl_tcp_low_latency;
82 /* Check TCP sequence numbers in ICMP packets. */
83 #define ICMP_MIN_LENGTH 8
85 /* Socket used for sending RSTs */
86 static struct socket *tcp_socket;
88 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
91 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
92 .__tcp_lhash_lock = RW_LOCK_UNLOCKED,
93 .__tcp_lhash_users = ATOMIC_INIT(0),
95 = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
96 .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
100 * This array holds the first and last local port number.
101 * For high-usage systems, use sysctl to change this to
104 int sysctl_local_port_range[2] = { 1024, 4999 };
105 int tcp_port_rover = 1024 - 1;
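/*
 * The {1024, 4999} defaults above are what the ip_local_port_range
 * sysctl exposes.  A minimal userspace sketch (not part of the kernel
 * build; assumes the usual /proc/sys path is available) for inspecting
 * and widening the range:
 */
#if 0
#include <stdio.h>

int main(void)
{
        const char *path = "/proc/sys/net/ipv4/ip_local_port_range";
        int low, high;
        FILE *f = fopen(path, "r");

        if (!f || fscanf(f, "%d %d", &low, &high) != 2) {
                perror(path);
                return 1;
        }
        fclose(f);
        printf("ephemeral ports: %d-%d\n", low, high);

        /* Widening the range needs root: echo "32768 61000" > path */
        f = fopen(path, "w");
        if (f) {
                fprintf(f, "32768 61000\n");
                fclose(f);
        }
        return 0;
}
#endif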
107 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
108 __u32 faddr, __u16 fport)
110 int h = (laddr ^ lport) ^ (faddr ^ fport);
113 return h & (tcp_ehash_size - 1);
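/*
 * The established-hash index is just the connection 4-tuple XORed
 * together, folded down, and masked by the (power-of-two) table size.
 * Self-contained userspace sketch of the same scheme (the folding
 * shifts are elided in the fragment above; the constants here are
 * illustrative, not a promise about the kernel's exact behaviour):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EHASH_SIZE 512          /* must be a power of two for the mask */

static unsigned int ehash(uint32_t laddr, uint16_t lport,
                          uint32_t faddr, uint16_t fport)
{
        unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);

        h ^= h >> 16;           /* fold the high bits down */
        h ^= h >> 8;
        return h & (EHASH_SIZE - 1);
}

int main(void)
{
        /* 10.0.0.1:45678 <-> 10.0.0.2:80 */
        printf("bucket %u\n", ehash(0x0a000001, 45678, 0x0a000002, 80));
        return 0;
}
#endif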
116 static __inline__ int tcp_sk_hashfn(struct sock *sk)
118 struct inet_opt *inet = inet_sk(sk);
119 __u32 laddr = inet->rcv_saddr;
120 __u16 lport = inet->num;
121 __u32 faddr = inet->daddr;
122 __u16 fport = inet->dport;
124 return tcp_hashfn(laddr, lport, faddr, fport);
127 /* Allocate and initialize a new TCP local port bind bucket.
128 * The bindhash mutex for snum's hash chain must be held here.
130 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
133 struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
138 INIT_HLIST_HEAD(&tb->owners);
139 hlist_add_head(&tb->node, &head->chain);
144 /* Caller must hold hashbucket lock for this tb with local BH disabled */
145 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
147 if (hlist_empty(&tb->owners)) {
148 __hlist_del(&tb->node);
149 kmem_cache_free(tcp_bucket_cachep, tb);
153 /* Caller must disable local BH processing. */
154 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
156 struct tcp_bind_hashbucket *head =
157 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
158 struct tcp_bind_bucket *tb;
160 spin_lock(&head->lock);
161 tb = tcp_sk(sk)->bind_hash;
162 sk_add_bind_node(child, &tb->owners);
163 tcp_sk(child)->bind_hash = tb;
164 spin_unlock(&head->lock);
167 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
170 __tcp_inherit_port(sk, child);
174 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
177 inet_sk(sk)->num = snum;
178 sk_add_bind_node(sk, &tb->owners);
179 tcp_sk(sk)->bind_hash = tb;
183 Return 1 if addr matches the socket IP list
184 or the socket is INADDR_ANY
186 static inline int tcp_in_list(struct sock *sk, u32 addr)
188 struct nx_info *nxi = sk->sk_nx_info;
190 vxdprintk("tcp_in_list(%p) %p,%p;%lx\n",
191 sk, nxi, sk->sk_socket,
192 (sk->sk_socket?sk->sk_socket->flags:0));
199 if (nxi->ipv4[i] == addr)
202 else if (!tcp_v4_rcv_saddr(sk) || tcp_v4_rcv_saddr(sk) == addr)
208 Check if the addresses in sk1 conflict with those in sk2
210 int tcp_ipv4_addr_conflict(struct sock *sk1, struct sock *sk2)
213 nxdprintk("inet_bind(%p,%p) %p,%p;%lx %p,%p;%lx\n",
215 sk1->sk_nx_info, sk1->sk_socket,
216 (sk1->sk_socket?sk1->sk_socket->flags:0),
217 sk2->sk_nx_info, sk2->sk_socket,
218 (sk2->sk_socket?sk2->sk_socket->flags:0));
220 if (tcp_v4_rcv_saddr(sk1)) {
221 /* Bind to one address only */
222 return tcp_in_list (sk2, tcp_v4_rcv_saddr(sk1));
223 } else if (sk1->sk_nx_info) {
224 /* A restricted bind(any) */
225 struct nx_info *nxi = sk1->sk_nx_info;
230 if (tcp_in_list (sk2, nxi->ipv4[i]))
232 } else /* A bind(any) does not allow other binds on the same port */
237 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
240 struct hlist_node *node;
241 int reuse = sk->sk_reuse;
243 sk_for_each_bound(sk2, node, &tb->owners) {
245 !tcp_v6_ipv6only(sk2) &&
246 (!sk->sk_bound_dev_if ||
247 !sk2->sk_bound_dev_if ||
248 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
249 if (!reuse || !sk2->sk_reuse ||
250 sk2->sk_state == TCP_LISTEN) {
251 if (tcp_ipv4_addr_conflict(sk, sk2))
259 /* Obtain a reference to a local port for the given sock,
260 * if snum is zero it means select any available local port.
262 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
264 struct tcp_bind_hashbucket *head;
265 struct hlist_node *node;
266 struct tcp_bind_bucket *tb;
271 int low = sysctl_local_port_range[0];
272 int high = sysctl_local_port_range[1];
273 int remaining = (high - low) + 1;
276 spin_lock(&tcp_portalloc_lock);
277 rover = tcp_port_rover;
280 if (rover < low || rover > high)
282 head = &tcp_bhash[tcp_bhashfn(rover)];
283 spin_lock(&head->lock);
284 tb_for_each(tb, node, &head->chain)
285 if (tb->port == rover)
289 spin_unlock(&head->lock);
290 } while (--remaining > 0);
291 tcp_port_rover = rover;
292 spin_unlock(&tcp_portalloc_lock);
294 /* Exhausted local port range during search? */
299 /* OK, here is the one we will use. HEAD is
300 * non-NULL and we hold its mutex.
304 head = &tcp_bhash[tcp_bhashfn(snum)];
305 spin_lock(&head->lock);
306 tb_for_each(tb, node, &head->chain)
307 if (tb->port == snum)
313 if (!hlist_empty(&tb->owners)) {
314 if (sk->sk_reuse > 1)
316 if (tb->fastreuse > 0 &&
317 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
321 if (tcp_bind_conflict(sk, tb))
327 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
329 if (hlist_empty(&tb->owners)) {
330 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
334 } else if (tb->fastreuse &&
335 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
338 if (!tcp_sk(sk)->bind_hash)
339 tcp_bind_hash(sk, tb, snum);
340 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
344 spin_unlock(&head->lock);
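/*
 * From userspace the snum == 0 case above is what you get when you
 * bind() or connect() without choosing a port: the rover picks one
 * from the local range and getsockname() reports it.  Minimal sketch
 * (userspace, not kernel code):
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;
        socklen_t len = sizeof(addr);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = 0;                      /* "pick one for me" */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            getsockname(fd, (struct sockaddr *)&addr, &len) < 0) {
                perror("bind/getsockname");
                return 1;
        }
        printf("kernel chose port %u\n", ntohs(addr.sin_port));
        close(fd);
        return 0;
}
#endif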
350 /* Get rid of any references to a local port held by the
353 static void __tcp_put_port(struct sock *sk)
355 struct inet_opt *inet = inet_sk(sk);
356 struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
357 struct tcp_bind_bucket *tb;
359 spin_lock(&head->lock);
360 tb = tcp_sk(sk)->bind_hash;
361 __sk_del_bind_node(sk);
362 tcp_sk(sk)->bind_hash = NULL;
364 tcp_bucket_destroy(tb);
365 spin_unlock(&head->lock);
368 void tcp_put_port(struct sock *sk)
375 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
376 * Look, when several writers sleep and a reader wakes them up, all but one
377 * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
378 * this, _but_ remember, it adds useless work on UP machines (a wakeup on each
379 * exclusive lock release). It should really be ifdefed.
382 void tcp_listen_wlock(void)
384 write_lock(&tcp_lhash_lock);
386 if (atomic_read(&tcp_lhash_users)) {
390 prepare_to_wait_exclusive(&tcp_lhash_wait,
391 &wait, TASK_UNINTERRUPTIBLE);
392 if (!atomic_read(&tcp_lhash_users))
394 write_unlock_bh(&tcp_lhash_lock);
396 write_lock_bh(&tcp_lhash_lock);
399 finish_wait(&tcp_lhash_wait, &wait);
403 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
405 struct hlist_head *list;
408 BUG_TRAP(sk_unhashed(sk));
409 if (listen_possible && sk->sk_state == TCP_LISTEN) {
410 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
411 lock = &tcp_lhash_lock;
414 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
415 lock = &tcp_ehash[sk->sk_hashent].lock;
418 __sk_add_node(sk, list);
419 sock_prot_inc_use(sk->sk_prot);
421 if (listen_possible && sk->sk_state == TCP_LISTEN)
422 wake_up(&tcp_lhash_wait);
425 static void tcp_v4_hash(struct sock *sk)
427 if (sk->sk_state != TCP_CLOSE) {
429 __tcp_v4_hash(sk, 1);
434 void tcp_unhash(struct sock *sk)
441 if (sk->sk_state == TCP_LISTEN) {
444 lock = &tcp_lhash_lock;
446 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
448 write_lock_bh(&head->lock);
451 if (__sk_del_node_init(sk))
452 sock_prot_dec_use(sk->sk_prot);
453 write_unlock_bh(lock);
456 if (sk->sk_state == TCP_LISTEN)
457 wake_up(&tcp_lhash_wait);
461 Check if an address is in the list
463 static inline int tcp_addr_in_list(
466 struct nx_info *nx_info)
468 if (rcv_saddr == daddr)
470 else if (rcv_saddr == 0) {
471 /* Accept any address or check the list */
475 int n = nx_info->nbipv4;
479 if (nx_info->ipv4[i] == daddr)
488 /* Don't inline this cruft. There are some nice properties to
489 * exploit here. The BSD API does not allow a listening TCP
490 * to specify the remote port nor the remote address for the
491 * connection. So always assume those are both wildcarded
492 * during the search since they can never be otherwise.
494 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
495 unsigned short hnum, int dif)
497 struct sock *result = NULL, *sk;
498 struct hlist_node *node;
502 sk_for_each(sk, node, head) {
503 struct inet_opt *inet = inet_sk(sk);
505 if (inet->num == hnum && !ipv6_only_sock(sk)) {
506 __u32 rcv_saddr = inet->rcv_saddr;
508 score = (sk->sk_family == PF_INET ? 1 : 0);
509 if (tcp_addr_in_list(rcv_saddr, daddr, sk->sk_nx_info))
513 if (sk->sk_bound_dev_if) {
514 if (sk->sk_bound_dev_if != dif)
520 if (score > hiscore) {
529 /* Optimize the common listener case. */
530 inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
533 struct sock *sk = NULL;
534 struct hlist_head *head;
536 read_lock(&tcp_lhash_lock);
537 head = &tcp_listening_hash[tcp_lhashfn(hnum)];
538 if (!hlist_empty(head)) {
539 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
541 if (inet->num == hnum && !sk->sk_node.next &&
542 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
543 tcp_addr_in_list(inet->rcv_saddr, daddr, sk->sk_nx_info) &&
544 !sk->sk_bound_dev_if)
546 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
552 read_unlock(&tcp_lhash_lock);
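/*
 * The listener walk above scores each candidate by how specific it
 * is: a non-wildcard local address and a bound device both beat a
 * plain INADDR_ANY listener.  Rough standalone sketch of that scoring
 * idea (simplified; the real code also consults the vserver nx_info
 * address lists):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct listener {
        uint32_t rcv_saddr;     /* 0 means INADDR_ANY */
        int      bound_dev_if;  /* 0 means "any device" */
};

/* Return -1 if the listener cannot match, otherwise a specificity score. */
static int listener_score(const struct listener *l, uint32_t daddr, int dif)
{
        int score = 0;

        if (l->rcv_saddr) {
                if (l->rcv_saddr != daddr)
                        return -1;
                score += 2;
        }
        if (l->bound_dev_if) {
                if (l->bound_dev_if != dif)
                        return -1;
                score += 2;
        }
        return score;           /* highest score wins the lookup */
}

int main(void)
{
        struct listener any = { 0, 0 };
        struct listener bound = { 0x0a000001, 0 };      /* bound to 10.0.0.1 */

        /* The more specific listener wins for packets to 10.0.0.1. */
        printf("any=%d bound=%d\n",
               listener_score(&any, 0x0a000001, 2),
               listener_score(&bound, 0x0a000001, 2));
        return 0;
}
#endif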
556 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
557 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
559 * Local BH must be disabled here.
562 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
566 struct tcp_ehash_bucket *head;
567 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
568 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
570 struct hlist_node *node;
571 /* Optimize here for direct hit, only listening connections can
572 * have wildcards anyway.
574 int hash = tcp_hashfn(daddr, hnum, saddr, sport);
575 head = &tcp_ehash[hash];
576 read_lock(&head->lock);
577 sk_for_each(sk, node, &head->chain) {
578 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
579 goto hit; /* You sunk my battleship! */
582 /* Must check for a TIME_WAIT'er before going to listener hash. */
583 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
584 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
589 read_unlock(&head->lock);
596 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
597 u32 daddr, u16 hnum, int dif)
599 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
602 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
605 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
611 sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
617 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
619 return secure_tcp_sequence_number(skb->nh.iph->daddr,
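/*
 * secure_tcp_sequence_number() derives the ISN from the connection
 * 4-tuple keyed by a boot-time secret, plus a steadily increasing
 * clock component (RFC 1948 style), so sequence numbers are neither
 * guessable nor reused too quickly.  Illustrative userspace sketch of
 * that shape only -- the real kernel uses a keyed cryptographic hash,
 * not the toy mixer below:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t boot_secret = 0x5bd1e995;       /* would be random at boot */

static uint32_t toy_isn(uint32_t saddr, uint32_t daddr,
                        uint16_t sport, uint16_t dport)
{
        /* hash(4-tuple, secret): toy mixer standing in for the real hash */
        uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport);

        h ^= boot_secret;
        h *= 2654435761u;       /* Knuth multiplicative mix */
        h ^= h >> 15;

        /* plus a clock term so the ISN space keeps moving forward */
        return h + (uint32_t)(time(NULL) * 64000);
}

int main(void)
{
        printf("ISN: %08x\n", toy_isn(0x0a000001, 0x0a000002, 45678, 80));
        return 0;
}
#endif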
625 /* called with local bh disabled */
626 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
627 struct tcp_tw_bucket **twp)
629 struct inet_opt *inet = inet_sk(sk);
630 u32 daddr = inet->rcv_saddr;
631 u32 saddr = inet->daddr;
632 int dif = sk->sk_bound_dev_if;
633 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
634 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
635 int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
636 struct tcp_ehash_bucket *head = &tcp_ehash[hash];
638 struct hlist_node *node;
639 struct tcp_tw_bucket *tw;
641 write_lock(&head->lock);
643 /* Check TIME-WAIT sockets first. */
644 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
645 tw = (struct tcp_tw_bucket *)sk2;
647 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
648 struct tcp_opt *tp = tcp_sk(sk);
650 /* With PAWS, it is safe from the viewpoint
651 of data integrity. Even without PAWS it
652 is safe provided sequence spaces do not
653 overlap i.e. at data rates <= 80Mbit/sec.
655 Actually, the idea is close to VJ's: only the
656 timestamp cache is held not per host,
657 but per port pair, and the TW bucket is used
660 If the TW bucket has already been destroyed we
661 fall back to VJ's scheme and use the initial
662 timestamp retrieved from the peer table.
664 if (tw->tw_ts_recent_stamp &&
665 (!twp || (sysctl_tcp_tw_reuse &&
667 tw->tw_ts_recent_stamp > 1))) {
669 tw->tw_snd_nxt + 65535 + 2) == 0)
671 tp->ts_recent = tw->tw_ts_recent;
672 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
681 /* And established part... */
682 sk_for_each(sk2, node, &head->chain) {
683 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
688 /* Must record num and sport now. Otherwise we will see
689 * in the hash table a socket with a funny identity. */
691 inet->sport = htons(lport);
692 sk->sk_hashent = hash;
693 BUG_TRAP(sk_unhashed(sk));
694 __sk_add_node(sk, &head->chain);
695 sock_prot_inc_use(sk->sk_prot);
696 write_unlock(&head->lock);
700 NET_INC_STATS_BH(TimeWaitRecycled);
702 /* Silly. Should hash-dance instead... */
703 tcp_tw_deschedule(tw);
704 NET_INC_STATS_BH(TimeWaitRecycled);
712 write_unlock(&head->lock);
713 return -EADDRNOTAVAIL;
717 * Bind a port for a connect operation and hash it.
719 static int tcp_v4_hash_connect(struct sock *sk)
721 unsigned short snum = inet_sk(sk)->num;
722 struct tcp_bind_hashbucket *head;
723 struct tcp_bind_bucket *tb;
728 int low = sysctl_local_port_range[0];
729 int high = sysctl_local_port_range[1];
730 int remaining = (high - low) + 1;
731 struct hlist_node *node;
732 struct tcp_tw_bucket *tw = NULL;
736 /* TODO. Actually it is not such a bad idea to remove
737 * tcp_portalloc_lock before the next submission to Linus.
738 * As soon as we touch this place at all it is time to think.
740 * Now it protects a single _advisory_ variable, tcp_port_rover,
741 * hence it is mostly useless.
742 * The code will work nicely if we just delete it, but
743 * I am afraid in the contended case it will work no better or
744 * even worse: another cpu will just hit the same bucket
746 * So some cpu salt could remove both the contention and the
747 * memory pingpong. Any ideas how to do this in a nice way?
749 spin_lock(&tcp_portalloc_lock);
750 rover = tcp_port_rover;
754 if ((rover < low) || (rover > high))
756 head = &tcp_bhash[tcp_bhashfn(rover)];
757 spin_lock(&head->lock);
759 /* Does not bother with rcv_saddr checks,
760 * because the established check is already
763 tb_for_each(tb, node, &head->chain) {
764 if (tb->port == rover) {
765 BUG_TRAP(!hlist_empty(&tb->owners));
766 if (tb->fastreuse >= 0)
768 if (!__tcp_v4_check_established(sk,
776 tb = tcp_bucket_create(head, rover);
778 spin_unlock(&head->lock);
785 spin_unlock(&head->lock);
786 } while (--remaining > 0);
787 tcp_port_rover = rover;
788 spin_unlock(&tcp_portalloc_lock);
792 return -EADDRNOTAVAIL;
795 /* All locks still held and bhs disabled */
796 tcp_port_rover = rover;
797 spin_unlock(&tcp_portalloc_lock);
799 tcp_bind_hash(sk, tb, rover);
800 if (sk_unhashed(sk)) {
801 inet_sk(sk)->sport = htons(rover);
802 __tcp_v4_hash(sk, 0);
804 spin_unlock(&head->lock);
807 tcp_tw_deschedule(tw);
815 head = &tcp_bhash[tcp_bhashfn(snum)];
816 tb = tcp_sk(sk)->bind_hash;
817 spin_lock_bh(&head->lock);
818 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
819 __tcp_v4_hash(sk, 0);
820 spin_unlock_bh(&head->lock);
823 spin_unlock(&head->lock);
824 /* No definite answer... Walk to established hash table */
825 ret = __tcp_v4_check_established(sk, snum, NULL);
832 /* This will initiate an outgoing connection. */
833 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
835 struct inet_opt *inet = inet_sk(sk);
836 struct tcp_opt *tp = tcp_sk(sk);
837 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
843 if (addr_len < sizeof(struct sockaddr_in))
846 if (usin->sin_family != AF_INET)
847 return -EAFNOSUPPORT;
849 nexthop = daddr = usin->sin_addr.s_addr;
850 if (inet->opt && inet->opt->srr) {
853 nexthop = inet->opt->faddr;
856 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
857 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
859 inet->sport, usin->sin_port, sk);
863 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
868 if (!inet->opt || !inet->opt->srr)
872 inet->saddr = rt->rt_src;
873 inet->rcv_saddr = inet->saddr;
875 if (tp->ts_recent_stamp && inet->daddr != daddr) {
876 /* Reset inherited state */
878 tp->ts_recent_stamp = 0;
882 if (sysctl_tcp_tw_recycle &&
883 !tp->ts_recent_stamp && rt->rt_dst == daddr) {
884 struct inet_peer *peer = rt_get_peer(rt);
886 /* VJ's idea. We save last timestamp seen from
887 * the destination in peer table, when entering state TIME-WAIT
888 * and initialize ts_recent from it, when trying new connection.
891 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
892 tp->ts_recent_stamp = peer->tcp_ts_stamp;
893 tp->ts_recent = peer->tcp_ts;
897 inet->dport = usin->sin_port;
900 tp->ext_header_len = 0;
902 tp->ext_header_len = inet->opt->optlen;
906 /* Socket identity is still unknown (sport may be zero).
907 * However we set state to SYN-SENT and, without releasing the socket
908 * lock, select a source port, enter ourselves into the hash tables and
909 * complete initialization after this.
911 tcp_set_state(sk, TCP_SYN_SENT);
912 err = tcp_v4_hash_connect(sk);
916 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
920 /* OK, now commit destination to socket. */
921 __sk_dst_set(sk, &rt->u.dst);
922 tcp_v4_setup_caps(sk, &rt->u.dst);
923 tp->ext2_header_len = rt->u.dst.header_len;
926 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
931 inet->id = tp->write_seq ^ jiffies;
933 err = tcp_connect(sk);
941 /* This unhashes the socket and releases the local port, if necessary. */
942 tcp_set_state(sk, TCP_CLOSE);
944 sk->sk_route_caps = 0;
949 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
951 return ((struct rtable *)skb->dst)->rt_iif;
954 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
956 return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
959 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
960 struct open_request ***prevp,
962 __u32 raddr, __u32 laddr)
964 struct tcp_listen_opt *lopt = tp->listen_opt;
965 struct open_request *req, **prev;
967 for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
968 (req = *prev) != NULL;
969 prev = &req->dl_next) {
970 if (req->rmt_port == rport &&
971 req->af.v4_req.rmt_addr == raddr &&
972 req->af.v4_req.loc_addr == laddr &&
973 TCP_INET_FAMILY(req->class->family)) {
983 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
985 struct tcp_opt *tp = tcp_sk(sk);
986 struct tcp_listen_opt *lopt = tp->listen_opt;
987 u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
989 req->expires = jiffies + TCP_TIMEOUT_INIT;
992 req->dl_next = lopt->syn_table[h];
994 write_lock(&tp->syn_wait_lock);
995 lopt->syn_table[h] = req;
996 write_unlock(&tp->syn_wait_lock);
1003 * This routine does path mtu discovery as defined in RFC1191.
1005 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
1008 struct dst_entry *dst;
1009 struct inet_opt *inet = inet_sk(sk);
1010 struct tcp_opt *tp = tcp_sk(sk);
1012 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
1013 * sent out by Linux are always <576 bytes so they should go through
1016 if (sk->sk_state == TCP_LISTEN)
1019 /* We don't check in the dst entry if pmtu discovery is forbidden
1020 * on this route. We just assume that no packet-too-big packets
1021 * are sent back when pmtu discovery is not active.
1022 * There is a small race when the user changes this flag in the
1023 * route, but I think that's acceptable.
1025 if ((dst = __sk_dst_check(sk, 0)) == NULL)
1028 dst->ops->update_pmtu(dst, mtu);
1030 /* Something is about to be wrong... Remember soft error
1031 * for the case that this connection will not be able to recover.
1033 if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
1034 sk->sk_err_soft = EMSGSIZE;
1036 mtu = dst_pmtu(dst);
1038 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
1039 tp->pmtu_cookie > mtu) {
1040 tcp_sync_mss(sk, mtu);
1042 /* Resend the TCP packet because it's
1043 * clear that the old packet has been
1044 * dropped. This is the new "fast" path mtu
1047 tcp_simple_retransmit(sk);
1048 } /* else let the usual retransmit timer handle it */
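/*
 * From an application's point of view the machinery above surfaces as
 * EMSGSIZE (the soft error set here) and a shrinking path MTU.  A
 * userspace sketch that forces DF on a connected socket and reads the
 * currently known path MTU back (assumes a reachable peer address):
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>         /* IP_MTU_DISCOVER, IP_PMTUDISC_DO, IP_MTU */
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int on = IP_PMTUDISC_DO, mtu = 0;
        socklen_t len = sizeof(mtu);
        struct sockaddr_in peer;

        memset(&peer, 0, sizeof(peer));
        peer.sin_family = AF_INET;
        peer.sin_port = htons(80);
        peer.sin_addr.s_addr = htonl(0x0a000002);       /* 10.0.0.2, example peer */

        /* Always set DF so routers must send FRAG_NEEDED instead of fragmenting. */
        setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &on, sizeof(on));

        if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) == 0 &&
            getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
                printf("path MTU towards peer: %d\n", mtu);
        else
                perror("connect/getsockopt");

        close(fd);
        return 0;
}
#endif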
1052 * This routine is called by the ICMP module when it gets some
1053 * sort of error condition. If err < 0 then the socket should
1054 * be closed and the error returned to the user. If err > 0
1055 * it's just the icmp type << 8 | icmp code. After adjustment
1056 * header points to the first 8 bytes of the tcp header. We need
1057 * to find the appropriate port.
1059 * The locking strategy used here is very "optimistic". When
1060 * someone else accesses the socket the ICMP is just dropped
1061 * and for some paths there is no check at all.
1062 * A more general error queue to queue errors for later handling
1063 * is probably better.
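/*
 * Worked example of the "icmp type << 8 | icmp code" encoding
 * mentioned above: an ICMP "destination unreachable, port unreachable"
 * message is type 3, code 3, so the combined value is (3 << 8) | 3 =
 * 0x0303.  Tiny standalone check:
 */
#if 0
#include <stdio.h>

#define ICMP_DEST_UNREACH       3
#define ICMP_PORT_UNREACH       3

int main(void)
{
        unsigned int info = (ICMP_DEST_UNREACH << 8) | ICMP_PORT_UNREACH;

        printf("encoded err = 0x%04x (%u)\n", info, info);      /* 0x0303, 771 */
        return 0;
}
#endif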
1067 void tcp_v4_err(struct sk_buff *skb, u32 info)
1069 struct iphdr *iph = (struct iphdr *)skb->data;
1070 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1072 struct inet_opt *inet;
1073 int type = skb->h.icmph->type;
1074 int code = skb->h.icmph->code;
1079 if (skb->len < (iph->ihl << 2) + 8) {
1080 ICMP_INC_STATS_BH(IcmpInErrors);
1084 sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1085 th->source, tcp_v4_iif(skb));
1087 ICMP_INC_STATS_BH(IcmpInErrors);
1090 if (sk->sk_state == TCP_TIME_WAIT) {
1091 tcp_tw_put((struct tcp_tw_bucket *)sk);
1096 /* If too many ICMPs get dropped on busy
1097 * servers this needs to be solved differently.
1099 if (sock_owned_by_user(sk))
1100 NET_INC_STATS_BH(LockDroppedIcmps);
1102 if (sk->sk_state == TCP_CLOSE)
1106 seq = ntohl(th->seq);
1107 if (sk->sk_state != TCP_LISTEN &&
1108 !between(seq, tp->snd_una, tp->snd_nxt)) {
1109 NET_INC_STATS(OutOfWindowIcmps);
1114 case ICMP_SOURCE_QUENCH:
1115 /* This is deprecated, but if someone generated it,
1116 * we have no reason to ignore it.
1118 if (!sock_owned_by_user(sk))
1121 case ICMP_PARAMETERPROB:
1124 case ICMP_DEST_UNREACH:
1125 if (code > NR_ICMP_UNREACH)
1128 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1129 if (!sock_owned_by_user(sk))
1130 do_pmtu_discovery(sk, iph, info);
1134 err = icmp_err_convert[code].errno;
1136 case ICMP_TIME_EXCEEDED:
1143 switch (sk->sk_state) {
1144 struct open_request *req, **prev;
1146 if (sock_owned_by_user(sk))
1149 req = tcp_v4_search_req(tp, &prev, th->dest,
1150 iph->daddr, iph->saddr);
1154 /* ICMPs are not backlogged, hence we cannot get
1155 an established socket here.
1159 if (seq != req->snt_isn) {
1160 NET_INC_STATS_BH(OutOfWindowIcmps);
1165 * Still in SYN_RECV, just remove it silently.
1166 * There is no good way to pass the error to the newly
1167 * created socket, and POSIX does not want network
1168 * errors returned from accept().
1170 tcp_synq_drop(sk, req, prev);
1174 case TCP_SYN_RECV: /* Cannot happen.
1175 It can, e.g., if SYNs crossed.
1177 if (!sock_owned_by_user(sk)) {
1178 TCP_INC_STATS_BH(TcpAttemptFails);
1181 sk->sk_error_report(sk);
1185 sk->sk_err_soft = err;
1190 /* If we've already connected we will keep trying
1191 * until we time out, or the user gives up.
1193 * RFC 1122 4.2.3.9 allows us to consider only PROTO_UNREACH
1194 * and PORT_UNREACH as hard errors (well, FRAG_FAILED too,
1195 * but it is obsoleted by pmtu discovery).
1197 * Note that in the modern internet, where routing is unreliable
1198 * and broken firewalls sit in every dark corner sending random
1199 * errors ordered by their masters, even these two messages finally lose
1200 * their original sense (even Linux sends invalid PORT_UNREACHs)
1202 * Now we are in compliance with RFCs.
1207 if (!sock_owned_by_user(sk) && inet->recverr) {
1209 sk->sk_error_report(sk);
1210 } else { /* Only an error on timeout */
1211 sk->sk_err_soft = err;
1219 /* This routine computes an IPv4 TCP checksum. */
1220 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1221 struct sk_buff *skb)
1223 struct inet_opt *inet = inet_sk(sk);
1225 if (skb->ip_summed == CHECKSUM_HW) {
1226 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1227 skb->csum = offsetof(struct tcphdr, check);
1229 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1230 csum_partial((char *)th,
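/*
 * The checksum computed here covers the usual IPv4 pseudo-header
 * (saddr, daddr, zero, protocol, TCP length) followed by the TCP
 * header and payload.  Standalone userspace sketch of the 16-bit
 * one's-complement sum (illustrative only; the kernel uses the
 * optimised csum_* helpers above):
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

static uint32_t sum_words(const void *buf, size_t len, uint32_t sum)
{
        const uint8_t *p = buf;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)                /* odd trailing byte is padded with zero */
                sum += (uint32_t)p[0] << 8;
        return sum;
}

/* segment = TCP header + payload; addresses in network byte order */
static uint16_t tcp_csum(uint32_t saddr, uint32_t daddr,
                         const void *segment, size_t len)
{
        uint8_t pseudo[12];
        uint32_t sum;

        memcpy(&pseudo[0], &saddr, 4);
        memcpy(&pseudo[4], &daddr, 4);
        pseudo[8] = 0;
        pseudo[9] = 6;                          /* IPPROTO_TCP */
        pseudo[10] = (len >> 8) & 0xff;
        pseudo[11] = len & 0xff;

        sum = sum_words(pseudo, sizeof(pseudo), 0);
        sum = sum_words(segment, len, sum);
        return csum_fold(sum);
}

int main(void)
{
        uint8_t tcphdr[20] = { 0xd4, 0x31, 0x00, 0x50 };  /* sport 54321, dport 80 */
        uint32_t s = htonl(0x0a000001), d = htonl(0x0a000002);

        printf("checksum = 0x%04x\n", tcp_csum(s, d, tcphdr, sizeof(tcphdr)));
        return 0;
}
#endif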
1237 * This routine will send an RST to the other tcp.
1239 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
1241 * Answer: if a packet caused the RST, it is not for a socket
1242 * existing in our system; if it is matched to a socket,
1243 * it is just a duplicate segment or a bug in the other side's TCP.
1244 * So we build the reply based only on the parameters
1245 * that arrived with the segment.
1246 * Exception: precedence violation. We do not implement it in any case.
1249 static void tcp_v4_send_reset(struct sk_buff *skb)
1251 struct tcphdr *th = skb->h.th;
1253 struct ip_reply_arg arg;
1255 /* Never send a reset in response to a reset. */
1259 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1262 /* Swap the send and the receive. */
1263 memset(&rth, 0, sizeof(struct tcphdr));
1264 rth.dest = th->source;
1265 rth.source = th->dest;
1266 rth.doff = sizeof(struct tcphdr) / 4;
1270 rth.seq = th->ack_seq;
1273 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1274 skb->len - (th->doff << 2));
1277 memset(&arg, 0, sizeof arg);
1278 arg.iov[0].iov_base = (unsigned char *)&rth;
1279 arg.iov[0].iov_len = sizeof rth;
1280 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1281 skb->nh.iph->saddr, /*XXX*/
1282 sizeof(struct tcphdr), IPPROTO_TCP, 0);
1283 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1285 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1287 TCP_INC_STATS_BH(TcpOutSegs);
1288 TCP_INC_STATS_BH(TcpOutRsts);
1291 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1292 outside socket context, is certainly ugly. What can I do?
1295 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1298 struct tcphdr *th = skb->h.th;
1303 struct ip_reply_arg arg;
1305 memset(&rep.th, 0, sizeof(struct tcphdr));
1306 memset(&arg, 0, sizeof arg);
1308 arg.iov[0].iov_base = (unsigned char *)&rep;
1309 arg.iov[0].iov_len = sizeof(rep.th);
1311 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1312 (TCPOPT_TIMESTAMP << 8) |
1314 rep.tsopt[1] = htonl(tcp_time_stamp);
1315 rep.tsopt[2] = htonl(ts);
1316 arg.iov[0].iov_len = sizeof(rep);
1319 /* Swap the send and the receive. */
1320 rep.th.dest = th->source;
1321 rep.th.source = th->dest;
1322 rep.th.doff = arg.iov[0].iov_len / 4;
1323 rep.th.seq = htonl(seq);
1324 rep.th.ack_seq = htonl(ack);
1326 rep.th.window = htons(win);
1328 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1329 skb->nh.iph->saddr, /*XXX*/
1330 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1331 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1333 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1335 TCP_INC_STATS_BH(TcpOutSegs);
1338 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1340 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1342 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1343 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1348 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1350 tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1354 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1355 struct open_request *req)
1358 struct ip_options *opt = req->af.v4_req.opt;
1359 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1361 { .daddr = ((opt && opt->srr) ?
1363 req->af.v4_req.rmt_addr),
1364 .saddr = req->af.v4_req.loc_addr,
1365 .tos = RT_CONN_FLAGS(sk) } },
1366 .proto = IPPROTO_TCP,
1368 { .sport = inet_sk(sk)->sport,
1369 .dport = req->rmt_port } } };
1371 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1372 IP_INC_STATS_BH(IpOutNoRoutes);
1375 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1377 IP_INC_STATS_BH(IpOutNoRoutes);
1384 * Send a SYN-ACK after having received a SYN.
1385 * This still operates on an open_request only, not on a big
1388 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1389 struct dst_entry *dst)
1392 struct sk_buff * skb;
1394 /* First, grab a route. */
1395 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1398 skb = tcp_make_synack(sk, dst, req);
1401 struct tcphdr *th = skb->h.th;
1403 th->check = tcp_v4_check(th, skb->len,
1404 req->af.v4_req.loc_addr,
1405 req->af.v4_req.rmt_addr,
1406 csum_partial((char *)th, skb->len,
1409 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1410 req->af.v4_req.rmt_addr,
1411 req->af.v4_req.opt);
1412 if (err == NET_XMIT_CN)
1422 * IPv4 open_request destructor.
1424 static void tcp_v4_or_free(struct open_request *req)
1426 if (req->af.v4_req.opt)
1427 kfree(req->af.v4_req.opt);
1430 static inline void syn_flood_warning(struct sk_buff *skb)
1432 static unsigned long warntime;
1434 if (time_after(jiffies, (warntime + HZ * 60))) {
1437 "possible SYN flooding on port %d. Sending cookies.\n",
1438 ntohs(skb->h.th->dest));
1443 * Save and compile IPv4 options into the open_request if needed.
1445 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1446 struct sk_buff *skb)
1448 struct ip_options *opt = &(IPCB(skb)->opt);
1449 struct ip_options *dopt = NULL;
1451 if (opt && opt->optlen) {
1452 int opt_size = optlength(opt);
1453 dopt = kmalloc(opt_size, GFP_ATOMIC);
1455 if (ip_options_echo(dopt, skb)) {
1465 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1466 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1467 * It would be better to replace it with a global counter for all sockets
1468 * but then some measure against one socket starving all other sockets
1471 * It was 128 by default. Experiments with real servers show that
1472 * it is absolutely not enough even at 100 conn/sec. 256 cures most
1473 * of the problems. This value is adjusted to 128 for very small machines
1474 * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
1475 * Further increasing requires to change hash table size.
1477 int sysctl_max_syn_backlog = 256;
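/*
 * The global limit above pairs with the per-listener backlog handed to
 * listen(2).  Userspace sketch that reads the current limit (standard
 * /proc path) and opens a listener with a generous accept-queue
 * backlog:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_max_syn_backlog", "r");
        int max_syn = 0;
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;

        if (f) {
                if (fscanf(f, "%d", &max_syn) == 1)
                        printf("tcp_max_syn_backlog = %d\n", max_syn);
                fclose(f);
        }

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080);

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 256) < 0) {              /* accept-queue backlog */
                perror("bind/listen");
                return 1;
        }
        printf("listening on port 8080 with backlog 256\n");
        return 0;
}
#endif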
1479 struct or_calltable or_ipv4 = {
1481 .rtx_syn_ack = tcp_v4_send_synack,
1482 .send_ack = tcp_v4_or_send_ack,
1483 .destructor = tcp_v4_or_free,
1484 .send_reset = tcp_v4_send_reset,
1487 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1490 struct open_request *req;
1491 __u32 saddr = skb->nh.iph->saddr;
1492 __u32 daddr = skb->nh.iph->daddr;
1493 __u32 isn = TCP_SKB_CB(skb)->when;
1494 struct dst_entry *dst = NULL;
1495 #ifdef CONFIG_SYN_COOKIES
1496 int want_cookie = 0;
1498 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1501 /* Never answer SYNs sent to broadcast or multicast */
1502 if (((struct rtable *)skb->dst)->rt_flags &
1503 (RTCF_BROADCAST | RTCF_MULTICAST))
1506 /* TW buckets are converted to open requests without
1507 * limitations; they conserve resources and the peer is
1508 * evidently a real one.
1510 if (tcp_synq_is_full(sk) && !isn) {
1511 #ifdef CONFIG_SYN_COOKIES
1512 if (sysctl_tcp_syncookies) {
1519 /* The accept backlog is full. If we have already queued enough
1520 * warm entries in the syn queue, drop the request. It is better than
1521 * clogging the syn queue with openreqs with exponentially increasing
1524 if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1527 req = tcp_openreq_alloc();
1531 tcp_clear_options(&tp);
1533 tp.user_mss = tcp_sk(sk)->user_mss;
1535 tcp_parse_options(skb, &tp, 0);
1538 tcp_clear_options(&tp);
1542 if (tp.saw_tstamp && !tp.rcv_tsval) {
1543 /* Some OSes (unknown ones, but I see them on a web server which
1544 * contains information interesting only for windows'
1545 * users) do not send their stamp in the SYN. It is the easy case.
1546 * We simply do not advertise TS support.
1551 tp.tstamp_ok = tp.saw_tstamp;
1553 tcp_openreq_init(req, &tp, skb);
1555 req->af.v4_req.loc_addr = daddr;
1556 req->af.v4_req.rmt_addr = saddr;
1557 req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1558 req->class = &or_ipv4;
1560 TCP_ECN_create_request(req, skb->h.th);
1563 #ifdef CONFIG_SYN_COOKIES
1564 syn_flood_warning(skb);
1566 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1568 struct inet_peer *peer = NULL;
1570 /* VJ's idea. We save the last timestamp seen
1571 * from the destination in the peer table when entering
1572 * TIME-WAIT state, and check against it before
1573 * accepting a new connection request.
1575 * If "isn" is not zero, this request hit an alive
1576 * timewait bucket, so all the necessary checks
1577 * are made in the function processing the timewait state.
1579 if (tp.saw_tstamp &&
1580 sysctl_tcp_tw_recycle &&
1581 (dst = tcp_v4_route_req(sk, req)) != NULL &&
1582 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1583 peer->v4daddr == saddr) {
1584 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1585 (s32)(peer->tcp_ts - req->ts_recent) >
1587 NET_INC_STATS_BH(PAWSPassiveRejected);
1592 /* Kill the following clause if you dislike this approach. */
1593 else if (!sysctl_tcp_syncookies &&
1594 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1595 (sysctl_max_syn_backlog >> 2)) &&
1596 (!peer || !peer->tcp_ts_stamp) &&
1597 (!dst || !dst_metric(dst, RTAX_RTT))) {
1598 /* Without syncookies the last quarter of
1599 * the backlog is filled with destinations
1600 * proven to be alive.
1601 * It means that we continue to communicate
1602 * with destinations already remembered
1603 * at the moment of the synflood.
1605 NETDEBUG(if (net_ratelimit()) \
1606 printk(KERN_DEBUG "TCP: drop open "
1607 "request from %u.%u."
1610 ntohs(skb->h.th->source)));
1615 isn = tcp_v4_init_sequence(sk, skb);
1619 if (tcp_v4_send_synack(sk, req, dst))
1623 tcp_openreq_free(req);
1625 tcp_v4_synq_add(sk, req);
1630 tcp_openreq_free(req);
1632 TCP_INC_STATS_BH(TcpAttemptFails);
1638 * The three way handshake has completed - we got a valid synack -
1639 * now create the new socket.
1641 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1642 struct open_request *req,
1643 struct dst_entry *dst)
1645 struct inet_opt *newinet;
1646 struct tcp_opt *newtp;
1649 if (tcp_acceptq_is_full(sk))
1652 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1655 newsk = tcp_create_openreq_child(sk, req, skb);
1659 newsk->sk_dst_cache = dst;
1660 tcp_v4_setup_caps(newsk, dst);
1662 newtp = tcp_sk(newsk);
1663 newinet = inet_sk(newsk);
1664 newinet->daddr = req->af.v4_req.rmt_addr;
1665 newinet->rcv_saddr = req->af.v4_req.loc_addr;
1666 newinet->saddr = req->af.v4_req.loc_addr;
1667 newinet->opt = req->af.v4_req.opt;
1668 req->af.v4_req.opt = NULL;
1669 newinet->mc_index = tcp_v4_iif(skb);
1670 newinet->mc_ttl = skb->nh.iph->ttl;
1671 newtp->ext_header_len = 0;
1673 newtp->ext_header_len = newinet->opt->optlen;
1674 newtp->ext2_header_len = dst->header_len;
1675 newinet->id = newtp->write_seq ^ jiffies;
1677 tcp_sync_mss(newsk, dst_pmtu(dst));
1678 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1679 tcp_initialize_rcv_mss(newsk);
1681 __tcp_v4_hash(newsk, 0);
1682 __tcp_inherit_port(sk, newsk);
1687 NET_INC_STATS_BH(ListenOverflows);
1689 NET_INC_STATS_BH(ListenDrops);
1694 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1696 struct tcphdr *th = skb->h.th;
1697 struct iphdr *iph = skb->nh.iph;
1698 struct tcp_opt *tp = tcp_sk(sk);
1700 struct open_request **prev;
1701 /* Find possible connection requests. */
1702 struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1703 iph->saddr, iph->daddr);
1705 return tcp_check_req(sk, skb, req, prev);
1707 nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1714 if (nsk->sk_state != TCP_TIME_WAIT) {
1718 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1722 #ifdef CONFIG_SYN_COOKIES
1723 if (!th->rst && !th->syn && th->ack)
1724 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1729 static int tcp_v4_checksum_init(struct sk_buff *skb)
1731 if (skb->ip_summed == CHECKSUM_HW) {
1732 skb->ip_summed = CHECKSUM_UNNECESSARY;
1733 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1734 skb->nh.iph->daddr, skb->csum))
1737 NETDEBUG(if (net_ratelimit())
1738 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1739 skb->ip_summed = CHECKSUM_NONE;
1741 if (skb->len <= 76) {
1742 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1744 skb_checksum(skb, 0, skb->len, 0)))
1746 skb->ip_summed = CHECKSUM_UNNECESSARY;
1748 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1750 skb->nh.iph->daddr, 0);
1756 /* The socket must have its spinlock held when we get
1759 * We have a potential double-lock case here, so even when
1760 * doing backlog processing we use the BH locking scheme.
1761 * This is because we cannot sleep with the original spinlock
1764 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1766 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1767 TCP_CHECK_TIMER(sk);
1768 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1770 TCP_CHECK_TIMER(sk);
1774 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1777 if (sk->sk_state == TCP_LISTEN) {
1778 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1783 if (tcp_child_process(sk, nsk, skb))
1789 TCP_CHECK_TIMER(sk);
1790 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1792 TCP_CHECK_TIMER(sk);
1796 tcp_v4_send_reset(skb);
1799 /* Be careful here. If this function gets more complicated and
1800 * gcc suffers from register pressure on the x86, sk (in %ebx)
1801 * might be destroyed here. This current version compiles correctly,
1802 * but you have been warned.
1807 TCP_INC_STATS_BH(TcpInErrs);
1815 int tcp_v4_rcv(struct sk_buff *skb)
1821 if (skb->pkt_type != PACKET_HOST)
1824 /* Count it even if it's bad */
1825 TCP_INC_STATS_BH(TcpInSegs);
1827 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1832 if (th->doff < sizeof(struct tcphdr) / 4)
1834 if (!pskb_may_pull(skb, th->doff * 4))
1837 /* An explanation is required here, I think.
1838 * Packet length and doff are validated by header prediction,
1839 * provided the case of th->doff==0 is eliminated.
1840 * So, we defer the checks. */
1841 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1842 tcp_v4_checksum_init(skb) < 0))
1846 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1847 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1848 skb->len - th->doff * 4);
1849 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1850 TCP_SKB_CB(skb)->when = 0;
1851 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1852 TCP_SKB_CB(skb)->sacked = 0;
1854 sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1855 skb->nh.iph->daddr, ntohs(th->dest),
1862 if (sk->sk_state == TCP_TIME_WAIT)
1865 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1866 goto discard_and_relse;
1868 if (sk_filter(sk, skb, 0))
1869 goto discard_and_relse;
1875 if (!sock_owned_by_user(sk)) {
1876 if (!tcp_prequeue(sk, skb))
1877 ret = tcp_v4_do_rcv(sk, skb);
1879 sk_add_backlog(sk, skb);
1887 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1890 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1892 TCP_INC_STATS_BH(TcpInErrs);
1894 tcp_v4_send_reset(skb);
1898 /* Discard frame. */
1907 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1908 tcp_tw_put((struct tcp_tw_bucket *) sk);
1912 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1913 TCP_INC_STATS_BH(TcpInErrs);
1914 tcp_tw_put((struct tcp_tw_bucket *) sk);
1917 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1918 skb, th, skb->len)) {
1920 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1924 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1925 tcp_tw_put((struct tcp_tw_bucket *)sk);
1929 /* Fall through to ACK */
1932 tcp_v4_timewait_ack(sk, skb);
1936 case TCP_TW_SUCCESS:;
1941 /* With per-bucket locks this operation is not atomic, so
1942 * this version is no worse.
1944 static void __tcp_v4_rehash(struct sock *sk)
1946 sk->sk_prot->unhash(sk);
1947 sk->sk_prot->hash(sk);
1950 static int tcp_v4_reselect_saddr(struct sock *sk)
1952 struct inet_opt *inet = inet_sk(sk);
1955 __u32 old_saddr = inet->saddr;
1957 __u32 daddr = inet->daddr;
1959 if (inet->opt && inet->opt->srr)
1960 daddr = inet->opt->faddr;
1962 /* Query new route. */
1963 err = ip_route_connect(&rt, daddr, 0,
1964 RT_TOS(inet->tos) | sk->sk_localroute,
1965 sk->sk_bound_dev_if,
1967 inet->sport, inet->dport, sk);
1971 __sk_dst_set(sk, &rt->u.dst);
1972 tcp_v4_setup_caps(sk, &rt->u.dst);
1973 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1975 new_saddr = rt->rt_src;
1977 if (new_saddr == old_saddr)
1980 if (sysctl_ip_dynaddr > 1) {
1981 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1982 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1984 NIPQUAD(new_saddr));
1987 inet->saddr = new_saddr;
1988 inet->rcv_saddr = new_saddr;
1990 /* XXX The only ugly spot where we need to
1991 * XXX really change the socket's identity after
1992 * XXX it has entered the hashes. -DaveM
1994 * Besides that, it does not check for connection
1995 * uniqueness. Wait for trouble.
1997 __tcp_v4_rehash(sk);
2001 int tcp_v4_rebuild_header(struct sock *sk)
2003 struct inet_opt *inet = inet_sk(sk);
2004 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
2008 /* Route is OK, nothing to do. */
2013 daddr = inet->daddr;
2014 if (inet->opt && inet->opt->srr)
2015 daddr = inet->opt->faddr;
2018 struct flowi fl = { .oif = sk->sk_bound_dev_if,
2021 .saddr = inet->saddr,
2022 .tos = RT_CONN_FLAGS(sk) } },
2023 .proto = IPPROTO_TCP,
2025 { .sport = inet->sport,
2026 .dport = inet->dport } } };
2028 err = ip_route_output_flow(&rt, &fl, sk, 0);
2031 __sk_dst_set(sk, &rt->u.dst);
2032 tcp_v4_setup_caps(sk, &rt->u.dst);
2033 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
2037 /* Routing failed... */
2038 sk->sk_route_caps = 0;
2040 if (!sysctl_ip_dynaddr ||
2041 sk->sk_state != TCP_SYN_SENT ||
2042 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
2043 (err = tcp_v4_reselect_saddr(sk)) != 0)
2044 sk->sk_err_soft = -err;
2049 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2051 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2052 struct inet_opt *inet = inet_sk(sk);
2054 sin->sin_family = AF_INET;
2055 sin->sin_addr.s_addr = inet->daddr;
2056 sin->sin_port = inet->dport;
2059 /* VJ's idea. Save last timestamp seen from this destination
2060 * and hold it at least for normal timewait interval to use for duplicate
2061 * segment detection in subsequent connections, before they enter synchronized
2065 int tcp_v4_remember_stamp(struct sock *sk)
2067 struct inet_opt *inet = inet_sk(sk);
2068 struct tcp_opt *tp = tcp_sk(sk);
2069 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2070 struct inet_peer *peer = NULL;
2073 if (!rt || rt->rt_dst != inet->daddr) {
2074 peer = inet_getpeer(inet->daddr, 1);
2078 rt_bind_peer(rt, 1);
2083 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2084 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2085 peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2086 peer->tcp_ts_stamp = tp->ts_recent_stamp;
2087 peer->tcp_ts = tp->ts_recent;
2097 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2099 struct inet_peer *peer = NULL;
2101 peer = inet_getpeer(tw->tw_daddr, 1);
2104 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2105 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2106 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2107 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2108 peer->tcp_ts = tw->tw_ts_recent;
2117 struct tcp_func ipv4_specific = {
2118 .queue_xmit = ip_queue_xmit,
2119 .send_check = tcp_v4_send_check,
2120 .rebuild_header = tcp_v4_rebuild_header,
2121 .conn_request = tcp_v4_conn_request,
2122 .syn_recv_sock = tcp_v4_syn_recv_sock,
2123 .remember_stamp = tcp_v4_remember_stamp,
2124 .net_header_len = sizeof(struct iphdr),
2125 .setsockopt = ip_setsockopt,
2126 .getsockopt = ip_getsockopt,
2127 .addr2sockaddr = v4_addr2sockaddr,
2128 .sockaddr_len = sizeof(struct sockaddr_in),
2131 /* NOTE: A lot of things are set to zero explicitly by the call to
2132 * sk_alloc(), so they need not be done here.
2134 static int tcp_v4_init_sock(struct sock *sk)
2136 struct tcp_opt *tp = tcp_sk(sk);
2138 skb_queue_head_init(&tp->out_of_order_queue);
2139 tcp_init_xmit_timers(sk);
2140 tcp_prequeue_init(tp);
2142 tp->rto = TCP_TIMEOUT_INIT;
2143 tp->mdev = TCP_TIMEOUT_INIT;
2145 /* So many TCP implementations out there (incorrectly) count the
2146 * initial SYN frame in their delayed-ACK and congestion control
2147 * algorithms that we must have the following bandaid to talk
2148 * efficiently to them. -DaveM
2152 /* See draft-stevens-tcpca-spec-01 for discussion of the
2153 * initialization of these values.
2155 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
2156 tp->snd_cwnd_clamp = ~0;
2157 tp->mss_cache = 536;
2159 tp->reordering = sysctl_tcp_reordering;
2161 sk->sk_state = TCP_CLOSE;
2163 sk->sk_write_space = tcp_write_space;
2164 sk->sk_use_write_queue = 1;
2166 tp->af_specific = &ipv4_specific;
2168 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2169 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2171 atomic_inc(&tcp_sockets_allocated);
2176 static int tcp_v4_destroy_sock(struct sock *sk)
2178 struct tcp_opt *tp = tcp_sk(sk);
2180 tcp_clear_xmit_timers(sk);
2182 /* Clean up the write buffer. */
2183 tcp_writequeue_purge(sk);
2185 /* Cleans up our, hopefully empty, out_of_order_queue. */
2186 __skb_queue_purge(&tp->out_of_order_queue);
2188 /* Clean up the prequeue; it really must be empty */
2189 __skb_queue_purge(&tp->ucopy.prequeue);
2191 /* Clean up a referenced TCP bind bucket. */
2195 /* If sendmsg cached page exists, toss it. */
2196 if (inet_sk(sk)->sndmsg_page)
2197 __free_page(inet_sk(sk)->sndmsg_page);
2199 atomic_dec(&tcp_sockets_allocated);
2204 #ifdef CONFIG_PROC_FS
2205 /* Proc filesystem TCP sock list dumping. */
2207 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2209 return hlist_empty(head) ? NULL :
2210 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2213 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2215 return tw->tw_node.next ?
2216 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2219 static void *listening_get_next(struct seq_file *seq, void *cur)
2222 struct hlist_node *node;
2223 struct sock *sk = cur;
2224 struct tcp_iter_state* st = seq->private;
2228 sk = sk_head(&tcp_listening_hash[0]);
2234 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2235 struct open_request *req = cur;
2237 tp = tcp_sk(st->syn_wait_sk);
2241 if (!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
2243 if (req->class->family == st->family) {
2249 if (++st->sbucket >= TCP_SYNQ_HSIZE)
2252 req = tp->listen_opt->syn_table[st->sbucket];
2254 sk = sk_next(st->syn_wait_sk);
2255 st->state = TCP_SEQ_STATE_LISTENING;
2256 read_unlock_bh(&tp->syn_wait_lock);
2260 sk_for_each_from(sk, node) {
2261 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2263 if (sk->sk_family == st->family) {
2268 read_lock_bh(&tp->syn_wait_lock);
2269 if (tp->listen_opt && tp->listen_opt->qlen) {
2270 st->uid = sock_i_uid(sk);
2271 st->syn_wait_sk = sk;
2272 st->state = TCP_SEQ_STATE_OPENREQ;
2276 read_unlock_bh(&tp->syn_wait_lock);
2278 if (++st->bucket < TCP_LHTABLE_SIZE) {
2279 sk = sk_head(&tcp_listening_hash[st->bucket]);
2287 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2289 void *rc = listening_get_next(seq, NULL);
2291 while (rc && *pos) {
2292 rc = listening_get_next(seq, rc);
2298 static void *established_get_first(struct seq_file *seq)
2300 struct tcp_iter_state* st = seq->private;
2303 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2305 struct hlist_node *node;
2306 struct tcp_tw_bucket *tw;
2308 read_lock(&tcp_ehash[st->bucket].lock);
2309 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2310 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2312 if (sk->sk_family != st->family)
2317 st->state = TCP_SEQ_STATE_TIME_WAIT;
2318 tw_for_each(tw, node,
2319 &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2320 if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2322 if (tw->tw_family != st->family)
2327 read_unlock(&tcp_ehash[st->bucket].lock);
2328 st->state = TCP_SEQ_STATE_ESTABLISHED;
2334 static void *established_get_next(struct seq_file *seq, void *cur)
2336 struct sock *sk = cur;
2337 struct tcp_tw_bucket *tw;
2338 struct hlist_node *node;
2339 struct tcp_iter_state* st = seq->private;
2343 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2347 while (tw && tw->tw_family != st->family &&
2348 !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH)) {
2355 read_unlock(&tcp_ehash[st->bucket].lock);
2356 st->state = TCP_SEQ_STATE_ESTABLISHED;
2357 if (++st->bucket < tcp_ehash_size) {
2358 read_lock(&tcp_ehash[st->bucket].lock);
2359 sk = sk_head(&tcp_ehash[st->bucket].chain);
2367 sk_for_each_from(sk, node) {
2368 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2370 if (sk->sk_family == st->family)
2374 st->state = TCP_SEQ_STATE_TIME_WAIT;
2375 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2383 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2385 void *rc = established_get_first(seq);
2388 rc = established_get_next(seq, rc);
2394 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2397 struct tcp_iter_state* st = seq->private;
2400 st->state = TCP_SEQ_STATE_LISTENING;
2401 rc = listening_get_idx(seq, &pos);
2404 tcp_listen_unlock();
2406 st->state = TCP_SEQ_STATE_ESTABLISHED;
2407 rc = established_get_idx(seq, pos);
2413 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2415 struct tcp_iter_state* st = seq->private;
2416 st->state = TCP_SEQ_STATE_LISTENING;
2418 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2421 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2424 struct tcp_iter_state* st;
2426 if (v == SEQ_START_TOKEN) {
2427 rc = tcp_get_idx(seq, 0);
2432 switch (st->state) {
2433 case TCP_SEQ_STATE_OPENREQ:
2434 case TCP_SEQ_STATE_LISTENING:
2435 rc = listening_get_next(seq, v);
2437 tcp_listen_unlock();
2439 st->state = TCP_SEQ_STATE_ESTABLISHED;
2440 rc = established_get_first(seq);
2443 case TCP_SEQ_STATE_ESTABLISHED:
2444 case TCP_SEQ_STATE_TIME_WAIT:
2445 rc = established_get_next(seq, v);
2453 static void tcp_seq_stop(struct seq_file *seq, void *v)
2455 struct tcp_iter_state* st = seq->private;
2457 switch (st->state) {
2458 case TCP_SEQ_STATE_OPENREQ:
2460 struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2461 read_unlock_bh(&tp->syn_wait_lock);
2463 case TCP_SEQ_STATE_LISTENING:
2464 if (v != SEQ_START_TOKEN)
2465 tcp_listen_unlock();
2467 case TCP_SEQ_STATE_TIME_WAIT:
2468 case TCP_SEQ_STATE_ESTABLISHED:
2470 read_unlock(&tcp_ehash[st->bucket].lock);
2476 static int tcp_seq_open(struct inode *inode, struct file *file)
2478 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2479 struct seq_file *seq;
2480 struct tcp_iter_state *s;
2483 if (unlikely(afinfo == NULL))
2486 s = kmalloc(sizeof(*s), GFP_KERNEL);
2489 memset(s, 0, sizeof(*s));
2490 s->family = afinfo->family;
2491 s->seq_ops.start = tcp_seq_start;
2492 s->seq_ops.next = tcp_seq_next;
2493 s->seq_ops.show = afinfo->seq_show;
2494 s->seq_ops.stop = tcp_seq_stop;
2496 rc = seq_open(file, &s->seq_ops);
2499 seq = file->private_data;
2508 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2511 struct proc_dir_entry *p;
2515 afinfo->seq_fops->owner = afinfo->owner;
2516 afinfo->seq_fops->open = tcp_seq_open;
2517 afinfo->seq_fops->read = seq_read;
2518 afinfo->seq_fops->llseek = seq_lseek;
2519 afinfo->seq_fops->release = seq_release_private;
2521 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2529 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2533 proc_net_remove(afinfo->name);
2534 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
2537 static void get_openreq4(struct sock *sk, struct open_request *req,
2538 char *tmpbuf, int i, int uid)
2540 int ttd = req->expires - jiffies;
2542 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2543 " %02X %08X:%08X %02X:%08X %08X %5d %8d %u %d %p",
2545 req->af.v4_req.loc_addr,
2546 ntohs(inet_sk(sk)->sport),
2547 req->af.v4_req.rmt_addr,
2548 ntohs(req->rmt_port),
2550 0, 0, /* could print option size, but that is af dependent. */
2551 1, /* timers active (only the expire timer) */
2552 jiffies_to_clock_t(ttd),
2555 0, /* non standard timer */
2556 0, /* open_requests have no inode */
2557 atomic_read(&sk->sk_refcnt),
2561 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2564 unsigned long timer_expires;
2565 struct tcp_opt *tp = tcp_sk(sp);
2566 struct inet_opt *inet = inet_sk(sp);
2567 unsigned int dest = inet->daddr;
2568 unsigned int src = inet->rcv_saddr;
2569 __u16 destp = ntohs(inet->dport);
2570 __u16 srcp = ntohs(inet->sport);
2572 if (tp->pending == TCP_TIME_RETRANS) {
2574 timer_expires = tp->timeout;
2575 } else if (tp->pending == TCP_TIME_PROBE0) {
2577 timer_expires = tp->timeout;
2578 } else if (timer_pending(&sp->sk_timer)) {
2580 timer_expires = sp->sk_timer.expires;
2583 timer_expires = jiffies;
2586 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2587 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2588 i, src, srcp, dest, destp, sp->sk_state,
2589 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2591 jiffies_to_clock_t(timer_expires - jiffies),
2596 atomic_read(&sp->sk_refcnt), sp,
2597 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2599 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
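/*
 * The sprintf format above is exactly what one row of /proc/net/tcp
 * looks like.  Userspace sketch that parses the local/remote address
 * columns and the socket state back out of that file:
 */
#if 0
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
        FILE *f = fopen("/proc/net/tcp", "r");
        char line[512];
        unsigned int laddr, lport, raddr, rport, state;
        struct in_addr a, b;
        int sl;

        if (!f)
                return 1;
        fgets(line, sizeof(line), f);           /* skip the header row */
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "%d: %x:%x %x:%x %x",
                           &sl, &laddr, &lport, &raddr, &rport, &state) != 6)
                        continue;
                /* The %08X fields are the raw __be32 words, so reusing the
                 * bits restores network byte order on either endianness. */
                a.s_addr = laddr;
                b.s_addr = raddr;
                printf("%s:%u -> ", inet_ntoa(a), lport);
                printf("%s:%u st=%02x\n", inet_ntoa(b), rport, state);
        }
        fclose(f);
        return 0;
}
#endif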
2602 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2604 unsigned int dest, src;
2606 int ttd = tw->tw_ttd - jiffies;
2611 dest = tw->tw_daddr;
2612 src = tw->tw_rcv_saddr;
2613 destp = ntohs(tw->tw_dport);
2614 srcp = ntohs(tw->tw_sport);
2616 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2617 " %02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p",
2618 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2619 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2620 atomic_read(&tw->tw_refcnt), tw);
2625 static int tcp4_seq_show(struct seq_file *seq, void *v)
2627 struct tcp_iter_state* st;
2628 char tmpbuf[TMPSZ + 1];
2630 if (v == SEQ_START_TOKEN) {
2631 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2632 " sl local_address rem_address st tx_queue "
2633 "rx_queue tr tm->when retrnsmt uid timeout "
2639 switch (st->state) {
2640 case TCP_SEQ_STATE_LISTENING:
2641 case TCP_SEQ_STATE_ESTABLISHED:
2642 get_tcp4_sock(v, tmpbuf, st->num);
2644 case TCP_SEQ_STATE_OPENREQ:
2645 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2647 case TCP_SEQ_STATE_TIME_WAIT:
2648 get_timewait4_sock(v, tmpbuf, st->num);
2651 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2656 static struct file_operations tcp4_seq_fops;
2657 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2658 .owner = THIS_MODULE,
2661 .seq_show = tcp4_seq_show,
2662 .seq_fops = &tcp4_seq_fops,
2665 int __init tcp4_proc_init(void)
2667 return tcp_proc_register(&tcp4_seq_afinfo);
2670 void tcp4_proc_exit(void)
2672 tcp_proc_unregister(&tcp4_seq_afinfo);
2674 #endif /* CONFIG_PROC_FS */
2676 struct proto tcp_prot = {
2679 .connect = tcp_v4_connect,
2680 .disconnect = tcp_disconnect,
2681 .accept = tcp_accept,
2683 .init = tcp_v4_init_sock,
2684 .destroy = tcp_v4_destroy_sock,
2685 .shutdown = tcp_shutdown,
2686 .setsockopt = tcp_setsockopt,
2687 .getsockopt = tcp_getsockopt,
2688 .sendmsg = tcp_sendmsg,
2689 .recvmsg = tcp_recvmsg,
2690 .backlog_rcv = tcp_v4_do_rcv,
2691 .hash = tcp_v4_hash,
2692 .unhash = tcp_unhash,
2693 .get_port = tcp_v4_get_port,
2698 void __init tcp_v4_init(struct net_proto_family *ops)
2700 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2702 panic("Failed to create the TCP control socket.\n");
2703 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2704 inet_sk(tcp_socket->sk)->uc_ttl = -1;
2706 /* Unhash it so that IP input processing does not even
2707 * see it, we do not wish this socket to see incoming
2710 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2713 EXPORT_SYMBOL(ipv4_specific);
2714 EXPORT_SYMBOL(tcp_bind_hash);
2715 EXPORT_SYMBOL(tcp_bucket_create);
2716 EXPORT_SYMBOL(tcp_hashinfo);
2717 EXPORT_SYMBOL(tcp_inherit_port);
2718 EXPORT_SYMBOL(tcp_listen_wlock);
2719 EXPORT_SYMBOL(tcp_port_rover);
2720 EXPORT_SYMBOL(tcp_prot);
2721 EXPORT_SYMBOL(tcp_put_port);
2722 EXPORT_SYMBOL(tcp_unhash);
2723 EXPORT_SYMBOL(tcp_v4_conn_request);
2724 EXPORT_SYMBOL(tcp_v4_connect);
2725 EXPORT_SYMBOL(tcp_v4_do_rcv);
2726 EXPORT_SYMBOL(tcp_v4_lookup_listener);
2727 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2728 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2729 EXPORT_SYMBOL(tcp_v4_send_check);
2730 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2732 #ifdef CONFIG_PROC_FS
2733 EXPORT_SYMBOL(tcp_proc_register);
2734 EXPORT_SYMBOL(tcp_proc_unregister);
2736 #ifdef CONFIG_SYSCTL
2737 EXPORT_SYMBOL(sysctl_local_port_range);
2738 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2739 EXPORT_SYMBOL(sysctl_tcp_low_latency);