2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * open_request handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after year
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
51 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
69 #include <net/inet_common.h>
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/vserver/debug.h>
79 extern int sysctl_ip_dynaddr;
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
92 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
93 .__tcp_lhash_lock = RW_LOCK_UNLOCKED,
94 .__tcp_lhash_users = ATOMIC_INIT(0),
96 = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
97 .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
101 * This array holds the first and last local port number.
102 * For high-usage systems, use sysctl to change this to
105 int sysctl_local_port_range[2] = { 1024, 4999 };
106 int tcp_port_rover = 1024 - 1;
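/* Illustrative note: sysctl_local_port_range is exported through sysctl,
 * so on a typical system the automatic-port range can be widened with
 * something like
 *	echo "32768 61000" > /proc/sys/net/ipv4/ip_local_port_range
 * (path shown for illustration).  tcp_port_rover remembers the last port
 * handed out, so port selection walks the range round-robin rather than
 * always starting from the bottom.
 */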
108 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
109 __u32 faddr, __u16 fport)
111 int h = (laddr ^ lport) ^ (faddr ^ fport);
114 return h & (tcp_ehash_size - 1);
117 static __inline__ int tcp_sk_hashfn(struct sock *sk)
119 struct inet_opt *inet = inet_sk(sk);
120 __u32 laddr = inet->rcv_saddr;
121 __u16 lport = inet->num;
122 __u32 faddr = inet->daddr;
123 __u16 fport = inet->dport;
125 return tcp_hashfn(laddr, lport, faddr, fport);
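/* Note: the established hash keys on the full 4-tuple (local and remote
 * address and port), while the bind hash used further down keys on the
 * local port alone.  For example, 10.0.0.1:80 <-> A:x and
 * 10.0.0.1:80 <-> B:y usually land in different ehash buckets but always
 * share the bind bucket for port 80.
 */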
128 /* Allocate and initialize a new TCP local port bind bucket.
129 * The bindhash mutex for snum's hash chain must be held here.
131 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
134 struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
139 INIT_HLIST_HEAD(&tb->owners);
140 hlist_add_head(&tb->node, &head->chain);
145 /* Caller must hold hashbucket lock for this tb with local BH disabled */
146 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
148 if (hlist_empty(&tb->owners)) {
149 __hlist_del(&tb->node);
150 kmem_cache_free(tcp_bucket_cachep, tb);
154 /* Caller must disable local BH processing. */
155 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
157 struct tcp_bind_hashbucket *head =
158 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
159 struct tcp_bind_bucket *tb;
161 spin_lock(&head->lock);
162 tb = tcp_sk(sk)->bind_hash;
163 sk_add_bind_node(child, &tb->owners);
164 tcp_sk(child)->bind_hash = tb;
165 spin_unlock(&head->lock);
168 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
171 __tcp_inherit_port(sk, child);
175 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
178 inet_sk(sk)->num = snum;
179 sk_add_bind_node(sk, &tb->owners);
180 tcp_sk(sk)->bind_hash = tb;
184 Return 1 if addr matches the socket IP list
185 or the socket is INADDR_ANY
187 static inline int tcp_in_list(struct sock *sk, u32 addr)
189 struct nx_info *nxi = sk->sk_nx_info;
191 vxdprintk(VXD_CBIT(net, 2), "tcp_in_list(%p) %p,%p;%lx",
192 sk, nxi, sk->sk_socket,
193 (sk->sk_socket?sk->sk_socket->flags:0));
200 if (nxi->ipv4[i] == addr)
203 else if (!tcp_v4_rcv_saddr(sk) || tcp_v4_rcv_saddr(sk) == addr)
209 Check if the addresses in sk1 conflict with those in sk2
211 int tcp_ipv4_addr_conflict(struct sock *sk1, struct sock *sk2)
214 vxdprintk(VXD_CBIT(net, 5),
215 "tcp_ipv4_addr_conflict(%p,%p) %p,%p;%lx %p,%p;%lx",
217 sk1->sk_nx_info, sk1->sk_socket,
218 (sk1->sk_socket?sk1->sk_socket->flags:0),
219 sk2->sk_nx_info, sk2->sk_socket,
220 (sk2->sk_socket?sk2->sk_socket->flags:0));
222 if (tcp_v4_rcv_saddr(sk1)) {
223 /* Bind to one address only */
224 return tcp_in_list (sk2, tcp_v4_rcv_saddr(sk1));
225 } else if (sk1->sk_nx_info) {
226 /* A restricted bind(any) */
227 struct nx_info *nxi = sk1->sk_nx_info;
232 if (tcp_in_list (sk2, nxi->ipv4[i]))
234 } else /* A bind(any) does not allow another bind on the same port */
239 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
242 struct hlist_node *node;
243 int reuse = sk->sk_reuse;
245 sk_for_each_bound(sk2, node, &tb->owners) {
247 !tcp_v6_ipv6only(sk2) &&
248 (!sk->sk_bound_dev_if ||
249 !sk2->sk_bound_dev_if ||
250 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
251 if (!reuse || !sk2->sk_reuse ||
252 sk2->sk_state == TCP_LISTEN) {
253 if (tcp_ipv4_addr_conflict(sk, sk2))
261 /* Obtain a reference to a local port for the given sock,
262 * if snum is zero, select any available local port.
264 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
266 struct tcp_bind_hashbucket *head;
267 struct hlist_node *node;
268 struct tcp_bind_bucket *tb;
273 int low = sysctl_local_port_range[0];
274 int high = sysctl_local_port_range[1];
275 int remaining = (high - low) + 1;
278 spin_lock(&tcp_portalloc_lock);
279 rover = tcp_port_rover;
282 if (rover < low || rover > high)
284 head = &tcp_bhash[tcp_bhashfn(rover)];
285 spin_lock(&head->lock);
286 tb_for_each(tb, node, &head->chain)
287 if (tb->port == rover)
291 spin_unlock(&head->lock);
292 } while (--remaining > 0);
293 tcp_port_rover = rover;
294 spin_unlock(&tcp_portalloc_lock);
296 /* Exhausted local port range during search? */
301 /* OK, here is the one we will use. HEAD is
302 * non-NULL and we hold its mutex.
306 head = &tcp_bhash[tcp_bhashfn(snum)];
307 spin_lock(&head->lock);
308 tb_for_each(tb, node, &head->chain)
309 if (tb->port == snum)
315 if (!hlist_empty(&tb->owners)) {
316 if (sk->sk_reuse > 1)
318 if (tb->fastreuse > 0 &&
319 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
323 if (tcp_bind_conflict(sk, tb))
329 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
331 if (hlist_empty(&tb->owners)) {
332 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
336 } else if (tb->fastreuse &&
337 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
340 if (!tcp_sk(sk)->bind_hash)
341 tcp_bind_hash(sk, tb, snum);
342 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
346 spin_unlock(&head->lock);
352 /* Get rid of any references to a local port held by the
355 static void __tcp_put_port(struct sock *sk)
357 struct inet_opt *inet = inet_sk(sk);
358 struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
359 struct tcp_bind_bucket *tb;
361 spin_lock(&head->lock);
362 tb = tcp_sk(sk)->bind_hash;
363 __sk_del_bind_node(sk);
364 tcp_sk(sk)->bind_hash = NULL;
366 tcp_bucket_destroy(tb);
367 spin_unlock(&head->lock);
370 void tcp_put_port(struct sock *sk)
377 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
378 * Look, when several writers sleep and a reader wakes them up, all but one
379 * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
380 * this, _but_ remember, it adds useless work on UP machines (a wake up on each
381 * exclusive lock release). It should really be ifdefed.
384 void tcp_listen_wlock(void)
386 write_lock(&tcp_lhash_lock);
388 if (atomic_read(&tcp_lhash_users)) {
392 prepare_to_wait_exclusive(&tcp_lhash_wait,
393 &wait, TASK_UNINTERRUPTIBLE);
394 if (!atomic_read(&tcp_lhash_users))
396 write_unlock_bh(&tcp_lhash_lock);
398 write_lock_bh(&tcp_lhash_lock);
401 finish_wait(&tcp_lhash_wait, &wait);
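/* Rough sketch of the intended protocol (see also tcp_listen_unlock()
 * used by the /proc iterator below): short lookups take tcp_lhash_lock
 * for reading, long traversals register themselves in tcp_lhash_users
 * and wake tcp_lhash_wait when done, and a writer entering
 * tcp_listen_wlock() above holds the write lock and sleeps exclusively
 * until that user count drains to zero.
 */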
405 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
407 struct hlist_head *list;
410 BUG_TRAP(sk_unhashed(sk));
411 if (listen_possible && sk->sk_state == TCP_LISTEN) {
412 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
413 lock = &tcp_lhash_lock;
416 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
417 lock = &tcp_ehash[sk->sk_hashent].lock;
420 __sk_add_node(sk, list);
421 sock_prot_inc_use(sk->sk_prot);
423 if (listen_possible && sk->sk_state == TCP_LISTEN)
424 wake_up(&tcp_lhash_wait);
427 static void tcp_v4_hash(struct sock *sk)
429 if (sk->sk_state != TCP_CLOSE) {
431 __tcp_v4_hash(sk, 1);
436 void tcp_unhash(struct sock *sk)
443 if (sk->sk_state == TCP_LISTEN) {
446 lock = &tcp_lhash_lock;
448 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
450 write_lock_bh(&head->lock);
453 if (__sk_del_node_init(sk))
454 sock_prot_dec_use(sk->sk_prot);
455 write_unlock_bh(lock);
458 if (sk->sk_state == TCP_LISTEN)
459 wake_up(&tcp_lhash_wait);
463 Check if an address is in the list
465 static inline int tcp_addr_in_list(
468 struct nx_info *nx_info)
470 if (rcv_saddr == daddr)
472 else if (rcv_saddr == 0) {
473 /* Accept any address or check the list */
477 int n = nx_info->nbipv4;
481 if (nx_info->ipv4[i] == daddr)
490 /* Don't inline this cruft. There are some nice properties to
491 * exploit here. The BSD API does not allow a listening TCP
492 * to specify the remote port nor the remote address for the
493 * connection. So always assume those are both wildcarded
494 * during the search since they can never be otherwise.
496 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
497 unsigned short hnum, int dif)
499 struct sock *result = NULL, *sk;
500 struct hlist_node *node;
504 sk_for_each(sk, node, head) {
505 struct inet_opt *inet = inet_sk(sk);
507 if (inet->num == hnum && !ipv6_only_sock(sk)) {
508 __u32 rcv_saddr = inet->rcv_saddr;
510 score = (sk->sk_family == PF_INET ? 1 : 0);
511 if (tcp_addr_in_list(rcv_saddr, daddr, sk->sk_nx_info))
515 if (sk->sk_bound_dev_if) {
516 if (sk->sk_bound_dev_if != dif)
522 if (score > hiscore) {
531 /* Optimize the common listener case. */
532 inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
535 struct sock *sk = NULL;
536 struct hlist_head *head;
538 read_lock(&tcp_lhash_lock);
539 head = &tcp_listening_hash[tcp_lhashfn(hnum)];
540 if (!hlist_empty(head)) {
541 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
543 if (inet->num == hnum && !sk->sk_node.next &&
544 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
545 tcp_addr_in_list(inet->rcv_saddr, daddr, sk->sk_nx_info) &&
546 !sk->sk_bound_dev_if)
548 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
554 read_unlock(&tcp_lhash_lock);
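/* The fast path above skips the scoring walk in
 * __tcp_v4_lookup_listener(): if the first socket on the chain is the
 * only entry, is bound to exactly this port, is not IPv6-only, accepts
 * the destination address and is not bound to a device, nothing can
 * score higher, so it is returned directly.
 */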
558 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
559 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
561 * Local BH must be disabled here.
564 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
568 struct tcp_ehash_bucket *head;
569 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
570 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
572 struct hlist_node *node;
573 /* Optimize here for direct hit, only listening connections can
574 * have wildcards anyway.
576 int hash = tcp_hashfn(daddr, hnum, saddr, sport);
577 head = &tcp_ehash[hash];
578 read_lock(&head->lock);
579 sk_for_each(sk, node, &head->chain) {
580 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
581 goto hit; /* You sunk my battleship! */
584 /* Must check for a TIME_WAIT'er before going to listener hash. */
585 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
586 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
591 read_unlock(&head->lock);
598 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
599 u32 daddr, u16 hnum, int dif)
601 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
604 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
607 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
613 sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
619 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
621 return secure_tcp_sequence_number(skb->nh.iph->daddr,
627 /* called with local bh disabled */
628 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
629 struct tcp_tw_bucket **twp)
631 struct inet_opt *inet = inet_sk(sk);
632 u32 daddr = inet->rcv_saddr;
633 u32 saddr = inet->daddr;
634 int dif = sk->sk_bound_dev_if;
635 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
636 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
637 int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
638 struct tcp_ehash_bucket *head = &tcp_ehash[hash];
640 struct hlist_node *node;
641 struct tcp_tw_bucket *tw;
643 write_lock(&head->lock);
645 /* Check TIME-WAIT sockets first. */
646 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
647 tw = (struct tcp_tw_bucket *)sk2;
649 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
650 struct tcp_opt *tp = tcp_sk(sk);
652 /* With PAWS, it is safe from the viewpoint
653 of data integrity. Even without PAWS it
654 is safe provided sequence spaces do not
655 overlap, i.e. at data rates <= 80 Mbit/sec.
657 Actually, the idea is close to VJ's one,
658 only the timestamp cache is held not per host,
659 but per port pair, and the TW bucket is used
662 If the TW bucket has already been destroyed we
663 fall back to VJ's scheme and use the initial
664 timestamp retrieved from the peer table.
666 if (tw->tw_ts_recent_stamp &&
667 (!twp || (sysctl_tcp_tw_reuse &&
669 tw->tw_ts_recent_stamp > 1))) {
671 tw->tw_snd_nxt + 65535 + 2) == 0)
673 tp->ts_recent = tw->tw_ts_recent;
674 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
683 /* And established part... */
684 sk_for_each(sk2, node, &head->chain) {
685 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
690 /* Must record num and sport now. Otherwise we will see
691 * a socket with a funny identity in the hash table. */
693 inet->sport = htons(lport);
694 sk->sk_hashent = hash;
695 BUG_TRAP(sk_unhashed(sk));
696 __sk_add_node(sk, &head->chain);
697 sock_prot_inc_use(sk->sk_prot);
698 write_unlock(&head->lock);
702 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
704 /* Silly. Should hash-dance instead... */
705 tcp_tw_deschedule(tw);
706 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
714 write_unlock(&head->lock);
715 return -EADDRNOTAVAIL;
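/* Put differently (a rough summary of the logic above): a connect() may
 * take over a port pair still in TIME-WAIT provided the old bucket has a
 * recent timestamp, so PAWS can reject stray old segments, and either
 * the caller passed twp == NULL or sysctl_tcp_tw_reuse is enabled.
 * Otherwise the 4-tuple is considered busy and -EADDRNOTAVAIL is
 * returned.
 */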
719 * Bind a port for a connect operation and hash it.
721 static int tcp_v4_hash_connect(struct sock *sk)
723 unsigned short snum = inet_sk(sk)->num;
724 struct tcp_bind_hashbucket *head;
725 struct tcp_bind_bucket *tb;
730 int low = sysctl_local_port_range[0];
731 int high = sysctl_local_port_range[1];
732 int remaining = (high - low) + 1;
733 struct hlist_node *node;
734 struct tcp_tw_bucket *tw = NULL;
738 /* TODO. Actually it is not such a bad idea to remove
739 * tcp_portalloc_lock before the next submission to Linus.
740 * As soon as we touch this place at all it is time to think.
742 * Now it protects a single _advisory_ variable, tcp_port_rover,
743 * hence it is mostly useless.
744 * The code will work nicely if we just delete it, but
745 * I am afraid that in the contended case it will work no better or
746 * even worse: another cpu will just hit the same bucket
748 * So some cpu salt could remove both contention and
749 * memory pingpong. Any ideas how to do this in a nice way?
751 spin_lock(&tcp_portalloc_lock);
752 rover = tcp_port_rover;
756 if ((rover < low) || (rover > high))
758 head = &tcp_bhash[tcp_bhashfn(rover)];
759 spin_lock(&head->lock);
761 /* Does not bother with rcv_saddr checks,
762 * because the established check is already
765 tb_for_each(tb, node, &head->chain) {
766 if (tb->port == rover) {
767 BUG_TRAP(!hlist_empty(&tb->owners));
768 if (tb->fastreuse >= 0)
770 if (!__tcp_v4_check_established(sk,
778 tb = tcp_bucket_create(head, rover);
780 spin_unlock(&head->lock);
787 spin_unlock(&head->lock);
788 } while (--remaining > 0);
789 tcp_port_rover = rover;
790 spin_unlock(&tcp_portalloc_lock);
794 return -EADDRNOTAVAIL;
797 /* All locks still held and bhs disabled */
798 tcp_port_rover = rover;
799 spin_unlock(&tcp_portalloc_lock);
801 tcp_bind_hash(sk, tb, rover);
802 if (sk_unhashed(sk)) {
803 inet_sk(sk)->sport = htons(rover);
804 __tcp_v4_hash(sk, 0);
806 spin_unlock(&head->lock);
809 tcp_tw_deschedule(tw);
817 head = &tcp_bhash[tcp_bhashfn(snum)];
818 tb = tcp_sk(sk)->bind_hash;
819 spin_lock_bh(&head->lock);
820 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
821 __tcp_v4_hash(sk, 0);
822 spin_unlock_bh(&head->lock);
825 spin_unlock(&head->lock);
826 /* No definite answer... Walk to established hash table */
827 ret = __tcp_v4_check_established(sk, snum, NULL);
834 /* This will initiate an outgoing connection. */
835 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
837 struct inet_opt *inet = inet_sk(sk);
838 struct tcp_opt *tp = tcp_sk(sk);
839 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
845 if (addr_len < sizeof(struct sockaddr_in))
848 if (usin->sin_family != AF_INET)
849 return -EAFNOSUPPORT;
851 nexthop = daddr = usin->sin_addr.s_addr;
852 if (inet->opt && inet->opt->srr) {
855 nexthop = inet->opt->faddr;
858 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
859 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
861 inet->sport, usin->sin_port, sk);
865 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
870 if (!inet->opt || !inet->opt->srr)
874 inet->saddr = rt->rt_src;
875 inet->rcv_saddr = inet->saddr;
877 if (tp->ts_recent_stamp && inet->daddr != daddr) {
878 /* Reset inherited state */
880 tp->ts_recent_stamp = 0;
884 if (sysctl_tcp_tw_recycle &&
885 !tp->ts_recent_stamp && rt->rt_dst == daddr) {
886 struct inet_peer *peer = rt_get_peer(rt);
888 /* VJ's idea. We save the last timestamp seen from
889 * the destination in the peer table when entering state TIME-WAIT
890 * and initialize ts_recent from it when trying a new connection.
893 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
894 tp->ts_recent_stamp = peer->tcp_ts_stamp;
895 tp->ts_recent = peer->tcp_ts;
899 inet->dport = usin->sin_port;
902 tp->ext_header_len = 0;
904 tp->ext_header_len = inet->opt->optlen;
908 /* Socket identity is still unknown (sport may be zero).
909 * However we set the state to SYN-SENT and, without releasing the socket
910 * lock, select a source port, enter ourselves into the hash tables and
911 * complete initialization after this.
913 tcp_set_state(sk, TCP_SYN_SENT);
914 err = tcp_v4_hash_connect(sk);
918 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
922 /* OK, now commit destination to socket. */
923 __sk_dst_set(sk, &rt->u.dst);
924 tcp_v4_setup_caps(sk, &rt->u.dst);
925 tp->ext2_header_len = rt->u.dst.header_len;
928 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
933 inet->id = tp->write_seq ^ jiffies;
935 err = tcp_connect(sk);
943 /* This unhashes the socket and releases the local port, if necessary. */
944 tcp_set_state(sk, TCP_CLOSE);
946 sk->sk_route_caps = 0;
951 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
953 return ((struct rtable *)skb->dst)->rt_iif;
956 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
958 return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
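/* The syn-queue hash mixes the peer address and port with a per-listener
 * random value (lopt->hash_rnd) via jhash_2words(), so a remote host
 * cannot predict which of the TCP_SYNQ_HSIZE buckets its embryonic
 * requests will fall into and deliberately overload a single chain.
 */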
961 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
962 struct open_request ***prevp,
964 __u32 raddr, __u32 laddr)
966 struct tcp_listen_opt *lopt = tp->listen_opt;
967 struct open_request *req, **prev;
969 for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
970 (req = *prev) != NULL;
971 prev = &req->dl_next) {
972 if (req->rmt_port == rport &&
973 req->af.v4_req.rmt_addr == raddr &&
974 req->af.v4_req.loc_addr == laddr &&
975 TCP_INET_FAMILY(req->class->family)) {
985 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
987 struct tcp_opt *tp = tcp_sk(sk);
988 struct tcp_listen_opt *lopt = tp->listen_opt;
989 u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
991 req->expires = jiffies + TCP_TIMEOUT_INIT;
994 req->dl_next = lopt->syn_table[h];
996 write_lock(&tp->syn_wait_lock);
997 lopt->syn_table[h] = req;
998 write_unlock(&tp->syn_wait_lock);
1005 * This routine does path mtu discovery as defined in RFC1191.
1007 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
1010 struct dst_entry *dst;
1011 struct inet_opt *inet = inet_sk(sk);
1012 struct tcp_opt *tp = tcp_sk(sk);
1014 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
1015 * sent out by Linux are always < 576 bytes, so they should go through
1018 if (sk->sk_state == TCP_LISTEN)
1021 /* We don't check in the dst entry if pmtu discovery is forbidden
1022 * on this route. We just assume that no packet-too-big packets
1023 * are sent back when pmtu discovery is not active.
1024 * There is a small race when the user changes this flag in the
1025 * route, but I think that's acceptable.
1027 if ((dst = __sk_dst_check(sk, 0)) == NULL)
1030 dst->ops->update_pmtu(dst, mtu);
1032 /* Something is about to go wrong... Remember the soft error
1033 * for the case that this connection is not able to recover.
1035 if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
1036 sk->sk_err_soft = EMSGSIZE;
1038 mtu = dst_pmtu(dst);
1040 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
1041 tp->pmtu_cookie > mtu) {
1042 tcp_sync_mss(sk, mtu);
1044 /* Resend the TCP packet because it's
1045 * clear that the old packet has been
1046 * dropped. This is the new "fast" path mtu
1049 tcp_simple_retransmit(sk);
1050 } /* else let the usual retransmit timer handle it */
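/* Example of the fast path above: suppose the cached pmtu_cookie is 1500
 * and a router reports a next-hop MTU of 1400.  update_pmtu() lowers the
 * route MTU, tcp_sync_mss() shrinks the MSS to match, and
 * tcp_simple_retransmit() resends the dropped segment immediately
 * instead of waiting for the retransmit timer to fire.
 */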
1054 * This routine is called by the ICMP module when it gets some
1055 * sort of error condition. If err < 0 then the socket should
1056 * be closed and the error returned to the user. If err > 0
1057 * it's just the icmp type << 8 | icmp code. After adjustment
1058 * header points to the first 8 bytes of the tcp header. We need
1059 * to find the appropriate port.
1061 * The locking strategy used here is very "optimistic". When
1062 * someone else accesses the socket the ICMP is just dropped
1063 * and for some paths there is no check at all.
1064 * A more general error queue to queue errors for later handling
1065 * is probably better.
1069 void tcp_v4_err(struct sk_buff *skb, u32 info)
1071 struct iphdr *iph = (struct iphdr *)skb->data;
1072 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1074 struct inet_opt *inet;
1075 int type = skb->h.icmph->type;
1076 int code = skb->h.icmph->code;
1081 if (skb->len < (iph->ihl << 2) + 8) {
1082 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1086 sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1087 th->source, tcp_v4_iif(skb));
1089 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1092 if (sk->sk_state == TCP_TIME_WAIT) {
1093 tcp_tw_put((struct tcp_tw_bucket *)sk);
1098 /* If too many ICMPs get dropped on busy
1099 * servers this needs to be solved differently.
1101 if (sock_owned_by_user(sk))
1102 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1104 if (sk->sk_state == TCP_CLOSE)
1108 seq = ntohl(th->seq);
1109 if (sk->sk_state != TCP_LISTEN &&
1110 !between(seq, tp->snd_una, tp->snd_nxt)) {
1111 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1116 case ICMP_SOURCE_QUENCH:
1117 /* This is deprecated, but if someone generated it,
1118 * we have no reason to ignore it.
1120 if (!sock_owned_by_user(sk))
1123 case ICMP_PARAMETERPROB:
1126 case ICMP_DEST_UNREACH:
1127 if (code > NR_ICMP_UNREACH)
1130 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1131 if (!sock_owned_by_user(sk))
1132 do_pmtu_discovery(sk, iph, info);
1136 err = icmp_err_convert[code].errno;
1138 case ICMP_TIME_EXCEEDED:
1145 switch (sk->sk_state) {
1146 struct open_request *req, **prev;
1148 if (sock_owned_by_user(sk))
1151 req = tcp_v4_search_req(tp, &prev, th->dest,
1152 iph->daddr, iph->saddr);
1156 /* ICMPs are not backlogged, hence we cannot get
1157 an established socket here.
1161 if (seq != req->snt_isn) {
1162 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
1167 * Still in SYN_RECV, just remove it silently.
1168 * There is no good way to pass the error to the newly
1169 * created socket, and POSIX does not want network
1170 * errors returned from accept().
1172 tcp_synq_drop(sk, req, prev);
1176 case TCP_SYN_RECV: /* Cannot happen.
1177 It can, e.g., if SYNs crossed.
1179 if (!sock_owned_by_user(sk)) {
1180 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1183 sk->sk_error_report(sk);
1187 sk->sk_err_soft = err;
1192 /* If we've already connected we will keep trying
1193 * until we time out, or the user gives up.
1195 * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
1196 * to be considered hard errors (well, FRAG_FAILED too,
1197 * but it is obsoleted by pmtu discovery).
1199 * Note that in the modern internet, where routing is unreliable
1200 * and broken firewalls sit in every dark corner sending random
1201 * errors ordered by their masters, even these two messages finally lose
1202 * their original sense (even Linux sends invalid PORT_UNREACHs)
1204 * Now we are in compliance with RFCs.
1209 if (!sock_owned_by_user(sk) && inet->recverr) {
1211 sk->sk_error_report(sk);
1212 } else { /* Only an error on timeout */
1213 sk->sk_err_soft = err;
1221 /* This routine computes an IPv4 TCP checksum. */
1222 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1223 struct sk_buff *skb)
1225 struct inet_opt *inet = inet_sk(sk);
1227 if (skb->ip_summed == CHECKSUM_HW) {
1228 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1229 skb->csum = offsetof(struct tcphdr, check);
1231 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1232 csum_partial((char *)th,
1239 * This routine will send an RST to the other tcp.
1241 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
1243 * Answer: if a packet caused an RST, it is not for a socket
1244 * existing in our system; if it is matched to a socket,
1245 * it is just a duplicate segment or a bug in the other side's TCP.
1246 * So we build the reply based only on the parameters
1247 * that arrived with the segment.
1248 * Exception: precedence violation. We do not implement it in any case.
1251 static void tcp_v4_send_reset(struct sk_buff *skb)
1253 struct tcphdr *th = skb->h.th;
1255 struct ip_reply_arg arg;
1257 /* Never send a reset in response to a reset. */
1261 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1264 /* Swap the send and the receive. */
1265 memset(&rth, 0, sizeof(struct tcphdr));
1266 rth.dest = th->source;
1267 rth.source = th->dest;
1268 rth.doff = sizeof(struct tcphdr) / 4;
1272 rth.seq = th->ack_seq;
1275 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1276 skb->len - (th->doff << 2));
1279 memset(&arg, 0, sizeof arg);
1280 arg.iov[0].iov_base = (unsigned char *)&rth;
1281 arg.iov[0].iov_len = sizeof rth;
1282 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1283 skb->nh.iph->saddr, /*XXX*/
1284 sizeof(struct tcphdr), IPPROTO_TCP, 0);
1285 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1287 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1289 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1290 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
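/* The sequence numbers above follow RFC 793 reset generation: if the
 * offending segment carried an ACK, the RST is sent with seq equal to
 * that ack_seq and no ACK of its own; otherwise seq is zero and ack_seq
 * covers everything the segment occupied (payload length plus one each
 * for SYN and FIN).
 */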
1293 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1294 outside of socket context, is certainly ugly. What can I do?
1297 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1300 struct tcphdr *th = skb->h.th;
1305 struct ip_reply_arg arg;
1307 memset(&rep.th, 0, sizeof(struct tcphdr));
1308 memset(&arg, 0, sizeof arg);
1310 arg.iov[0].iov_base = (unsigned char *)&rep;
1311 arg.iov[0].iov_len = sizeof(rep.th);
1313 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1314 (TCPOPT_TIMESTAMP << 8) |
1316 rep.tsopt[1] = htonl(tcp_time_stamp);
1317 rep.tsopt[2] = htonl(ts);
1318 arg.iov[0].iov_len = sizeof(rep);
1321 /* Swap the send and the receive. */
1322 rep.th.dest = th->source;
1323 rep.th.source = th->dest;
1324 rep.th.doff = arg.iov[0].iov_len / 4;
1325 rep.th.seq = htonl(seq);
1326 rep.th.ack_seq = htonl(ack);
1328 rep.th.window = htons(win);
1330 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1331 skb->nh.iph->saddr, /*XXX*/
1332 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1333 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1335 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1337 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1340 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1342 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1344 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1345 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1350 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1352 tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1356 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1357 struct open_request *req)
1360 struct ip_options *opt = req->af.v4_req.opt;
1361 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1363 { .daddr = ((opt && opt->srr) ?
1365 req->af.v4_req.rmt_addr),
1366 .saddr = req->af.v4_req.loc_addr,
1367 .tos = RT_CONN_FLAGS(sk) } },
1368 .proto = IPPROTO_TCP,
1370 { .sport = inet_sk(sk)->sport,
1371 .dport = req->rmt_port } } };
1373 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1374 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1377 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1379 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1386 * Send a SYN-ACK after having received an ACK.
1387 * This still operates on an open_request only, not on a big
1390 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1391 struct dst_entry *dst)
1394 struct sk_buff * skb;
1396 /* First, grab a route. */
1397 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1400 skb = tcp_make_synack(sk, dst, req);
1403 struct tcphdr *th = skb->h.th;
1405 th->check = tcp_v4_check(th, skb->len,
1406 req->af.v4_req.loc_addr,
1407 req->af.v4_req.rmt_addr,
1408 csum_partial((char *)th, skb->len,
1411 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1412 req->af.v4_req.rmt_addr,
1413 req->af.v4_req.opt);
1414 if (err == NET_XMIT_CN)
1424 * IPv4 open_request destructor.
1426 static void tcp_v4_or_free(struct open_request *req)
1428 if (req->af.v4_req.opt)
1429 kfree(req->af.v4_req.opt);
1432 static inline void syn_flood_warning(struct sk_buff *skb)
1434 static unsigned long warntime;
1436 if (time_after(jiffies, (warntime + HZ * 60))) {
1439 "possible SYN flooding on port %d. Sending cookies.\n",
1440 ntohs(skb->h.th->dest));
1445 * Save and compile IPv4 options into the open_request if needed.
1447 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1448 struct sk_buff *skb)
1450 struct ip_options *opt = &(IPCB(skb)->opt);
1451 struct ip_options *dopt = NULL;
1453 if (opt && opt->optlen) {
1454 int opt_size = optlength(opt);
1455 dopt = kmalloc(opt_size, GFP_ATOMIC);
1457 if (ip_options_echo(dopt, skb)) {
1467 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1468 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1469 * It would be better to replace it with a global counter for all sockets,
1470 * but then some measure against one socket starving all other sockets
1473 * It was 128 by default. Experiments with real servers show that
1474 * it is absolutely not enough even at 100 conn/sec. 256 cures most
1475 * of the problems. This value is adjusted to 128 for very small machines
1476 * (<= 32 MB of memory) and to 1024 on normal or better ones (>= 256 MB).
1477 * Further increases require changing the hash table size.
1479 int sysctl_max_syn_backlog = 256;
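/* For illustration: this default can normally be raised at run time via
 * the tcp_max_syn_backlog sysctl, e.g.
 *	echo 1024 > /proc/sys/net/ipv4/tcp_max_syn_backlog
 * but, as noted above, a much larger value also calls for a larger
 * syn-queue hash table.
 */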
1481 struct or_calltable or_ipv4 = {
1483 .rtx_syn_ack = tcp_v4_send_synack,
1484 .send_ack = tcp_v4_or_send_ack,
1485 .destructor = tcp_v4_or_free,
1486 .send_reset = tcp_v4_send_reset,
1489 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1492 struct open_request *req;
1493 __u32 saddr = skb->nh.iph->saddr;
1494 __u32 daddr = skb->nh.iph->daddr;
1495 __u32 isn = TCP_SKB_CB(skb)->when;
1496 struct dst_entry *dst = NULL;
1497 #ifdef CONFIG_SYN_COOKIES
1498 int want_cookie = 0;
1500 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1503 /* Never answer SYNs sent to broadcast or multicast */
1504 if (((struct rtable *)skb->dst)->rt_flags &
1505 (RTCF_BROADCAST | RTCF_MULTICAST))
1508 /* TW buckets are converted to open requests without
1509 * limitations: they conserve resources and the peer is
1510 * evidently a real one.
1512 if (tcp_synq_is_full(sk) && !isn) {
1513 #ifdef CONFIG_SYN_COOKIES
1514 if (sysctl_tcp_syncookies) {
1521 /* Accept backlog is full. If we have already queued enough
1522 * warm entries in the syn queue, drop the request. It is better than
1523 * clogging the syn queue with openreqs with exponentially increasing
1526 if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1529 req = tcp_openreq_alloc();
1533 tcp_clear_options(&tp);
1535 tp.user_mss = tcp_sk(sk)->user_mss;
1537 tcp_parse_options(skb, &tp, 0);
1540 tcp_clear_options(&tp);
1544 if (tp.saw_tstamp && !tp.rcv_tsval) {
1545 /* Some OSes (unknown ones, but I see them on a web server, which
1546 * contains information interesting only for windows'
1547 * users) do not send their stamp in SYN. It is an easy case.
1548 * We simply do not advertise TS support.
1553 tp.tstamp_ok = tp.saw_tstamp;
1555 tcp_openreq_init(req, &tp, skb);
1557 req->af.v4_req.loc_addr = daddr;
1558 req->af.v4_req.rmt_addr = saddr;
1559 req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1560 req->class = &or_ipv4;
1562 TCP_ECN_create_request(req, skb->h.th);
1565 #ifdef CONFIG_SYN_COOKIES
1566 syn_flood_warning(skb);
1568 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1570 struct inet_peer *peer = NULL;
1572 /* VJ's idea. We save the last timestamp seen
1573 * from the destination in the peer table when entering
1574 * state TIME-WAIT, and check against it before
1575 * accepting a new connection request.
1577 * If "isn" is not zero, this request hit an alive
1578 * timewait bucket, so all the necessary checks
1579 * are made in the function processing the timewait state.
1581 if (tp.saw_tstamp &&
1582 sysctl_tcp_tw_recycle &&
1583 (dst = tcp_v4_route_req(sk, req)) != NULL &&
1584 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1585 peer->v4daddr == saddr) {
1586 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1587 (s32)(peer->tcp_ts - req->ts_recent) >
1589 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1594 /* Kill the following clause, if you dislike this way. */
1595 else if (!sysctl_tcp_syncookies &&
1596 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1597 (sysctl_max_syn_backlog >> 2)) &&
1598 (!peer || !peer->tcp_ts_stamp) &&
1599 (!dst || !dst_metric(dst, RTAX_RTT))) {
1600 /* Without syncookies the last quarter of
1601 * the backlog is reserved for destinations
1602 * proven to be alive.
1603 * It means that we continue to communicate
1604 * with destinations already remembered
1605 * at the moment of the synflood.
1607 NETDEBUG(if (net_ratelimit()) \
1608 printk(KERN_DEBUG "TCP: drop open "
1609 "request from %u.%u."
1612 ntohs(skb->h.th->source)));
1617 isn = tcp_v4_init_sequence(sk, skb);
1621 if (tcp_v4_send_synack(sk, req, dst))
1625 tcp_openreq_free(req);
1627 tcp_v4_synq_add(sk, req);
1632 tcp_openreq_free(req);
1634 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1640 * The three way handshake has completed - we got a valid synack -
1641 * now create the new socket.
1643 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1644 struct open_request *req,
1645 struct dst_entry *dst)
1647 struct inet_opt *newinet;
1648 struct tcp_opt *newtp;
1651 if (sk_acceptq_is_full(sk))
1654 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1657 newsk = tcp_create_openreq_child(sk, req, skb);
1661 newsk->sk_dst_cache = dst;
1662 tcp_v4_setup_caps(newsk, dst);
1664 newtp = tcp_sk(newsk);
1665 newinet = inet_sk(newsk);
1666 newinet->daddr = req->af.v4_req.rmt_addr;
1667 newinet->rcv_saddr = req->af.v4_req.loc_addr;
1668 newinet->saddr = req->af.v4_req.loc_addr;
1669 newinet->opt = req->af.v4_req.opt;
1670 req->af.v4_req.opt = NULL;
1671 newinet->mc_index = tcp_v4_iif(skb);
1672 newinet->mc_ttl = skb->nh.iph->ttl;
1673 newtp->ext_header_len = 0;
1675 newtp->ext_header_len = newinet->opt->optlen;
1676 newtp->ext2_header_len = dst->header_len;
1677 newinet->id = newtp->write_seq ^ jiffies;
1679 tcp_sync_mss(newsk, dst_pmtu(dst));
1680 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1681 tcp_initialize_rcv_mss(newsk);
1683 __tcp_v4_hash(newsk, 0);
1684 __tcp_inherit_port(sk, newsk);
1689 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1691 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1696 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1698 struct tcphdr *th = skb->h.th;
1699 struct iphdr *iph = skb->nh.iph;
1700 struct tcp_opt *tp = tcp_sk(sk);
1702 struct open_request **prev;
1703 /* Find possible connection requests. */
1704 struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1705 iph->saddr, iph->daddr);
1707 return tcp_check_req(sk, skb, req, prev);
1709 nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1716 if (nsk->sk_state != TCP_TIME_WAIT) {
1720 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1724 #ifdef CONFIG_SYN_COOKIES
1725 if (!th->rst && !th->syn && th->ack)
1726 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1731 static int tcp_v4_checksum_init(struct sk_buff *skb)
1733 if (skb->ip_summed == CHECKSUM_HW) {
1734 skb->ip_summed = CHECKSUM_UNNECESSARY;
1735 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1736 skb->nh.iph->daddr, skb->csum))
1739 NETDEBUG(if (net_ratelimit())
1740 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1741 skb->ip_summed = CHECKSUM_NONE;
1743 if (skb->len <= 76) {
1744 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1746 skb_checksum(skb, 0, skb->len, 0)))
1748 skb->ip_summed = CHECKSUM_UNNECESSARY;
1750 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1752 skb->nh.iph->daddr, 0);
1758 /* The socket must have its spinlock held when we get
1761 * We have a potential double-lock case here, so even when
1762 * doing backlog processing we use the BH locking scheme.
1763 * This is because we cannot sleep with the original spinlock
1766 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1768 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1769 TCP_CHECK_TIMER(sk);
1770 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1772 TCP_CHECK_TIMER(sk);
1776 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1779 if (sk->sk_state == TCP_LISTEN) {
1780 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1785 if (tcp_child_process(sk, nsk, skb))
1791 TCP_CHECK_TIMER(sk);
1792 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1794 TCP_CHECK_TIMER(sk);
1798 tcp_v4_send_reset(skb);
1801 /* Be careful here. If this function gets more complicated and
1802 * gcc suffers from register pressure on the x86, sk (in %ebx)
1803 * might be destroyed here. This current version compiles correctly,
1804 * but you have been warned.
1809 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1817 int tcp_v4_rcv(struct sk_buff *skb)
1823 if (skb->pkt_type != PACKET_HOST)
1826 /* Count it even if it's bad */
1827 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1829 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1834 if (th->doff < sizeof(struct tcphdr) / 4)
1836 if (!pskb_may_pull(skb, th->doff * 4))
1839 /* An explanation is required here, I think.
1840 * Packet length and doff are validated by header prediction,
1841 * provided the case of th->doff==0 is eliminated.
1842 * So, we defer the checks. */
1843 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1844 tcp_v4_checksum_init(skb) < 0))
1848 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1849 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1850 skb->len - th->doff * 4);
1851 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1852 TCP_SKB_CB(skb)->when = 0;
1853 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1854 TCP_SKB_CB(skb)->sacked = 0;
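/* end_seq is the sequence number just past this segment: SYN and FIN
 * each consume one sequence number in addition to the payload.  For
 * example, a bare SYN at seq 1000 has end_seq 1001, and a 100-byte
 * segment with FIN at seq 2000 has end_seq 2101.
 */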
1856 sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1857 skb->nh.iph->daddr, ntohs(th->dest),
1864 if (sk->sk_state == TCP_TIME_WAIT)
1867 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1868 goto discard_and_relse;
1870 if (sk_filter(sk, skb, 0))
1871 goto discard_and_relse;
1877 if (!sock_owned_by_user(sk)) {
1878 if (!tcp_prequeue(sk, skb))
1879 ret = tcp_v4_do_rcv(sk, skb);
1881 sk_add_backlog(sk, skb);
1889 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1892 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1894 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1896 tcp_v4_send_reset(skb);
1900 /* Discard frame. */
1909 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1910 tcp_tw_put((struct tcp_tw_bucket *) sk);
1914 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1915 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1916 tcp_tw_put((struct tcp_tw_bucket *) sk);
1919 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1920 skb, th, skb->len)) {
1922 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1926 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1927 tcp_tw_put((struct tcp_tw_bucket *)sk);
1931 /* Fall through to ACK */
1934 tcp_v4_timewait_ack(sk, skb);
1938 case TCP_TW_SUCCESS:;
1943 /* With per-bucket locks this operation is not atomic, so
1944 * this version is no worse.
1946 static void __tcp_v4_rehash(struct sock *sk)
1948 sk->sk_prot->unhash(sk);
1949 sk->sk_prot->hash(sk);
1952 static int tcp_v4_reselect_saddr(struct sock *sk)
1954 struct inet_opt *inet = inet_sk(sk);
1957 __u32 old_saddr = inet->saddr;
1959 __u32 daddr = inet->daddr;
1961 if (inet->opt && inet->opt->srr)
1962 daddr = inet->opt->faddr;
1964 /* Query new route. */
1965 err = ip_route_connect(&rt, daddr, 0,
1966 RT_TOS(inet->tos) | sk->sk_localroute,
1967 sk->sk_bound_dev_if,
1969 inet->sport, inet->dport, sk);
1973 __sk_dst_set(sk, &rt->u.dst);
1974 tcp_v4_setup_caps(sk, &rt->u.dst);
1975 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1977 new_saddr = rt->rt_src;
1979 if (new_saddr == old_saddr)
1982 if (sysctl_ip_dynaddr > 1) {
1983 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1984 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1986 NIPQUAD(new_saddr));
1989 inet->saddr = new_saddr;
1990 inet->rcv_saddr = new_saddr;
1992 /* XXX The only ugly spot where we need to
1993 * XXX really change the socket's identity after
1994 * XXX it has entered the hashes. -DaveM
1996 * Besides that, it does not check for connection
1997 * uniqueness. Wait for trouble.
1999 __tcp_v4_rehash(sk);
2003 int tcp_v4_rebuild_header(struct sock *sk)
2005 struct inet_opt *inet = inet_sk(sk);
2006 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
2010 /* Route is OK, nothing to do. */
2015 daddr = inet->daddr;
2016 if (inet->opt && inet->opt->srr)
2017 daddr = inet->opt->faddr;
2020 struct flowi fl = { .oif = sk->sk_bound_dev_if,
2023 .saddr = inet->saddr,
2024 .tos = RT_CONN_FLAGS(sk) } },
2025 .proto = IPPROTO_TCP,
2027 { .sport = inet->sport,
2028 .dport = inet->dport } } };
2030 err = ip_route_output_flow(&rt, &fl, sk, 0);
2033 __sk_dst_set(sk, &rt->u.dst);
2034 tcp_v4_setup_caps(sk, &rt->u.dst);
2035 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
2039 /* Routing failed... */
2040 sk->sk_route_caps = 0;
2042 if (!sysctl_ip_dynaddr ||
2043 sk->sk_state != TCP_SYN_SENT ||
2044 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
2045 (err = tcp_v4_reselect_saddr(sk)) != 0)
2046 sk->sk_err_soft = -err;
2051 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2053 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2054 struct inet_opt *inet = inet_sk(sk);
2056 sin->sin_family = AF_INET;
2057 sin->sin_addr.s_addr = inet->daddr;
2058 sin->sin_port = inet->dport;
2061 /* VJ's idea. Save the last timestamp seen from this destination
2062 * and hold it at least for the normal timewait interval, to use for duplicate
2063 * segment detection in subsequent connections before they enter synchronized
2067 int tcp_v4_remember_stamp(struct sock *sk)
2069 struct inet_opt *inet = inet_sk(sk);
2070 struct tcp_opt *tp = tcp_sk(sk);
2071 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2072 struct inet_peer *peer = NULL;
2075 if (!rt || rt->rt_dst != inet->daddr) {
2076 peer = inet_getpeer(inet->daddr, 1);
2080 rt_bind_peer(rt, 1);
2085 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2086 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2087 peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2088 peer->tcp_ts_stamp = tp->ts_recent_stamp;
2089 peer->tcp_ts = tp->ts_recent;
2099 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2101 struct inet_peer *peer = NULL;
2103 peer = inet_getpeer(tw->tw_daddr, 1);
2106 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2107 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2108 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2109 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2110 peer->tcp_ts = tw->tw_ts_recent;
2119 struct tcp_func ipv4_specific = {
2120 .queue_xmit = ip_queue_xmit,
2121 .send_check = tcp_v4_send_check,
2122 .rebuild_header = tcp_v4_rebuild_header,
2123 .conn_request = tcp_v4_conn_request,
2124 .syn_recv_sock = tcp_v4_syn_recv_sock,
2125 .remember_stamp = tcp_v4_remember_stamp,
2126 .net_header_len = sizeof(struct iphdr),
2127 .setsockopt = ip_setsockopt,
2128 .getsockopt = ip_getsockopt,
2129 .addr2sockaddr = v4_addr2sockaddr,
2130 .sockaddr_len = sizeof(struct sockaddr_in),
2133 /* NOTE: A lot of things are set to zero explicitly by the call to
2134 * sk_alloc(), so they need not be done here.
2136 static int tcp_v4_init_sock(struct sock *sk)
2138 struct tcp_opt *tp = tcp_sk(sk);
2140 skb_queue_head_init(&tp->out_of_order_queue);
2141 tcp_init_xmit_timers(sk);
2142 tcp_prequeue_init(tp);
2144 tp->rto = TCP_TIMEOUT_INIT;
2145 tp->mdev = TCP_TIMEOUT_INIT;
2147 /* So many TCP implementations out there (incorrectly) count the
2148 * initial SYN frame in their delayed-ACK and congestion control
2149 * algorithms that we must have the following bandaid to talk
2150 * efficiently to them. -DaveM
2154 /* See draft-stevens-tcpca-spec-01 for discussion of the
2155 * initialization of these values.
2157 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
2158 tp->snd_cwnd_clamp = ~0;
2159 tp->mss_cache = 536;
2161 tp->reordering = sysctl_tcp_reordering;
2163 sk->sk_state = TCP_CLOSE;
2165 sk->sk_write_space = sk_stream_write_space;
2166 sk->sk_use_write_queue = 1;
2168 tp->af_specific = &ipv4_specific;
2170 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2171 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
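/* sysctl_tcp_wmem[] and sysctl_tcp_rmem[] are "min default max" triplets
 * (visible as /proc/sys/net/ipv4/tcp_wmem and tcp_rmem); new sockets
 * start at the middle, default value and are grown or shrunk later by
 * the buffer auto-tuning code.
 */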
2173 atomic_inc(&tcp_sockets_allocated);
2178 int tcp_v4_destroy_sock(struct sock *sk)
2180 struct tcp_opt *tp = tcp_sk(sk);
2182 tcp_clear_xmit_timers(sk);
2184 /* Clean up the write buffer. */
2185 sk_stream_writequeue_purge(sk);
2187 /* Cleans up our, hopefully empty, out_of_order_queue. */
2188 __skb_queue_purge(&tp->out_of_order_queue);
2190 /* Clean the prequeue; it really must be empty */
2191 __skb_queue_purge(&tp->ucopy.prequeue);
2193 /* Clean up a referenced TCP bind bucket. */
2198 * If sendmsg cached page exists, toss it.
2200 if (sk->sk_sndmsg_page) {
2201 __free_page(sk->sk_sndmsg_page);
2202 sk->sk_sndmsg_page = NULL;
2205 atomic_dec(&tcp_sockets_allocated);
2210 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2212 #ifdef CONFIG_PROC_FS
2213 /* Proc filesystem TCP sock list dumping. */
2215 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2217 return hlist_empty(head) ? NULL :
2218 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2221 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2223 return tw->tw_node.next ?
2224 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2227 static void *listening_get_next(struct seq_file *seq, void *cur)
2230 struct hlist_node *node;
2231 struct sock *sk = cur;
2232 struct tcp_iter_state* st = seq->private;
2236 sk = sk_head(&tcp_listening_hash[0]);
2242 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2243 struct open_request *req = cur;
2245 tp = tcp_sk(st->syn_wait_sk);
2249 vxdprintk(VXD_CBIT(net, 6),
2250 "sk,req: %p [#%d] (from %d)",
2251 req->sk, req->sk->sk_xid, current->xid);
2252 if (!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
2254 if (req->class->family == st->family) {
2260 if (++st->sbucket >= TCP_SYNQ_HSIZE)
2263 req = tp->listen_opt->syn_table[st->sbucket];
2265 sk = sk_next(st->syn_wait_sk);
2266 st->state = TCP_SEQ_STATE_LISTENING;
2267 read_unlock_bh(&tp->syn_wait_lock);
2271 sk_for_each_from(sk, node) {
2272 vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
2273 sk, sk->sk_xid, current->xid);
2274 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2276 if (sk->sk_family == st->family) {
2281 read_lock_bh(&tp->syn_wait_lock);
2282 if (tp->listen_opt && tp->listen_opt->qlen) {
2283 st->uid = sock_i_uid(sk);
2284 st->syn_wait_sk = sk;
2285 st->state = TCP_SEQ_STATE_OPENREQ;
2289 read_unlock_bh(&tp->syn_wait_lock);
2291 if (++st->bucket < TCP_LHTABLE_SIZE) {
2292 sk = sk_head(&tcp_listening_hash[st->bucket]);
2300 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2302 void *rc = listening_get_next(seq, NULL);
2304 while (rc && *pos) {
2305 rc = listening_get_next(seq, rc);
2311 static void *established_get_first(struct seq_file *seq)
2313 struct tcp_iter_state* st = seq->private;
2316 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2318 struct hlist_node *node;
2319 struct tcp_tw_bucket *tw;
2321 read_lock(&tcp_ehash[st->bucket].lock);
2322 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2323 vxdprintk(VXD_CBIT(net, 6),
2324 "sk,egf: %p [#%d] (from %d)",
2325 sk, sk->sk_xid, current->xid);
2326 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2328 if (sk->sk_family != st->family)
2333 st->state = TCP_SEQ_STATE_TIME_WAIT;
2334 tw_for_each(tw, node,
2335 &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2336 vxdprintk(VXD_CBIT(net, 6),
2337 "tw: %p [#%d] (from %d)",
2338 tw, tw->tw_xid, current->xid);
2339 if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2341 if (tw->tw_family != st->family)
2346 read_unlock(&tcp_ehash[st->bucket].lock);
2347 st->state = TCP_SEQ_STATE_ESTABLISHED;
2353 static void *established_get_next(struct seq_file *seq, void *cur)
2355 struct sock *sk = cur;
2356 struct tcp_tw_bucket *tw;
2357 struct hlist_node *node;
2358 struct tcp_iter_state* st = seq->private;
2362 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2366 while (tw && (tw->tw_family != st->family ||
2367 !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2374 read_unlock(&tcp_ehash[st->bucket].lock);
2375 st->state = TCP_SEQ_STATE_ESTABLISHED;
2376 if (++st->bucket < tcp_ehash_size) {
2377 read_lock(&tcp_ehash[st->bucket].lock);
2378 sk = sk_head(&tcp_ehash[st->bucket].chain);
2386 sk_for_each_from(sk, node) {
2387 vxdprintk(VXD_CBIT(net, 6),
2388 "sk,egn: %p [#%d] (from %d)",
2389 sk, sk->sk_xid, current->xid);
2390 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2392 if (sk->sk_family == st->family)
2396 st->state = TCP_SEQ_STATE_TIME_WAIT;
2397 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2405 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2407 void *rc = established_get_first(seq);
2410 rc = established_get_next(seq, rc);
2416 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2419 struct tcp_iter_state* st = seq->private;
2422 st->state = TCP_SEQ_STATE_LISTENING;
2423 rc = listening_get_idx(seq, &pos);
2426 tcp_listen_unlock();
2428 st->state = TCP_SEQ_STATE_ESTABLISHED;
2429 rc = established_get_idx(seq, pos);
2435 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2437 struct tcp_iter_state* st = seq->private;
2438 st->state = TCP_SEQ_STATE_LISTENING;
2440 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2443 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2446 struct tcp_iter_state* st;
2448 if (v == SEQ_START_TOKEN) {
2449 rc = tcp_get_idx(seq, 0);
2454 switch (st->state) {
2455 case TCP_SEQ_STATE_OPENREQ:
2456 case TCP_SEQ_STATE_LISTENING:
2457 rc = listening_get_next(seq, v);
2459 tcp_listen_unlock();
2461 st->state = TCP_SEQ_STATE_ESTABLISHED;
2462 rc = established_get_first(seq);
2465 case TCP_SEQ_STATE_ESTABLISHED:
2466 case TCP_SEQ_STATE_TIME_WAIT:
2467 rc = established_get_next(seq, v);
2475 static void tcp_seq_stop(struct seq_file *seq, void *v)
2477 struct tcp_iter_state* st = seq->private;
2479 switch (st->state) {
2480 case TCP_SEQ_STATE_OPENREQ:
2482 struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2483 read_unlock_bh(&tp->syn_wait_lock);
2485 case TCP_SEQ_STATE_LISTENING:
2486 if (v != SEQ_START_TOKEN)
2487 tcp_listen_unlock();
2489 case TCP_SEQ_STATE_TIME_WAIT:
2490 case TCP_SEQ_STATE_ESTABLISHED:
2492 read_unlock(&tcp_ehash[st->bucket].lock);
2498 static int tcp_seq_open(struct inode *inode, struct file *file)
2500 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2501 struct seq_file *seq;
2502 struct tcp_iter_state *s;
2505 if (unlikely(afinfo == NULL))
2508 s = kmalloc(sizeof(*s), GFP_KERNEL);
2511 memset(s, 0, sizeof(*s));
2512 s->family = afinfo->family;
2513 s->seq_ops.start = tcp_seq_start;
2514 s->seq_ops.next = tcp_seq_next;
2515 s->seq_ops.show = afinfo->seq_show;
2516 s->seq_ops.stop = tcp_seq_stop;
2518 rc = seq_open(file, &s->seq_ops);
2521 seq = file->private_data;
2530 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2533 struct proc_dir_entry *p;
2537 afinfo->seq_fops->owner = afinfo->owner;
2538 afinfo->seq_fops->open = tcp_seq_open;
2539 afinfo->seq_fops->read = seq_read;
2540 afinfo->seq_fops->llseek = seq_lseek;
2541 afinfo->seq_fops->release = seq_release_private;
2543 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2551 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2555 proc_net_remove(afinfo->name);
2556 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
2559 static void get_openreq4(struct sock *sk, struct open_request *req,
2560 char *tmpbuf, int i, int uid)
2562 int ttd = req->expires - jiffies;
2564 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2565 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2567 req->af.v4_req.loc_addr,
2568 ntohs(inet_sk(sk)->sport),
2569 req->af.v4_req.rmt_addr,
2570 ntohs(req->rmt_port),
2572 0, 0, /* could print option size, but that is af dependent. */
2573 1, /* timers active (only the expire timer) */
2574 jiffies_to_clock_t(ttd),
2577 0, /* non standard timer */
2578 0, /* open_requests have no inode */
2579 atomic_read(&sk->sk_refcnt),
2583 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2586 unsigned long timer_expires;
2587 struct tcp_opt *tp = tcp_sk(sp);
2588 struct inet_opt *inet = inet_sk(sp);
2589 unsigned int dest = inet->daddr;
2590 unsigned int src = inet->rcv_saddr;
2591 __u16 destp = ntohs(inet->dport);
2592 __u16 srcp = ntohs(inet->sport);
2594 if (tp->pending == TCP_TIME_RETRANS) {
2596 timer_expires = tp->timeout;
2597 } else if (tp->pending == TCP_TIME_PROBE0) {
2599 timer_expires = tp->timeout;
2600 } else if (timer_pending(&sp->sk_timer)) {
2602 timer_expires = sp->sk_timer.expires;
2605 timer_expires = jiffies;
2608 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2609 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2610 i, src, srcp, dest, destp, sp->sk_state,
2611 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2613 jiffies_to_clock_t(timer_expires - jiffies),
2618 atomic_read(&sp->sk_refcnt), sp,
2619 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2621 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2624 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2626 unsigned int dest, src;
2628 int ttd = tw->tw_ttd - jiffies;
2633 dest = tw->tw_daddr;
2634 src = tw->tw_rcv_saddr;
2635 destp = ntohs(tw->tw_dport);
2636 srcp = ntohs(tw->tw_sport);
2638 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2639 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2640 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2641 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2642 atomic_read(&tw->tw_refcnt), tw);
2647 static int tcp4_seq_show(struct seq_file *seq, void *v)
2649 struct tcp_iter_state* st;
2650 char tmpbuf[TMPSZ + 1];
2652 if (v == SEQ_START_TOKEN) {
2653 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2654 " sl local_address rem_address st tx_queue "
2655 "rx_queue tr tm->when retrnsmt uid timeout "
2661 switch (st->state) {
2662 case TCP_SEQ_STATE_LISTENING:
2663 case TCP_SEQ_STATE_ESTABLISHED:
2664 get_tcp4_sock(v, tmpbuf, st->num);
2666 case TCP_SEQ_STATE_OPENREQ:
2667 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2669 case TCP_SEQ_STATE_TIME_WAIT:
2670 get_timewait4_sock(v, tmpbuf, st->num);
2673 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2678 static struct file_operations tcp4_seq_fops;
2679 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2680 .owner = THIS_MODULE,
2683 .seq_show = tcp4_seq_show,
2684 .seq_fops = &tcp4_seq_fops,
2687 int __init tcp4_proc_init(void)
2689 return tcp_proc_register(&tcp4_seq_afinfo);
2692 void tcp4_proc_exit(void)
2694 tcp_proc_unregister(&tcp4_seq_afinfo);
2696 #endif /* CONFIG_PROC_FS */
2698 struct proto tcp_prot = {
2701 .connect = tcp_v4_connect,
2702 .disconnect = tcp_disconnect,
2703 .accept = tcp_accept,
2705 .init = tcp_v4_init_sock,
2706 .destroy = tcp_v4_destroy_sock,
2707 .shutdown = tcp_shutdown,
2708 .setsockopt = tcp_setsockopt,
2709 .getsockopt = tcp_getsockopt,
2710 .sendmsg = tcp_sendmsg,
2711 .recvmsg = tcp_recvmsg,
2712 .backlog_rcv = tcp_v4_do_rcv,
2713 .hash = tcp_v4_hash,
2714 .unhash = tcp_unhash,
2715 .get_port = tcp_v4_get_port,
2716 .enter_memory_pressure = tcp_enter_memory_pressure,
2717 .sockets_allocated = &tcp_sockets_allocated,
2718 .memory_allocated = &tcp_memory_allocated,
2719 .memory_pressure = &tcp_memory_pressure,
2720 .sysctl_mem = sysctl_tcp_mem,
2721 .sysctl_wmem = sysctl_tcp_wmem,
2722 .sysctl_rmem = sysctl_tcp_rmem,
2723 .max_header = MAX_TCP_HEADER,
2728 void __init tcp_v4_init(struct net_proto_family *ops)
2730 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2732 panic("Failed to create the TCP control socket.\n");
2733 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2734 inet_sk(tcp_socket->sk)->uc_ttl = -1;
2736 /* Unhash it so that IP input processing does not even
2737 * see it, we do not wish this socket to see incoming
2740 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2743 EXPORT_SYMBOL(ipv4_specific);
2744 EXPORT_SYMBOL(tcp_bind_hash);
2745 EXPORT_SYMBOL(tcp_bucket_create);
2746 EXPORT_SYMBOL(tcp_hashinfo);
2747 EXPORT_SYMBOL(tcp_inherit_port);
2748 EXPORT_SYMBOL(tcp_listen_wlock);
2749 EXPORT_SYMBOL(tcp_port_rover);
2750 EXPORT_SYMBOL(tcp_prot);
2751 EXPORT_SYMBOL(tcp_put_port);
2752 EXPORT_SYMBOL(tcp_unhash);
2753 EXPORT_SYMBOL(tcp_v4_conn_request);
2754 EXPORT_SYMBOL(tcp_v4_connect);
2755 EXPORT_SYMBOL(tcp_v4_do_rcv);
2756 EXPORT_SYMBOL(tcp_v4_lookup_listener);
2757 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2758 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2759 EXPORT_SYMBOL(tcp_v4_send_check);
2760 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2762 #ifdef CONFIG_PROC_FS
2763 EXPORT_SYMBOL(tcp_proc_register);
2764 EXPORT_SYMBOL(tcp_proc_unregister);
2766 #ifdef CONFIG_SYSCTL
2767 EXPORT_SYMBOL(sysctl_local_port_range);
2768 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2769 EXPORT_SYMBOL(sysctl_tcp_low_latency);