2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an ACK bit.
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * open_request handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after a year-long coma.
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
51 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
69 #include <net/inet_common.h>
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/vserver/debug.h>
79 extern int sysctl_ip_dynaddr;
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
92 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
93 .__tcp_lhash_lock = RW_LOCK_UNLOCKED,
94 .__tcp_lhash_users = ATOMIC_INIT(0),
96 = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
97 .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
101 * This array holds the first and last local port number.
102 * For high-usage systems, use sysctl to change this to 32768-61000.
105 int sysctl_local_port_range[2] = { 1024, 4999 };
106 int tcp_port_rover = 1024 - 1;
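/* The established-table hash folds the connection 4-tuple (local
 * address/port, foreign address/port) into a bucket index; masking
 * with (tcp_ehash_size - 1) relies on the table size being a power
 * of two.
 */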
108 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
109 __u32 faddr, __u16 fport)
111 int h = (laddr ^ lport) ^ (faddr ^ fport);
114 return h & (tcp_ehash_size - 1);
117 static __inline__ int tcp_sk_hashfn(struct sock *sk)
119 struct inet_opt *inet = inet_sk(sk);
120 __u32 laddr = inet->rcv_saddr;
121 __u16 lport = inet->num;
122 __u32 faddr = inet->daddr;
123 __u16 fport = inet->dport;
125 return tcp_hashfn(laddr, lport, faddr, fport);
128 /* Allocate and initialize a new TCP local port bind bucket.
129 * The bindhash mutex for snum's hash chain must be held here.
131 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
134 struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
139 INIT_HLIST_HEAD(&tb->owners);
140 hlist_add_head(&tb->node, &head->chain);
145 /* Caller must hold hashbucket lock for this tb with local BH disabled */
146 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
148 if (hlist_empty(&tb->owners)) {
149 __hlist_del(&tb->node);
150 kmem_cache_free(tcp_bucket_cachep, tb);
154 /* Caller must disable local BH processing. */
155 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
157 struct tcp_bind_hashbucket *head =
158 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
159 struct tcp_bind_bucket *tb;
161 spin_lock(&head->lock);
162 tb = tcp_sk(sk)->bind_hash;
163 sk_add_bind_node(child, &tb->owners);
164 tcp_sk(child)->bind_hash = tb;
165 spin_unlock(&head->lock);
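/* A child socket created from a listening parent inherits the
 * parent's local port simply by joining the owner list of the
 * parent's bind bucket, so the port stays reserved for as long as
 * either socket lives.
 */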
168 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
171 __tcp_inherit_port(sk, child);
175 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
178 inet_sk(sk)->num = snum;
179 sk_add_bind_node(sk, &tb->owners);
180 tcp_sk(sk)->bind_hash = tb;
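/* Check sk against the existing owners of bind bucket tb.  A conflict
 * is declared when another owner is visible on the same device
 * (either socket unbound, or both bound to the same interface),
 * SO_REUSEADDR does not permit sharing (one of the two lacks it, or
 * the owner is listening), and the two local receive addresses
 * overlap (a zero rcv_saddr acts as a wildcard).
 */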
183 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
185 const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
187 struct hlist_node *node;
188 int reuse = sk->sk_reuse;
190 sk_for_each_bound(sk2, node, &tb->owners) {
192 !tcp_v6_ipv6only(sk2) &&
193 (!sk->sk_bound_dev_if ||
194 !sk2->sk_bound_dev_if ||
195 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
196 if (!reuse || !sk2->sk_reuse ||
197 sk2->sk_state == TCP_LISTEN) {
198 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
199 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
200 sk2_rcv_saddr == sk_rcv_saddr)
208 /* Obtain a reference to a local port for the given sock,
209 * if snum is zero it means select any available local port.
211 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
213 struct tcp_bind_hashbucket *head;
214 struct hlist_node *node;
215 struct tcp_bind_bucket *tb;
220 int low = sysctl_local_port_range[0];
221 int high = sysctl_local_port_range[1];
222 int remaining = (high - low) + 1;
225 spin_lock(&tcp_portalloc_lock);
226 rover = tcp_port_rover;
229 if (rover < low || rover > high)
231 head = &tcp_bhash[tcp_bhashfn(rover)];
232 spin_lock(&head->lock);
233 tb_for_each(tb, node, &head->chain)
234 if (tb->port == rover)
238 spin_unlock(&head->lock);
239 } while (--remaining > 0);
240 tcp_port_rover = rover;
241 spin_unlock(&tcp_portalloc_lock);
243 /* Exhausted local port range during search? */
248 /* OK, here is the one we will use. HEAD is
249 * non-NULL and we hold its mutex.
253 head = &tcp_bhash[tcp_bhashfn(snum)];
254 spin_lock(&head->lock);
255 tb_for_each(tb, node, &head->chain)
256 if (tb->port == snum)
262 if (!hlist_empty(&tb->owners)) {
263 if (sk->sk_reuse > 1)
265 if (tb->fastreuse > 0 &&
266 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
270 if (tcp_bind_conflict(sk, tb))
276 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
278 if (hlist_empty(&tb->owners)) {
279 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
283 } else if (tb->fastreuse &&
284 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
287 if (!tcp_sk(sk)->bind_hash)
288 tcp_bind_hash(sk, tb, snum);
289 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
293 spin_unlock(&head->lock);
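/* Note on the search above: with snum == 0 the rover walks the
 * sysctl_local_port_range interval, taking each candidate bucket's
 * lock in turn, while an explicit snum goes straight to that port's
 * bucket and only has to pass tcp_bind_conflict().
 */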
299 /* Get rid of any references to a local port held by the given sock.
302 static void __tcp_put_port(struct sock *sk)
304 struct inet_opt *inet = inet_sk(sk);
305 struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
306 struct tcp_bind_bucket *tb;
308 spin_lock(&head->lock);
309 tb = tcp_sk(sk)->bind_hash;
310 __sk_del_bind_node(sk);
311 tcp_sk(sk)->bind_hash = NULL;
313 tcp_bucket_destroy(tb);
314 spin_unlock(&head->lock);
317 void tcp_put_port(struct sock *sk)
324 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP, but it can be very bad on SMP.
325 * Look: when several writers sleep and the reader wakes them up, all but one
326 * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
327 * this, _but_ remember, it adds useless work on UP machines (a wakeup on each
328 * exclusive lock release). It should really be ifdefed.
331 void tcp_listen_wlock(void)
333 write_lock(&tcp_lhash_lock);
335 if (atomic_read(&tcp_lhash_users)) {
339 prepare_to_wait_exclusive(&tcp_lhash_wait,
340 &wait, TASK_UNINTERRUPTIBLE);
341 if (!atomic_read(&tcp_lhash_users))
343 write_unlock_bh(&tcp_lhash_lock);
345 write_lock_bh(&tcp_lhash_lock);
348 finish_wait(&tcp_lhash_wait, &wait);
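/* Hash sk into the listening table when it is a listener, otherwise
 * into the established-table chain chosen by tcp_sk_hashfn().  Either
 * way the protocol's in-use counter is bumped, and listeners wake up
 * anyone blocked in tcp_listen_wlock() afterwards.
 */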
352 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
354 struct hlist_head *list;
357 BUG_TRAP(sk_unhashed(sk));
358 if (listen_possible && sk->sk_state == TCP_LISTEN) {
359 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
360 lock = &tcp_lhash_lock;
363 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
364 lock = &tcp_ehash[sk->sk_hashent].lock;
367 __sk_add_node(sk, list);
368 sock_prot_inc_use(sk->sk_prot);
370 if (listen_possible && sk->sk_state == TCP_LISTEN)
371 wake_up(&tcp_lhash_wait);
374 static void tcp_v4_hash(struct sock *sk)
376 if (sk->sk_state != TCP_CLOSE) {
378 __tcp_v4_hash(sk, 1);
383 void tcp_unhash(struct sock *sk)
390 if (sk->sk_state == TCP_LISTEN) {
393 lock = &tcp_lhash_lock;
395 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
397 write_lock_bh(&head->lock);
400 if (__sk_del_node_init(sk))
401 sock_prot_dec_use(sk->sk_prot);
402 write_unlock_bh(lock);
405 if (sk->sk_state == TCP_LISTEN)
406 wake_up(&tcp_lhash_wait);
409 /* Don't inline this cruft. There are some nice properties to
410 * exploit here: the BSD API does not allow a listening TCP
411 * to specify the remote port or the remote address for the
412 * connection. So always assume those are both wildcarded
413 * during the search since they can never be otherwise.
415 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
416 unsigned short hnum, int dif)
418 struct sock *result = NULL, *sk;
419 struct hlist_node *node;
423 sk_for_each(sk, node, head) {
424 struct inet_opt *inet = inet_sk(sk);
426 if (inet->num == hnum && !ipv6_only_sock(sk)) {
427 __u32 rcv_saddr = inet->rcv_saddr;
429 score = (sk->sk_family == PF_INET ? 1 : 0);
431 if (rcv_saddr != daddr)
435 if (sk->sk_bound_dev_if) {
436 if (sk->sk_bound_dev_if != dif)
442 if (score > hiscore) {
451 /* Optimize the common listener case. */
452 inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
455 struct sock *sk = NULL;
456 struct hlist_head *head;
458 read_lock(&tcp_lhash_lock);
459 head = &tcp_listening_hash[tcp_lhashfn(hnum)];
460 if (!hlist_empty(head)) {
461 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
462 if (inet->num == hnum && !sk->sk_node.next &&
463 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
464 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
465 !sk->sk_bound_dev_if)
467 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
473 read_unlock(&tcp_lhash_lock);
477 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
478 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
480 * Local BH must be disabled here.
483 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
487 struct tcp_ehash_bucket *head;
488 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
489 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
491 struct hlist_node *node;
492 /* Optimize here for direct hit, only listening connections can
493 * have wildcards anyway.
495 int hash = tcp_hashfn(daddr, hnum, saddr, sport);
496 head = &tcp_ehash[hash];
497 read_lock(&head->lock);
498 sk_for_each(sk, node, &head->chain) {
499 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
500 goto hit; /* You sunk my battleship! */
503 /* Must check for a TIME_WAIT'er before going to listener hash. */
504 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
505 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
510 read_unlock(&head->lock);
517 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
518 u32 daddr, u16 hnum, int dif)
520 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
523 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
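/* Demultiplexing order: try an exact established (or TIME-WAIT)
 * match first, and only fall back to the wildcard listener search
 * when that misses.
 */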
526 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
532 sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
538 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
540 return secure_tcp_sequence_number(skb->nh.iph->daddr,
546 /* called with local bh disabled */
547 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
548 struct tcp_tw_bucket **twp)
550 struct inet_opt *inet = inet_sk(sk);
551 u32 daddr = inet->rcv_saddr;
552 u32 saddr = inet->daddr;
553 int dif = sk->sk_bound_dev_if;
554 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
555 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
556 int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
557 struct tcp_ehash_bucket *head = &tcp_ehash[hash];
559 struct hlist_node *node;
560 struct tcp_tw_bucket *tw;
562 write_lock(&head->lock);
564 /* Check TIME-WAIT sockets first. */
565 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
566 tw = (struct tcp_tw_bucket *)sk2;
568 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
569 struct tcp_opt *tp = tcp_sk(sk);
571 /* With PAWS, it is safe from the viewpoint
572 of data integrity. Even without PAWS it
573 is safe provided sequence spaces do not
574 overlap i.e. at data rates <= 80Mbit/sec.
576 Actually, the idea is close to VJ's one,
577 only the timestamp cache is held not per host
578 but per port pair, and the TW bucket is used as the state holder.
581 If the TW bucket has already been destroyed we
582 fall back to VJ's scheme and use the initial
583 timestamp retrieved from the peer table.
585 if (tw->tw_ts_recent_stamp &&
586 (!twp || (sysctl_tcp_tw_reuse &&
588 tw->tw_ts_recent_stamp > 1))) {
590 tw->tw_snd_nxt + 65535 + 2) == 0)
592 tp->ts_recent = tw->tw_ts_recent;
593 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
602 /* And established part... */
603 sk_for_each(sk2, node, &head->chain) {
604 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
609 /* Must record num and sport now. Otherwise we will see
610 * a socket with a funny identity in the hash table. */
612 inet->sport = htons(lport);
613 sk->sk_hashent = hash;
614 BUG_TRAP(sk_unhashed(sk));
615 __sk_add_node(sk, &head->chain);
616 sock_prot_inc_use(sk->sk_prot);
617 write_unlock(&head->lock);
621 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
623 /* Silly. Should hash-dance instead... */
624 tcp_tw_deschedule(tw);
625 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
633 write_unlock(&head->lock);
634 return -EADDRNOTAVAIL;
638 * Bind a port for a connect operation and hash it.
640 static int tcp_v4_hash_connect(struct sock *sk)
642 unsigned short snum = inet_sk(sk)->num;
643 struct tcp_bind_hashbucket *head;
644 struct tcp_bind_bucket *tb;
649 int low = sysctl_local_port_range[0];
650 int high = sysctl_local_port_range[1];
651 int remaining = (high - low) + 1;
652 struct hlist_node *node;
653 struct tcp_tw_bucket *tw = NULL;
657 /* TODO. Actually it is not such a bad idea to remove
658 * tcp_portalloc_lock before the next submission to Linus.
659 * As soon as we touch this place at all, it is time to think.
661 * Right now it protects the single _advisory_ variable tcp_port_rover,
662 * hence it is mostly useless.
663 * The code will work nicely if we just delete it, but
664 * I am afraid that in the contended case it will work no better or
665 * even worse: another cpu will just hit the same bucket
667 * So some cpu salt could remove both the contention and
668 * the memory pingpong. Any ideas how to do this in a nice way?
670 spin_lock(&tcp_portalloc_lock);
671 rover = tcp_port_rover;
675 if ((rover < low) || (rover > high))
677 head = &tcp_bhash[tcp_bhashfn(rover)];
678 spin_lock(&head->lock);
680 /* Does not bother with rcv_saddr checks,
681 * because the established check is already unique enough.
684 tb_for_each(tb, node, &head->chain) {
685 if (tb->port == rover) {
686 BUG_TRAP(!hlist_empty(&tb->owners));
687 if (tb->fastreuse >= 0)
689 if (!__tcp_v4_check_established(sk,
697 tb = tcp_bucket_create(head, rover);
699 spin_unlock(&head->lock);
706 spin_unlock(&head->lock);
707 } while (--remaining > 0);
708 tcp_port_rover = rover;
709 spin_unlock(&tcp_portalloc_lock);
713 return -EADDRNOTAVAIL;
716 /* All locks still held and bhs disabled */
717 tcp_port_rover = rover;
718 spin_unlock(&tcp_portalloc_lock);
720 tcp_bind_hash(sk, tb, rover);
721 if (sk_unhashed(sk)) {
722 inet_sk(sk)->sport = htons(rover);
723 __tcp_v4_hash(sk, 0);
725 spin_unlock(&head->lock);
728 tcp_tw_deschedule(tw);
736 head = &tcp_bhash[tcp_bhashfn(snum)];
737 tb = tcp_sk(sk)->bind_hash;
738 spin_lock_bh(&head->lock);
739 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
740 __tcp_v4_hash(sk, 0);
741 spin_unlock_bh(&head->lock);
744 spin_unlock(&head->lock);
745 /* No definite answer... Walk to established hash table */
746 ret = __tcp_v4_check_established(sk, snum, NULL);
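/* tcp_v4_hash_connect() recap: with an explicit source port we only
 * verify the 4-tuple against the established table; with port zero
 * the rover search above picks an ephemeral port and performs that
 * same check under the bucket lock, recycling a matching TIME-WAIT
 * entry when the reuse rules allow it.
 */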
753 /* This will initiate an outgoing connection. */
754 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
756 struct inet_opt *inet = inet_sk(sk);
757 struct tcp_opt *tp = tcp_sk(sk);
758 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
764 if (addr_len < sizeof(struct sockaddr_in))
767 if (usin->sin_family != AF_INET)
768 return -EAFNOSUPPORT;
770 nexthop = daddr = usin->sin_addr.s_addr;
771 if (inet->opt && inet->opt->srr) {
774 nexthop = inet->opt->faddr;
777 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
778 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
780 inet->sport, usin->sin_port, sk);
784 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
789 if (!inet->opt || !inet->opt->srr)
793 inet->saddr = rt->rt_src;
794 inet->rcv_saddr = inet->saddr;
796 if (tp->ts_recent_stamp && inet->daddr != daddr) {
797 /* Reset inherited state */
799 tp->ts_recent_stamp = 0;
803 if (sysctl_tcp_tw_recycle &&
804 !tp->ts_recent_stamp && rt->rt_dst == daddr) {
805 struct inet_peer *peer = rt_get_peer(rt);
807 /* VJ's idea. We save last timestamp seen from
808 * the destination in peer table, when entering state TIME-WAIT
809 * and initialize ts_recent from it, when trying new connection.
812 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
813 tp->ts_recent_stamp = peer->tcp_ts_stamp;
814 tp->ts_recent = peer->tcp_ts;
818 inet->dport = usin->sin_port;
821 tp->ext_header_len = 0;
823 tp->ext_header_len = inet->opt->optlen;
827 /* Socket identity is still unknown (sport may be zero).
828 * However we set state to SYN-SENT and, without releasing the socket
829 * lock, select a source port, enter ourselves into the hash tables and
830 * complete initialization after this.
832 tcp_set_state(sk, TCP_SYN_SENT);
833 err = tcp_v4_hash_connect(sk);
837 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
841 /* OK, now commit destination to socket. */
842 __sk_dst_set(sk, &rt->u.dst);
843 tcp_v4_setup_caps(sk, &rt->u.dst);
844 tp->ext2_header_len = rt->u.dst.header_len;
847 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
852 inet->id = tp->write_seq ^ jiffies;
854 err = tcp_connect(sk);
862 /* This unhashes the socket and releases the local port, if necessary. */
863 tcp_set_state(sk, TCP_CLOSE);
865 sk->sk_route_caps = 0;
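/* tcp_v4_connect() in outline: resolve the route (honouring a
 * source-routed first hop), optionally seed ts_recent from the peer
 * cache, enter SYN-SENT, bind and hash a source port, commit the
 * route and initial sequence number, then emit the SYN through
 * tcp_connect().
 */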
870 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
872 return ((struct rtable *)skb->dst)->rt_iif;
875 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
877 return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
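/* Pending open_requests live in a small per-listener hash keyed on
 * the peer's address and port and salted with lopt->hash_rnd, so a
 * remote host cannot deliberately collide all its SYNs into one
 * chain.
 */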
880 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
881 struct open_request ***prevp,
883 __u32 raddr, __u32 laddr)
885 struct tcp_listen_opt *lopt = tp->listen_opt;
886 struct open_request *req, **prev;
888 for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
889 (req = *prev) != NULL;
890 prev = &req->dl_next) {
891 if (req->rmt_port == rport &&
892 req->af.v4_req.rmt_addr == raddr &&
893 req->af.v4_req.loc_addr == laddr &&
894 TCP_INET_FAMILY(req->class->family)) {
904 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
906 struct tcp_opt *tp = tcp_sk(sk);
907 struct tcp_listen_opt *lopt = tp->listen_opt;
908 u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
910 req->expires = jiffies + TCP_TIMEOUT_INIT;
913 req->dl_next = lopt->syn_table[h];
915 write_lock(&tp->syn_wait_lock);
916 lopt->syn_table[h] = req;
917 write_unlock(&tp->syn_wait_lock);
919 #ifdef CONFIG_ACCEPT_QUEUES
920 tcp_synq_added(sk, req);
928 * This routine does path mtu discovery as defined in RFC1191.
930 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
933 struct dst_entry *dst;
934 struct inet_opt *inet = inet_sk(sk);
935 struct tcp_opt *tp = tcp_sk(sk);
937 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
938 * sent out by Linux are always < 576 bytes, so they should go through unfragmented).
941 if (sk->sk_state == TCP_LISTEN)
944 /* We don't check in the dst entry if pmtu discovery is forbidden
945 * on this route. We just assume that no packet-too-big packets
946 * are sent back when pmtu discovery is not active.
947 * There is a small race when the user changes this flag in the
948 * route, but I think that's acceptable.
950 if ((dst = __sk_dst_check(sk, 0)) == NULL)
953 dst->ops->update_pmtu(dst, mtu);
955 /* Something is about to go wrong... Remember the soft error
956 * in case this connection is not able to recover.
958 if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
959 sk->sk_err_soft = EMSGSIZE;
963 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
964 tp->pmtu_cookie > mtu) {
965 tcp_sync_mss(sk, mtu);
967 /* Resend the TCP packet because it's
968 * clear that the old packet has been
969 * dropped. This is the new "fast" path mtu discovery.
972 tcp_simple_retransmit(sk);
973 } /* else let the usual retransmit timer handle it */
977 * This routine is called by the ICMP module when it gets some
978 * sort of error condition. If err < 0 then the socket should
979 * be closed and the error returned to the user. If err > 0
980 * it's just the icmp type << 8 | icmp code. After adjustment
981 * header points to the first 8 bytes of the tcp header. We need
982 * to find the appropriate port.
984 * The locking strategy used here is very "optimistic". When
985 * someone else accesses the socket the ICMP is just dropped
986 * and for some paths there is no check at all.
987 * A more general error queue to queue errors for later handling
988 * is probably better.
992 void tcp_v4_err(struct sk_buff *skb, u32 info)
994 struct iphdr *iph = (struct iphdr *)skb->data;
995 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
997 struct inet_opt *inet;
998 int type = skb->h.icmph->type;
999 int code = skb->h.icmph->code;
1004 if (skb->len < (iph->ihl << 2) + 8) {
1005 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1009 sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1010 th->source, tcp_v4_iif(skb));
1012 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1015 if (sk->sk_state == TCP_TIME_WAIT) {
1016 tcp_tw_put((struct tcp_tw_bucket *)sk);
1021 /* If too many ICMPs get dropped on busy
1022 * servers this needs to be solved differently.
1024 if (sock_owned_by_user(sk))
1025 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1027 if (sk->sk_state == TCP_CLOSE)
1031 seq = ntohl(th->seq);
1032 if (sk->sk_state != TCP_LISTEN &&
1033 !between(seq, tp->snd_una, tp->snd_nxt)) {
1034 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1039 case ICMP_SOURCE_QUENCH:
1040 /* This is deprecated, but if someone generated it,
1041 * we have no reason to ignore it.
1043 if (!sock_owned_by_user(sk))
1046 case ICMP_PARAMETERPROB:
1049 case ICMP_DEST_UNREACH:
1050 if (code > NR_ICMP_UNREACH)
1053 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1054 if (!sock_owned_by_user(sk))
1055 do_pmtu_discovery(sk, iph, info);
1059 err = icmp_err_convert[code].errno;
1061 case ICMP_TIME_EXCEEDED:
1068 switch (sk->sk_state) {
1069 struct open_request *req, **prev;
1071 if (sock_owned_by_user(sk))
1074 req = tcp_v4_search_req(tp, &prev, th->dest,
1075 iph->daddr, iph->saddr);
1079 /* ICMPs are not backlogged, hence we cannot get
1080 an established socket here.
1084 if (seq != req->snt_isn) {
1085 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
1090 * Still in SYN_RECV, just remove it silently.
1091 * There is no good way to pass the error to the newly
1092 * created socket, and POSIX does not want network
1093 * errors returned from accept().
1095 tcp_synq_drop(sk, req, prev);
1099 case TCP_SYN_RECV: /* Cannot happen.
1100 It can, e.g., if SYNs crossed.
1102 if (!sock_owned_by_user(sk)) {
1103 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1106 sk->sk_error_report(sk);
1110 sk->sk_err_soft = err;
1115 /* If we've already connected we will keep trying
1116 * until we time out, or the user gives up.
1118 * RFC 1122 4.2.3.9 allows us to consider as hard errors
1119 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
1120 * but it is obsoleted by pmtu discovery).
1122 * Note that in the modern internet, where routing is unreliable
1123 * and broken firewalls sit in every dark corner sending random
1124 * errors ordered by their masters, even these two messages finally lose
1125 * their original sense (even Linux sends invalid PORT_UNREACHs).
1127 * Now we are in compliance with RFCs.
1132 if (!sock_owned_by_user(sk) && inet->recverr) {
1134 sk->sk_error_report(sk);
1135 } else { /* Only an error on timeout */
1136 sk->sk_err_soft = err;
1144 /* This routine computes an IPv4 TCP checksum. */
1145 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1146 struct sk_buff *skb)
1148 struct inet_opt *inet = inet_sk(sk);
1150 if (skb->ip_summed == CHECKSUM_HW) {
1151 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1152 skb->csum = offsetof(struct tcphdr, check);
1154 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1155 csum_partial((char *)th,
1162 * This routine will send an RST to the other tcp.
1164 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
1166 * Answer: if a packet caused an RST, it is not for a socket
1167 * existing in our system; if it is matched to a socket,
1168 * it is just a duplicate segment or a bug in the other side's TCP.
1169 * So we build the reply based only on the parameters
1170 * that arrived with the segment.
1171 * Exception: precedence violation. We do not implement it in any case.
1174 static void tcp_v4_send_reset(struct sk_buff *skb)
1176 struct tcphdr *th = skb->h.th;
1178 struct ip_reply_arg arg;
1180 /* Never send a reset in response to a reset. */
1184 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1187 /* Swap the send and the receive. */
1188 memset(&rth, 0, sizeof(struct tcphdr));
1189 rth.dest = th->source;
1190 rth.source = th->dest;
1191 rth.doff = sizeof(struct tcphdr) / 4;
1195 rth.seq = th->ack_seq;
1198 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1199 skb->len - (th->doff << 2));
1202 memset(&arg, 0, sizeof arg);
1203 arg.iov[0].iov_base = (unsigned char *)&rth;
1204 arg.iov[0].iov_len = sizeof rth;
1205 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1206 skb->nh.iph->saddr, /*XXX*/
1207 sizeof(struct tcphdr), IPPROTO_TCP, 0);
1208 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1210 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1212 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1213 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1216 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1217 outside of socket context, is certainly ugly. What can I do?
1220 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1223 struct tcphdr *th = skb->h.th;
1228 struct ip_reply_arg arg;
1230 memset(&rep.th, 0, sizeof(struct tcphdr));
1231 memset(&arg, 0, sizeof arg);
1233 arg.iov[0].iov_base = (unsigned char *)&rep;
1234 arg.iov[0].iov_len = sizeof(rep.th);
1236 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1237 (TCPOPT_TIMESTAMP << 8) |
1239 rep.tsopt[1] = htonl(tcp_time_stamp);
1240 rep.tsopt[2] = htonl(ts);
1241 arg.iov[0].iov_len = sizeof(rep);
1244 /* Swap the send and the receive. */
1245 rep.th.dest = th->source;
1246 rep.th.source = th->dest;
1247 rep.th.doff = arg.iov[0].iov_len / 4;
1248 rep.th.seq = htonl(seq);
1249 rep.th.ack_seq = htonl(ack);
1251 rep.th.window = htons(win);
1253 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1254 skb->nh.iph->saddr, /*XXX*/
1255 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1256 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1258 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1260 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1263 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1265 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1267 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1268 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1273 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1275 tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1279 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1280 struct open_request *req)
1283 struct ip_options *opt = req->af.v4_req.opt;
1284 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1286 { .daddr = ((opt && opt->srr) ?
1288 req->af.v4_req.rmt_addr),
1289 .saddr = req->af.v4_req.loc_addr,
1290 .tos = RT_CONN_FLAGS(sk) } },
1291 .proto = IPPROTO_TCP,
1293 { .sport = inet_sk(sk)->sport,
1294 .dport = req->rmt_port } } };
1296 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1297 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1300 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1302 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1309 * Send a SYN-ACK after having received an ACK.
1310 * This still operates on an open_request only, not on a big socket.
1313 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1314 struct dst_entry *dst)
1317 struct sk_buff * skb;
1319 /* First, grab a route. */
1320 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1323 skb = tcp_make_synack(sk, dst, req);
1326 struct tcphdr *th = skb->h.th;
1328 th->check = tcp_v4_check(th, skb->len,
1329 req->af.v4_req.loc_addr,
1330 req->af.v4_req.rmt_addr,
1331 csum_partial((char *)th, skb->len,
1334 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1335 req->af.v4_req.rmt_addr,
1336 req->af.v4_req.opt);
1337 if (err == NET_XMIT_CN)
1347 * IPv4 open_request destructor.
1349 static void tcp_v4_or_free(struct open_request *req)
1351 if (req->af.v4_req.opt)
1352 kfree(req->af.v4_req.opt);
1355 static inline void syn_flood_warning(struct sk_buff *skb)
1357 static unsigned long warntime;
1359 if (time_after(jiffies, (warntime + HZ * 60))) {
1362 "possible SYN flooding on port %d. Sending cookies.\n",
1363 ntohs(skb->h.th->dest));
1368 * Save and compile IPv4 options into the open_request if needed.
1370 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1371 struct sk_buff *skb)
1373 struct ip_options *opt = &(IPCB(skb)->opt);
1374 struct ip_options *dopt = NULL;
1376 if (opt && opt->optlen) {
1377 int opt_size = optlength(opt);
1378 dopt = kmalloc(opt_size, GFP_ATOMIC);
1380 if (ip_options_echo(dopt, skb)) {
1390 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1391 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1392 * It would be better to replace it with a global counter for all sockets,
1393 * but then some measure against one socket starving all other sockets would be required.
1396 * It was 128 by default. Experiments with real servers show that
1397 * it is absolutely not enough even at 100 conn/sec. 256 cures most
1398 * of the problems. This value is adjusted to 128 for very small machines
1399 * (<= 32MB of memory) and to 1024 on normal or better ones (>= 256MB).
1400 * Increasing it further requires changing the hash table size.
1402 int sysctl_max_syn_backlog = 256;
1404 struct or_calltable or_ipv4 = {
1406 .rtx_syn_ack = tcp_v4_send_synack,
1407 .send_ack = tcp_v4_or_send_ack,
1408 .destructor = tcp_v4_or_free,
1409 .send_reset = tcp_v4_send_reset,
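/* or_ipv4 is the per-family operations vector attached to every IPv4
 * open_request: it supplies the routines for (re)sending the SYN-ACK,
 * acknowledging a retransmitted SYN, freeing the request, and sending
 * a reset.
 */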
1412 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1415 struct open_request *req;
1416 __u32 saddr = skb->nh.iph->saddr;
1417 __u32 daddr = skb->nh.iph->daddr;
1418 __u32 isn = TCP_SKB_CB(skb)->when;
1419 struct dst_entry *dst = NULL;
1420 #ifdef CONFIG_ACCEPT_QUEUES
1423 #ifdef CONFIG_SYN_COOKIES
1424 int want_cookie = 0;
1426 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1429 /* Never answer SYNs sent to broadcast or multicast */
1430 if (((struct rtable *)skb->dst)->rt_flags &
1431 (RTCF_BROADCAST | RTCF_MULTICAST))
1434 /* TW buckets are converted to open requests without
1435 * limitation; they conserve resources and the peer is
1436 * evidently a real one.
1438 if (tcp_synq_is_full(sk) && !isn) {
1439 #ifdef CONFIG_SYN_COOKIES
1440 if (sysctl_tcp_syncookies) {
1447 #ifdef CONFIG_ACCEPT_QUEUES
1448 class = (skb->nfmark <= 0) ? 0 :
1449 ((skb->nfmark >= NUM_ACCEPT_QUEUES) ? 0: skb->nfmark);
1451 * Accept only if the class has shares set, or if the default class,
1452 * i.e. class 0, has shares.
1454 if (!(tcp_sk(sk)->acceptq[class].aq_ratio)) {
1455 if (tcp_sk(sk)->acceptq[0].aq_ratio)
1462 /* Accept backlog is full. If we have already queued enough
1463 * warm entries in the syn queue, drop the request. It is better than
1464 * clogging the syn queue with openreqs with exponentially increasing timeouts.
1467 #ifdef CONFIG_ACCEPT_QUEUES
1468 if (sk_acceptq_is_full(sk, class) && tcp_synq_young(sk, class) > 1)
1470 if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1474 req = tcp_openreq_alloc();
1478 tcp_clear_options(&tp);
1480 tp.user_mss = tcp_sk(sk)->user_mss;
1482 tcp_parse_options(skb, &tp, 0);
1485 tcp_clear_options(&tp);
1489 if (tp.saw_tstamp && !tp.rcv_tsval) {
1490 /* Some OSes (unknown ones, but I see them on a web server which
1491 * contains information interesting only for Windows
1492 * users) do not send their stamp in the SYN. It is an easy case.
1493 * We simply do not advertise TS support.
1498 tp.tstamp_ok = tp.saw_tstamp;
1500 tcp_openreq_init(req, &tp, skb);
1501 #ifdef CONFIG_ACCEPT_QUEUES
1502 req->acceptq_class = class;
1503 req->acceptq_time_stamp = jiffies;
1505 req->af.v4_req.loc_addr = daddr;
1506 req->af.v4_req.rmt_addr = saddr;
1507 req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1508 req->class = &or_ipv4;
1510 TCP_ECN_create_request(req, skb->h.th);
1513 #ifdef CONFIG_SYN_COOKIES
1514 syn_flood_warning(skb);
1516 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1518 struct inet_peer *peer = NULL;
1520 /* VJ's idea. We save the last timestamp seen
1521 * from the destination in the peer table when entering
1522 * TIME-WAIT state, and check against it before
1523 * accepting a new connection request.
1525 * If "isn" is not zero, this request hit an alive
1526 * timewait bucket, so all the necessary checks
1527 * are made in the function processing the timewait state.
1529 if (tp.saw_tstamp &&
1530 sysctl_tcp_tw_recycle &&
1531 (dst = tcp_v4_route_req(sk, req)) != NULL &&
1532 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1533 peer->v4daddr == saddr) {
1534 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1535 (s32)(peer->tcp_ts - req->ts_recent) >
1537 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1542 /* Kill the following clause if you dislike this approach. */
1543 else if (!sysctl_tcp_syncookies &&
1544 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1545 (sysctl_max_syn_backlog >> 2)) &&
1546 (!peer || !peer->tcp_ts_stamp) &&
1547 (!dst || !dst_metric(dst, RTAX_RTT))) {
1548 /* Without syncookies the last quarter of the
1549 * backlog is filled with destinations
1550 * proven to be alive.
1551 * It means that we continue to communicate
1552 * with destinations already remembered
1553 * at the moment of the synflood.
1555 NETDEBUG(if (net_ratelimit()) \
1556 printk(KERN_DEBUG "TCP: drop open "
1557 "request from %u.%u."
1560 ntohs(skb->h.th->source)));
1565 isn = tcp_v4_init_sequence(sk, skb);
1569 if (tcp_v4_send_synack(sk, req, dst))
1573 tcp_openreq_free(req);
1575 tcp_v4_synq_add(sk, req);
1580 tcp_openreq_free(req);
1582 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
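/* tcp_v4_conn_request() outcome summary: a SYN either becomes a
 * queued open_request answered with a SYN-ACK, is answered
 * statelessly with a syncookie when the SYN queue overflows, or is
 * dropped when the accept backlog and the "proven destinations"
 * heuristic say the listener is overloaded.
 */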
1588 * The three way handshake has completed - we got a valid synack -
1589 * now create the new socket.
1591 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1592 struct open_request *req,
1593 struct dst_entry *dst)
1595 struct inet_opt *newinet;
1596 struct tcp_opt *newtp;
1599 #ifdef CONFIG_ACCEPT_QUEUES
1600 if (sk_acceptq_is_full(sk, req->acceptq_class))
1602 if (sk_acceptq_is_full(sk))
1606 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1609 newsk = tcp_create_openreq_child(sk, req, skb);
1613 newsk->sk_dst_cache = dst;
1614 tcp_v4_setup_caps(newsk, dst);
1616 newtp = tcp_sk(newsk);
1617 newinet = inet_sk(newsk);
1618 newinet->daddr = req->af.v4_req.rmt_addr;
1619 newinet->rcv_saddr = req->af.v4_req.loc_addr;
1620 newinet->saddr = req->af.v4_req.loc_addr;
1621 newinet->opt = req->af.v4_req.opt;
1622 req->af.v4_req.opt = NULL;
1623 newinet->mc_index = tcp_v4_iif(skb);
1624 newinet->mc_ttl = skb->nh.iph->ttl;
1625 newtp->ext_header_len = 0;
1627 newtp->ext_header_len = newinet->opt->optlen;
1628 newtp->ext2_header_len = dst->header_len;
1629 newinet->id = newtp->write_seq ^ jiffies;
1631 tcp_sync_mss(newsk, dst_pmtu(dst));
1632 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1633 tcp_initialize_rcv_mss(newsk);
1635 __tcp_v4_hash(newsk, 0);
1636 __tcp_inherit_port(sk, newsk);
1641 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1643 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
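/* The new child above copies its addressing from the open_request,
 * takes over any saved IP options, inherits the parent's port
 * binding and is hashed into the established table before being
 * handed back to the caller.
 */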
1648 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1650 struct tcphdr *th = skb->h.th;
1651 struct iphdr *iph = skb->nh.iph;
1652 struct tcp_opt *tp = tcp_sk(sk);
1654 struct open_request **prev;
1655 /* Find possible connection requests. */
1656 struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1657 iph->saddr, iph->daddr);
1659 return tcp_check_req(sk, skb, req, prev);
1661 nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1668 if (nsk->sk_state != TCP_TIME_WAIT) {
1672 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1676 #ifdef CONFIG_SYN_COOKIES
1677 if (!th->rst && !th->syn && th->ack)
1678 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1683 static int tcp_v4_checksum_init(struct sk_buff *skb)
1685 if (skb->ip_summed == CHECKSUM_HW) {
1686 skb->ip_summed = CHECKSUM_UNNECESSARY;
1687 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1688 skb->nh.iph->daddr, skb->csum))
1691 NETDEBUG(if (net_ratelimit())
1692 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1693 skb->ip_summed = CHECKSUM_NONE;
1695 if (skb->len <= 76) {
1696 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1698 skb_checksum(skb, 0, skb->len, 0)))
1700 skb->ip_summed = CHECKSUM_UNNECESSARY;
1702 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1704 skb->nh.iph->daddr, 0);
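/* Checksum policy: trust hardware-verified sums, verify short
 * segments (<= 76 bytes) in software immediately, and for longer
 * ones just prime skb->csum so the checksum can be completed later
 * during the copy.
 */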
1710 /* The socket must have its spinlock held when we get here.
1713 * We have a potential double-lock case here, so even when
1714 * doing backlog processing we use the BH locking scheme.
1715 * This is because we cannot sleep with the original spinlock held.
1718 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1720 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1721 TCP_CHECK_TIMER(sk);
1722 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1724 TCP_CHECK_TIMER(sk);
1728 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1731 if (sk->sk_state == TCP_LISTEN) {
1732 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1737 if (tcp_child_process(sk, nsk, skb))
1743 TCP_CHECK_TIMER(sk);
1744 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1746 TCP_CHECK_TIMER(sk);
1750 tcp_v4_send_reset(skb);
1753 /* Be careful here. If this function gets more complicated and
1754 * gcc suffers from register pressure on the x86, sk (in %ebx)
1755 * might be destroyed here. This current version compiles correctly,
1756 * but you have been warned.
1761 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1765 extern struct proto_ops inet_stream_ops;
1767 extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
1773 int tcp_v4_rcv(struct sk_buff *skb)
1779 if (skb->pkt_type != PACKET_HOST)
1782 /* Count it even if it's bad */
1783 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1785 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1790 if (th->doff < sizeof(struct tcphdr) / 4)
1792 if (!pskb_may_pull(skb, th->doff * 4))
1795 /* An explanation is required here, I think.
1796 * Packet length and doff are validated by header prediction,
1797 * provided the case of th->doff==0 is eliminated.
1798 * So, we defer the checks. */
1799 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1800 tcp_v4_checksum_init(skb) < 0))
1804 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1805 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1806 skb->len - th->doff * 4);
1807 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1808 TCP_SKB_CB(skb)->when = 0;
1809 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1810 TCP_SKB_CB(skb)->sacked = 0;
1812 sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1813 skb->nh.iph->daddr, ntohs(th->dest),
1820 /* Silently drop if VNET is active (if INET bind() has been
1821 * overridden) and the context is not entitled to read the
1824 if (inet_stream_ops.bind != inet_bind &&
1825 (int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
1828 if (sk->sk_state == TCP_TIME_WAIT)
1831 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1832 goto discard_and_relse;
1834 if (sk_filter(sk, skb, 0))
1835 goto discard_and_relse;
1841 if (!sock_owned_by_user(sk)) {
1842 if (!tcp_prequeue(sk, skb))
1843 ret = tcp_v4_do_rcv(sk, skb);
1845 sk_add_backlog(sk, skb);
1853 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1856 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1858 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1860 tcp_v4_send_reset(skb);
1864 /* Discard frame. */
1873 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1874 tcp_tw_put((struct tcp_tw_bucket *) sk);
1878 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1879 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1880 tcp_tw_put((struct tcp_tw_bucket *) sk);
1883 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1884 skb, th, skb->len)) {
1886 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1890 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1891 tcp_tw_put((struct tcp_tw_bucket *)sk);
1895 /* Fall through to ACK */
1898 tcp_v4_timewait_ack(sk, skb);
1902 case TCP_TW_SUCCESS:;
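/* A segment hitting a TIME-WAIT bucket may be rerouted to a current
 * listener as a fresh SYN, answered with an ACK, or simply consumed;
 * the switch above dispatches on tcp_timewait_state_process().
 */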
1907 /* With per-bucket locks this operation is not atomic, so
1908 * this version is not worse.
1910 static void __tcp_v4_rehash(struct sock *sk)
1912 sk->sk_prot->unhash(sk);
1913 sk->sk_prot->hash(sk);
1916 static int tcp_v4_reselect_saddr(struct sock *sk)
1918 struct inet_opt *inet = inet_sk(sk);
1921 __u32 old_saddr = inet->saddr;
1923 __u32 daddr = inet->daddr;
1925 if (inet->opt && inet->opt->srr)
1926 daddr = inet->opt->faddr;
1928 /* Query new route. */
1929 err = ip_route_connect(&rt, daddr, 0,
1930 RT_TOS(inet->tos) | sk->sk_localroute,
1931 sk->sk_bound_dev_if,
1933 inet->sport, inet->dport, sk);
1937 __sk_dst_set(sk, &rt->u.dst);
1938 tcp_v4_setup_caps(sk, &rt->u.dst);
1939 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1941 new_saddr = rt->rt_src;
1943 if (new_saddr == old_saddr)
1946 if (sysctl_ip_dynaddr > 1) {
1947 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1948 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1950 NIPQUAD(new_saddr));
1953 inet->saddr = new_saddr;
1954 inet->rcv_saddr = new_saddr;
1956 /* XXX The one and only ugly spot where we need to
1957 * XXX really change a socket's identity after
1958 * XXX it has entered the hashes. -DaveM
1960 * Besides that, it does not check for connection
1961 * uniqueness. Wait for trouble.
1963 __tcp_v4_rehash(sk);
1967 int tcp_v4_rebuild_header(struct sock *sk)
1969 struct inet_opt *inet = inet_sk(sk);
1970 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1974 /* Route is OK, nothing to do. */
1979 daddr = inet->daddr;
1980 if (inet->opt && inet->opt->srr)
1981 daddr = inet->opt->faddr;
1984 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1987 .saddr = inet->saddr,
1988 .tos = RT_CONN_FLAGS(sk) } },
1989 .proto = IPPROTO_TCP,
1991 { .sport = inet->sport,
1992 .dport = inet->dport } } };
1994 err = ip_route_output_flow(&rt, &fl, sk, 0);
1997 __sk_dst_set(sk, &rt->u.dst);
1998 tcp_v4_setup_caps(sk, &rt->u.dst);
1999 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
2003 /* Routing failed... */
2004 sk->sk_route_caps = 0;
2006 if (!sysctl_ip_dynaddr ||
2007 sk->sk_state != TCP_SYN_SENT ||
2008 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
2009 (err = tcp_v4_reselect_saddr(sk)) != 0)
2010 sk->sk_err_soft = -err;
2015 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2017 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2018 struct inet_opt *inet = inet_sk(sk);
2020 sin->sin_family = AF_INET;
2021 sin->sin_addr.s_addr = inet->daddr;
2022 sin->sin_port = inet->dport;
2025 /* VJ's idea. Save the last timestamp seen from this destination
2026 * and hold it at least for the normal timewait interval, to use for duplicate
2027 * segment detection in subsequent connections before they enter the synchronized state.
2031 int tcp_v4_remember_stamp(struct sock *sk)
2033 struct inet_opt *inet = inet_sk(sk);
2034 struct tcp_opt *tp = tcp_sk(sk);
2035 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2036 struct inet_peer *peer = NULL;
2039 if (!rt || rt->rt_dst != inet->daddr) {
2040 peer = inet_getpeer(inet->daddr, 1);
2044 rt_bind_peer(rt, 1);
2049 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2050 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2051 peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2052 peer->tcp_ts_stamp = tp->ts_recent_stamp;
2053 peer->tcp_ts = tp->ts_recent;
2063 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2065 struct inet_peer *peer = NULL;
2067 peer = inet_getpeer(tw->tw_daddr, 1);
2070 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2071 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2072 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2073 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2074 peer->tcp_ts = tw->tw_ts_recent;
2083 struct tcp_func ipv4_specific = {
2084 .queue_xmit = ip_queue_xmit,
2085 .send_check = tcp_v4_send_check,
2086 .rebuild_header = tcp_v4_rebuild_header,
2087 .conn_request = tcp_v4_conn_request,
2088 .syn_recv_sock = tcp_v4_syn_recv_sock,
2089 .remember_stamp = tcp_v4_remember_stamp,
2090 .net_header_len = sizeof(struct iphdr),
2091 .setsockopt = ip_setsockopt,
2092 .getsockopt = ip_getsockopt,
2093 .addr2sockaddr = v4_addr2sockaddr,
2094 .sockaddr_len = sizeof(struct sockaddr_in),
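/* ipv4_specific is the address-family operations block installed in
 * tp->af_specific by tcp_v4_init_sock(); the IPv6 code provides an
 * analogous vector, which lets the core TCP state machine stay
 * family-agnostic.
 */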
2097 /* NOTE: A lot of things are set to zero explicitly by the call to
2098 * sk_alloc(), so they need not be done here.
2100 static int tcp_v4_init_sock(struct sock *sk)
2102 struct tcp_opt *tp = tcp_sk(sk);
2104 skb_queue_head_init(&tp->out_of_order_queue);
2105 tcp_init_xmit_timers(sk);
2106 tcp_prequeue_init(tp);
2108 tp->rto = TCP_TIMEOUT_INIT;
2109 tp->mdev = TCP_TIMEOUT_INIT;
2111 /* So many TCP implementations out there (incorrectly) count the
2112 * initial SYN frame in their delayed-ACK and congestion control
2113 * algorithms that we must have the following bandaid to talk
2114 * efficiently to them. -DaveM
2118 /* See draft-stevens-tcpca-spec-01 for discussion of the
2119 * initialization of these values.
2121 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
2122 tp->snd_cwnd_clamp = ~0;
2123 tp->mss_cache = 536;
2125 tp->reordering = sysctl_tcp_reordering;
2127 sk->sk_state = TCP_CLOSE;
2129 sk->sk_write_space = sk_stream_write_space;
2130 sk->sk_use_write_queue = 1;
2132 tp->af_specific = &ipv4_specific;
2134 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2135 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2137 atomic_inc(&tcp_sockets_allocated);
2142 int tcp_v4_destroy_sock(struct sock *sk)
2144 struct tcp_opt *tp = tcp_sk(sk);
2146 tcp_clear_xmit_timers(sk);
2148 /* Clean up the write buffer. */
2149 sk_stream_writequeue_purge(sk);
2151 /* Cleans up our, hopefully empty, out_of_order_queue. */
2152 __skb_queue_purge(&tp->out_of_order_queue);
2154 /* Clean the prequeue; it really must be empty */
2155 __skb_queue_purge(&tp->ucopy.prequeue);
2157 /* Clean up a referenced TCP bind bucket. */
2162 * If sendmsg cached page exists, toss it.
2164 if (sk->sk_sndmsg_page) {
2165 __free_page(sk->sk_sndmsg_page);
2166 sk->sk_sndmsg_page = NULL;
2169 atomic_dec(&tcp_sockets_allocated);
2174 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2176 #ifdef CONFIG_PROC_FS
2177 /* Proc filesystem TCP sock list dumping. */
2179 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2181 return hlist_empty(head) ? NULL :
2182 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2185 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2187 return tw->tw_node.next ?
2188 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
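/* The /proc/net/tcp iterator below walks three logical stages:
 * listening sockets (descending into their SYN queues), then
 * established sockets, then TIME-WAIT buckets, carrying its position
 * in tcp_iter_state between calls.
 */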
2191 static void *listening_get_next(struct seq_file *seq, void *cur)
2194 struct hlist_node *node;
2195 struct sock *sk = cur;
2196 struct tcp_iter_state* st = seq->private;
2200 sk = sk_head(&tcp_listening_hash[0]);
2206 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2207 struct open_request *req = cur;
2209 tp = tcp_sk(st->syn_wait_sk);
2213 vxdprintk(VXD_CBIT(net, 6),
2214 "sk,req: %p [#%d] (from %d)",
2215 req->sk, req->sk->sk_xid, current->xid);
2216 if (!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
2218 if (req->class->family == st->family) {
2224 if (++st->sbucket >= TCP_SYNQ_HSIZE)
2227 req = tp->listen_opt->syn_table[st->sbucket];
2229 sk = sk_next(st->syn_wait_sk);
2230 st->state = TCP_SEQ_STATE_LISTENING;
2231 read_unlock_bh(&tp->syn_wait_lock);
2235 sk_for_each_from(sk, node) {
2236 vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
2237 sk, sk->sk_xid, current->xid);
2238 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2240 if (sk->sk_family == st->family) {
2245 read_lock_bh(&tp->syn_wait_lock);
2246 if (tp->listen_opt && tp->listen_opt->qlen) {
2247 st->uid = sock_i_uid(sk);
2248 st->syn_wait_sk = sk;
2249 st->state = TCP_SEQ_STATE_OPENREQ;
2253 read_unlock_bh(&tp->syn_wait_lock);
2255 if (++st->bucket < TCP_LHTABLE_SIZE) {
2256 sk = sk_head(&tcp_listening_hash[st->bucket]);
2264 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2266 void *rc = listening_get_next(seq, NULL);
2268 while (rc && *pos) {
2269 rc = listening_get_next(seq, rc);
2275 static void *established_get_first(struct seq_file *seq)
2277 struct tcp_iter_state* st = seq->private;
2280 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2282 struct hlist_node *node;
2283 struct tcp_tw_bucket *tw;
2285 read_lock(&tcp_ehash[st->bucket].lock);
2286 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2287 vxdprintk(VXD_CBIT(net, 6),
2288 "sk,egf: %p [#%d] (from %d)",
2289 sk, sk->sk_xid, current->xid);
2290 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2292 if (sk->sk_family != st->family)
2297 st->state = TCP_SEQ_STATE_TIME_WAIT;
2298 tw_for_each(tw, node,
2299 &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2300 vxdprintk(VXD_CBIT(net, 6),
2301 "tw: %p [#%d] (from %d)",
2302 tw, tw->tw_xid, current->xid);
2303 if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2305 if (tw->tw_family != st->family)
2310 read_unlock(&tcp_ehash[st->bucket].lock);
2311 st->state = TCP_SEQ_STATE_ESTABLISHED;
2317 static void *established_get_next(struct seq_file *seq, void *cur)
2319 struct sock *sk = cur;
2320 struct tcp_tw_bucket *tw;
2321 struct hlist_node *node;
2322 struct tcp_iter_state* st = seq->private;
2326 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2330 while (tw && (tw->tw_family != st->family ||
2331 !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2338 read_unlock(&tcp_ehash[st->bucket].lock);
2339 st->state = TCP_SEQ_STATE_ESTABLISHED;
2340 if (++st->bucket < tcp_ehash_size) {
2341 read_lock(&tcp_ehash[st->bucket].lock);
2342 sk = sk_head(&tcp_ehash[st->bucket].chain);
2350 sk_for_each_from(sk, node) {
2351 vxdprintk(VXD_CBIT(net, 6),
2352 "sk,egn: %p [#%d] (from %d)",
2353 sk, sk->sk_xid, current->xid);
2354 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2356 if (sk->sk_family == st->family)
2360 st->state = TCP_SEQ_STATE_TIME_WAIT;
2361 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2369 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2371 void *rc = established_get_first(seq);
2374 rc = established_get_next(seq, rc);
2380 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2383 struct tcp_iter_state* st = seq->private;
2386 st->state = TCP_SEQ_STATE_LISTENING;
2387 rc = listening_get_idx(seq, &pos);
2390 tcp_listen_unlock();
2392 st->state = TCP_SEQ_STATE_ESTABLISHED;
2393 rc = established_get_idx(seq, pos);
2399 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2401 struct tcp_iter_state* st = seq->private;
2402 st->state = TCP_SEQ_STATE_LISTENING;
2404 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2407 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2410 struct tcp_iter_state* st;
2412 if (v == SEQ_START_TOKEN) {
2413 rc = tcp_get_idx(seq, 0);
2418 switch (st->state) {
2419 case TCP_SEQ_STATE_OPENREQ:
2420 case TCP_SEQ_STATE_LISTENING:
2421 rc = listening_get_next(seq, v);
2423 tcp_listen_unlock();
2425 st->state = TCP_SEQ_STATE_ESTABLISHED;
2426 rc = established_get_first(seq);
2429 case TCP_SEQ_STATE_ESTABLISHED:
2430 case TCP_SEQ_STATE_TIME_WAIT:
2431 rc = established_get_next(seq, v);
2439 static void tcp_seq_stop(struct seq_file *seq, void *v)
2441 struct tcp_iter_state* st = seq->private;
2443 switch (st->state) {
2444 case TCP_SEQ_STATE_OPENREQ:
2446 struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2447 read_unlock_bh(&tp->syn_wait_lock);
2449 case TCP_SEQ_STATE_LISTENING:
2450 if (v != SEQ_START_TOKEN)
2451 tcp_listen_unlock();
2453 case TCP_SEQ_STATE_TIME_WAIT:
2454 case TCP_SEQ_STATE_ESTABLISHED:
2456 read_unlock(&tcp_ehash[st->bucket].lock);
2462 static int tcp_seq_open(struct inode *inode, struct file *file)
2464 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2465 struct seq_file *seq;
2466 struct tcp_iter_state *s;
2469 if (unlikely(afinfo == NULL))
2472 s = kmalloc(sizeof(*s), GFP_KERNEL);
2475 memset(s, 0, sizeof(*s));
2476 s->family = afinfo->family;
2477 s->seq_ops.start = tcp_seq_start;
2478 s->seq_ops.next = tcp_seq_next;
2479 s->seq_ops.show = afinfo->seq_show;
2480 s->seq_ops.stop = tcp_seq_stop;
2482 rc = seq_open(file, &s->seq_ops);
2485 seq = file->private_data;
2494 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2497 struct proc_dir_entry *p;
2501 afinfo->seq_fops->owner = afinfo->owner;
2502 afinfo->seq_fops->open = tcp_seq_open;
2503 afinfo->seq_fops->read = seq_read;
2504 afinfo->seq_fops->llseek = seq_lseek;
2505 afinfo->seq_fops->release = seq_release_private;
2507 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2515 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2519 proc_net_remove(afinfo->name);
2520 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
2523 static void get_openreq4(struct sock *sk, struct open_request *req,
2524 char *tmpbuf, int i, int uid)
2526 int ttd = req->expires - jiffies;
2528 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2529 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2531 req->af.v4_req.loc_addr,
2532 ntohs(inet_sk(sk)->sport),
2533 req->af.v4_req.rmt_addr,
2534 ntohs(req->rmt_port),
2536 0, 0, /* could print option size, but that is af dependent. */
2537 1, /* timers active (only the expire timer) */
2538 jiffies_to_clock_t(ttd),
2541 0, /* non standard timer */
2542 0, /* open_requests have no inode */
2543 atomic_read(&sk->sk_refcnt),
2547 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2550 unsigned long timer_expires;
2551 struct tcp_opt *tp = tcp_sk(sp);
2552 struct inet_opt *inet = inet_sk(sp);
2553 unsigned int dest = inet->daddr;
2554 unsigned int src = inet->rcv_saddr;
2555 __u16 destp = ntohs(inet->dport);
2556 __u16 srcp = ntohs(inet->sport);
2558 if (tp->pending == TCP_TIME_RETRANS) {
2560 timer_expires = tp->timeout;
2561 } else if (tp->pending == TCP_TIME_PROBE0) {
2563 timer_expires = tp->timeout;
2564 } else if (timer_pending(&sp->sk_timer)) {
2566 timer_expires = sp->sk_timer.expires;
2569 timer_expires = jiffies;
2572 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2573 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2574 i, src, srcp, dest, destp, sp->sk_state,
2575 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2577 jiffies_to_clock_t(timer_expires - jiffies),
2582 atomic_read(&sp->sk_refcnt), sp,
2583 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2585 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2588 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2590 unsigned int dest, src;
2592 int ttd = tw->tw_ttd - jiffies;
2597 dest = tw->tw_daddr;
2598 src = tw->tw_rcv_saddr;
2599 destp = ntohs(tw->tw_dport);
2600 srcp = ntohs(tw->tw_sport);
2602 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2603 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2604 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2605 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2606 atomic_read(&tw->tw_refcnt), tw);
2611 static int tcp4_seq_show(struct seq_file *seq, void *v)
2613 struct tcp_iter_state* st;
2614 char tmpbuf[TMPSZ + 1];
2616 if (v == SEQ_START_TOKEN) {
2617 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2618 " sl local_address rem_address st tx_queue "
2619 "rx_queue tr tm->when retrnsmt uid timeout "
2625 switch (st->state) {
2626 case TCP_SEQ_STATE_LISTENING:
2627 case TCP_SEQ_STATE_ESTABLISHED:
2628 get_tcp4_sock(v, tmpbuf, st->num);
2630 case TCP_SEQ_STATE_OPENREQ:
2631 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2633 case TCP_SEQ_STATE_TIME_WAIT:
2634 get_timewait4_sock(v, tmpbuf, st->num);
2637 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2642 static struct file_operations tcp4_seq_fops;
2643 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2644 .owner = THIS_MODULE,
2647 .seq_show = tcp4_seq_show,
2648 .seq_fops = &tcp4_seq_fops,
2651 int __init tcp4_proc_init(void)
2653 return tcp_proc_register(&tcp4_seq_afinfo);
2656 void tcp4_proc_exit(void)
2658 tcp_proc_unregister(&tcp4_seq_afinfo);
2660 #endif /* CONFIG_PROC_FS */
2662 struct proto tcp_prot = {
2665 .connect = tcp_v4_connect,
2666 .disconnect = tcp_disconnect,
2667 .accept = tcp_accept,
2669 .init = tcp_v4_init_sock,
2670 .destroy = tcp_v4_destroy_sock,
2671 .shutdown = tcp_shutdown,
2672 .setsockopt = tcp_setsockopt,
2673 .getsockopt = tcp_getsockopt,
2674 .sendmsg = tcp_sendmsg,
2675 .recvmsg = tcp_recvmsg,
2676 .backlog_rcv = tcp_v4_do_rcv,
2677 .hash = tcp_v4_hash,
2678 .unhash = tcp_unhash,
2679 .get_port = tcp_v4_get_port,
2680 .enter_memory_pressure = tcp_enter_memory_pressure,
2681 .sockets_allocated = &tcp_sockets_allocated,
2682 .memory_allocated = &tcp_memory_allocated,
2683 .memory_pressure = &tcp_memory_pressure,
2684 .sysctl_mem = sysctl_tcp_mem,
2685 .sysctl_wmem = sysctl_tcp_wmem,
2686 .sysctl_rmem = sysctl_tcp_rmem,
2687 .max_header = MAX_TCP_HEADER,
2692 void __init tcp_v4_init(struct net_proto_family *ops)
2694 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2696 panic("Failed to create the TCP control socket.\n");
2697 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2698 inet_sk(tcp_socket->sk)->uc_ttl = -1;
2700 /* Unhash it so that IP input processing does not even
2701 * see it; we do not wish this socket to see incoming packets.
2704 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2707 EXPORT_SYMBOL(ipv4_specific);
2708 EXPORT_SYMBOL(tcp_bind_hash);
2709 EXPORT_SYMBOL(tcp_bucket_create);
2710 EXPORT_SYMBOL(tcp_hashinfo);
2711 EXPORT_SYMBOL(tcp_inherit_port);
2712 EXPORT_SYMBOL(tcp_listen_wlock);
2713 EXPORT_SYMBOL(tcp_port_rover);
2714 EXPORT_SYMBOL(tcp_prot);
2715 EXPORT_SYMBOL(tcp_put_port);
2716 EXPORT_SYMBOL(tcp_unhash);
2717 EXPORT_SYMBOL(tcp_v4_conn_request);
2718 EXPORT_SYMBOL(tcp_v4_connect);
2719 EXPORT_SYMBOL(tcp_v4_do_rcv);
2720 EXPORT_SYMBOL(tcp_v4_lookup_listener);
2721 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2722 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2723 EXPORT_SYMBOL(tcp_v4_send_check);
2724 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2726 #ifdef CONFIG_PROC_FS
2727 EXPORT_SYMBOL(tcp_proc_register);
2728 EXPORT_SYMBOL(tcp_proc_unregister);
2730 #ifdef CONFIG_SYSCTL
2731 EXPORT_SYMBOL(sysctl_local_port_range);
2732 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2733 EXPORT_SYMBOL(sysctl_tcp_low_latency);