2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * open_request handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after year
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
51 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
69 #include <net/inet_common.h>
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
78 #include <linux/vs_base.h>
80 extern int sysctl_ip_dynaddr;
81 int sysctl_tcp_tw_reuse;
82 int sysctl_tcp_low_latency;
84 /* Check TCP sequence numbers in ICMP packets. */
85 #define ICMP_MIN_LENGTH 8
87 /* Socket used for sending RSTs */
88 static struct socket *tcp_socket;
90 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
93 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
94 .__tcp_lhash_lock = RW_LOCK_UNLOCKED,
95 .__tcp_lhash_users = ATOMIC_INIT(0),
97 = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
98 .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
102 * This array holds the first and last local port number.
103 * For high-usage systems, use sysctl to change this to
106 int sysctl_local_port_range[2] = { 1024, 4999 };
107 int tcp_port_rover = 1024 - 1;
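/* Hash function for the established table: fold the local and remote
 * (address, port) pairs together and mask with the table size, which is
 * assumed to be a power of two. */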
109 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
110 __u32 faddr, __u16 fport)
112 int h = (laddr ^ lport) ^ (faddr ^ fport);
115 return h & (tcp_ehash_size - 1);
118 static __inline__ int tcp_sk_hashfn(struct sock *sk)
120 struct inet_opt *inet = inet_sk(sk);
121 __u32 laddr = inet->rcv_saddr;
122 __u16 lport = inet->num;
123 __u32 faddr = inet->daddr;
124 __u16 fport = inet->dport;
126 return tcp_hashfn(laddr, lport, faddr, fport);
129 /* Allocate and initialize a new TCP local port bind bucket.
130 * The bindhash mutex for snum's hash chain must be held here.
132 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
135 struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
140 INIT_HLIST_HEAD(&tb->owners);
141 hlist_add_head(&tb->node, &head->chain);
146 /* Caller must hold hashbucket lock for this tb with local BH disabled */
147 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
149 if (hlist_empty(&tb->owners)) {
150 __hlist_del(&tb->node);
151 kmem_cache_free(tcp_bucket_cachep, tb);
155 /* Caller must disable local BH processing. */
156 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
158 struct tcp_bind_hashbucket *head =
159 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
160 struct tcp_bind_bucket *tb;
162 spin_lock(&head->lock);
163 tb = tcp_sk(sk)->bind_hash;
164 sk_add_bind_node(child, &tb->owners);
165 tcp_sk(child)->bind_hash = tb;
166 spin_unlock(&head->lock);
169 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
172 __tcp_inherit_port(sk, child);
176 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
179 inet_sk(sk)->num = snum;
180 sk_add_bind_node(sk, &tb->owners);
181 tcp_sk(sk)->bind_hash = tb;
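/* Walk the owners of a bind bucket and decide whether this socket may share
 * the port: sockets bound to different devices never conflict; otherwise a
 * conflict is declared when the receive addresses overlap (or either is a
 * wildcard), unless both sockets set SO_REUSEADDR and the existing one is
 * not listening. */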
184 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
186 const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
188 struct hlist_node *node;
189 int reuse = sk->sk_reuse;
191 sk_for_each_bound(sk2, node, &tb->owners) {
193 !tcp_v6_ipv6only(sk2) &&
194 (!sk->sk_bound_dev_if ||
195 !sk2->sk_bound_dev_if ||
196 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
197 if (!reuse || !sk2->sk_reuse ||
198 sk2->sk_state == TCP_LISTEN) {
199 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
200 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
201 sk2_rcv_saddr == sk_rcv_saddr)
209 /* Obtain a reference to a local port for the given sock,
210 * if snum is zero it means select any available local port.
212 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
214 struct tcp_bind_hashbucket *head;
215 struct hlist_node *node;
216 struct tcp_bind_bucket *tb;
221 int low = sysctl_local_port_range[0];
222 int high = sysctl_local_port_range[1];
223 int remaining = (high - low) + 1;
226 spin_lock(&tcp_portalloc_lock);
227 rover = tcp_port_rover;
230 if (rover < low || rover > high)
232 head = &tcp_bhash[tcp_bhashfn(rover)];
233 spin_lock(&head->lock);
234 tb_for_each(tb, node, &head->chain)
235 if (tb->port == rover)
239 spin_unlock(&head->lock);
240 } while (--remaining > 0);
241 tcp_port_rover = rover;
242 spin_unlock(&tcp_portalloc_lock);
244 /* Exhausted local port range during search? */
249 /* OK, here is the one we will use. HEAD is
250 * non-NULL and we hold its mutex.
254 head = &tcp_bhash[tcp_bhashfn(snum)];
255 spin_lock(&head->lock);
256 tb_for_each(tb, node, &head->chain)
257 if (tb->port == snum)
263 if (!hlist_empty(&tb->owners)) {
264 if (sk->sk_reuse > 1)
266 if (tb->fastreuse > 0 &&
267 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
271 if (tcp_bind_conflict(sk, tb))
277 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
279 if (hlist_empty(&tb->owners)) {
280 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
284 } else if (tb->fastreuse &&
285 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
288 if (!tcp_sk(sk)->bind_hash)
289 tcp_bind_hash(sk, tb, snum);
290 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
294 spin_unlock(&head->lock);
300 /* Get rid of any references to a local port held by the
303 static void __tcp_put_port(struct sock *sk)
305 struct inet_opt *inet = inet_sk(sk);
306 struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
307 struct tcp_bind_bucket *tb;
309 spin_lock(&head->lock);
310 tb = tcp_sk(sk)->bind_hash;
311 __sk_del_bind_node(sk);
312 tcp_sk(sk)->bind_hash = NULL;
314 tcp_bucket_destroy(tb);
315 spin_unlock(&head->lock);
318 void tcp_put_port(struct sock *sk)
325 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
326 * Look, when several writers sleep and the reader wakes them up, all but one
327 * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
328 * this, _but_ remember, it adds useless work on UP machines (wake up each
329 * exclusive lock release). It should be ifdefed really.
332 void tcp_listen_wlock(void)
334 write_lock(&tcp_lhash_lock);
336 if (atomic_read(&tcp_lhash_users)) {
340 prepare_to_wait_exclusive(&tcp_lhash_wait,
341 &wait, TASK_UNINTERRUPTIBLE);
342 if (!atomic_read(&tcp_lhash_users))
344 write_unlock_bh(&tcp_lhash_lock);
346 write_lock_bh(&tcp_lhash_lock);
349 finish_wait(&tcp_lhash_wait, &wait);
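/* Link an unhashed socket into the listening hash (for listeners, when
 * listen_possible is set) or into the established hash chain selected by
 * tcp_sk_hashfn(), bumping the protocol use count. */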
353 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
355 struct hlist_head *list;
358 BUG_TRAP(sk_unhashed(sk));
359 if (listen_possible && sk->sk_state == TCP_LISTEN) {
360 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
361 lock = &tcp_lhash_lock;
364 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
365 lock = &tcp_ehash[sk->sk_hashent].lock;
368 __sk_add_node(sk, list);
369 sock_prot_inc_use(sk->sk_prot);
371 if (listen_possible && sk->sk_state == TCP_LISTEN)
372 wake_up(&tcp_lhash_wait);
375 static void tcp_v4_hash(struct sock *sk)
377 if (sk->sk_state != TCP_CLOSE) {
379 __tcp_v4_hash(sk, 1);
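/* Remove the socket from whichever hash chain it occupies, dropping the
 * protocol use count and waking any waiters on the listening hash lock. */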
384 void tcp_unhash(struct sock *sk)
391 if (sk->sk_state == TCP_LISTEN) {
394 lock = &tcp_lhash_lock;
396 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
398 write_lock_bh(&head->lock);
401 if (__sk_del_node_init(sk))
402 sock_prot_dec_use(sk->sk_prot);
403 write_unlock_bh(lock);
406 if (sk->sk_state == TCP_LISTEN)
407 wake_up(&tcp_lhash_wait);
410 /* Don't inline this cruft. There are some nice properties to
411 * exploit here. The BSD API does not allow a listening TCP
412 * to specify the remote port nor the remote address for the
413 * connection. So always assume those are both wildcarded
414 * during the search since they can never be otherwise.
416 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
417 unsigned short hnum, int dif)
419 struct sock *result = NULL, *sk;
420 struct hlist_node *node;
424 sk_for_each(sk, node, head) {
425 struct inet_opt *inet = inet_sk(sk);
427 if (inet->num == hnum && !ipv6_only_sock(sk)) {
428 __u32 rcv_saddr = inet->rcv_saddr;
430 score = (sk->sk_family == PF_INET ? 1 : 0);
432 if (rcv_saddr != daddr)
436 if (sk->sk_bound_dev_if) {
437 if (sk->sk_bound_dev_if != dif)
443 if (score > hiscore) {
452 /* Optimize the common listener case. */
453 inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
456 struct sock *sk = NULL;
457 struct hlist_head *head;
459 read_lock(&tcp_lhash_lock);
460 head = &tcp_listening_hash[tcp_lhashfn(hnum)];
461 if (!hlist_empty(head)) {
462 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
463 if (inet->num == hnum && !sk->sk_node.next &&
464 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
465 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
466 !sk->sk_bound_dev_if)
468 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
474 read_unlock(&tcp_lhash_lock);
478 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
479 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
481 * Local BH must be disabled here.
484 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
488 struct tcp_ehash_bucket *head;
489 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
490 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
492 struct hlist_node *node;
493 /* Optimize here for direct hit, only listening connections can
494 * have wildcards anyway.
496 int hash = tcp_hashfn(daddr, hnum, saddr, sport);
497 head = &tcp_ehash[hash];
498 read_lock(&head->lock);
499 sk_for_each(sk, node, &head->chain) {
500 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
501 goto hit; /* You sunk my battleship! */
504 /* Must check for a TIME_WAIT'er before going to listener hash. */
505 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
506 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
511 read_unlock(&head->lock);
518 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
519 u32 daddr, u16 hnum, int dif)
521 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
524 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
527 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
533 sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
539 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
541 return secure_tcp_sequence_number(skb->nh.iph->daddr,
547 /* called with local bh disabled */
548 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
549 struct tcp_tw_bucket **twp)
551 struct inet_opt *inet = inet_sk(sk);
552 u32 daddr = inet->rcv_saddr;
553 u32 saddr = inet->daddr;
554 int dif = sk->sk_bound_dev_if;
555 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
556 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
557 int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
558 struct tcp_ehash_bucket *head = &tcp_ehash[hash];
560 struct hlist_node *node;
561 struct tcp_tw_bucket *tw;
563 write_lock(&head->lock);
565 /* Check TIME-WAIT sockets first. */
566 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
567 tw = (struct tcp_tw_bucket *)sk2;
569 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
570 struct tcp_opt *tp = tcp_sk(sk);
572 /* With PAWS, it is safe from the viewpoint
573 of data integrity. Even without PAWS it
574 is safe provided sequence spaces do not
575 overlap i.e. at data rates <= 80Mbit/sec.
577 Actually, the idea is close to VJ's one,
578 only the timestamp cache is held not per host,
579 but per port pair, and the TW bucket is used
582 If the TW bucket has already been destroyed we
583 fall back to VJ's scheme and use the initial
584 timestamp retrieved from the peer table.
586 if (tw->tw_ts_recent_stamp &&
587 (!twp || (sysctl_tcp_tw_reuse &&
589 tw->tw_ts_recent_stamp > 1))) {
591 tw->tw_snd_nxt + 65535 + 2) == 0)
593 tp->ts_recent = tw->tw_ts_recent;
594 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
603 /* And established part... */
604 sk_for_each(sk2, node, &head->chain) {
605 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
610 /* Must record num and sport now. Otherwise we will see
611 * a socket with a funny identity in the hash table. */
613 inet->sport = htons(lport);
614 sk->sk_hashent = hash;
615 BUG_TRAP(sk_unhashed(sk));
616 __sk_add_node(sk, &head->chain);
617 sock_prot_inc_use(sk->sk_prot);
618 write_unlock(&head->lock);
622 NET_INC_STATS_BH(TimeWaitRecycled);
624 /* Silly. Should hash-dance instead... */
625 tcp_tw_deschedule(tw);
626 NET_INC_STATS_BH(TimeWaitRecycled);
634 write_unlock(&head->lock);
635 return -EADDRNOTAVAIL;
639 * Bind a port for a connect operation and hash it.
641 static int tcp_v4_hash_connect(struct sock *sk)
643 unsigned short snum = inet_sk(sk)->num;
644 struct tcp_bind_hashbucket *head;
645 struct tcp_bind_bucket *tb;
650 int low = sysctl_local_port_range[0];
651 int high = sysctl_local_port_range[1];
652 int remaining = (high - low) + 1;
653 struct hlist_node *node;
654 struct tcp_tw_bucket *tw = NULL;
658 /* TODO. Actually it is not such a bad idea to remove
659 * tcp_portalloc_lock before the next submission to Linus.
660 * As soon as we touch this place at all it is time to think.
662 * Now it protects a single _advisory_ variable, tcp_port_rover,
663 * hence it is mostly useless.
664 * The code will work nicely if we just delete it, but
665 * I am afraid that in the contended case it will work no better or
666 * even worse: another cpu will just hit the same bucket
668 * So some cpu salt could remove both contention and
669 * memory pingpong. Any ideas how to do this in a nice way?
671 spin_lock(&tcp_portalloc_lock);
672 rover = tcp_port_rover;
676 if ((rover < low) || (rover > high))
678 head = &tcp_bhash[tcp_bhashfn(rover)];
679 spin_lock(&head->lock);
681 /* Does not bother with rcv_saddr checks,
682 * because the established check is already unique enough.
685 tb_for_each(tb, node, &head->chain) {
686 if (tb->port == rover) {
687 BUG_TRAP(!hlist_empty(&tb->owners));
688 if (tb->fastreuse >= 0)
690 if (!__tcp_v4_check_established(sk,
698 tb = tcp_bucket_create(head, rover);
700 spin_unlock(&head->lock);
707 spin_unlock(&head->lock);
708 } while (--remaining > 0);
709 tcp_port_rover = rover;
710 spin_unlock(&tcp_portalloc_lock);
714 return -EADDRNOTAVAIL;
717 /* All locks still held and bhs disabled */
718 tcp_port_rover = rover;
719 spin_unlock(&tcp_portalloc_lock);
721 tcp_bind_hash(sk, tb, rover);
722 if (sk_unhashed(sk)) {
723 inet_sk(sk)->sport = htons(rover);
724 __tcp_v4_hash(sk, 0);
726 spin_unlock(&head->lock);
729 tcp_tw_deschedule(tw);
737 head = &tcp_bhash[tcp_bhashfn(snum)];
738 tb = tcp_sk(sk)->bind_hash;
739 spin_lock_bh(&head->lock);
740 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
741 __tcp_v4_hash(sk, 0);
742 spin_unlock_bh(&head->lock);
745 spin_unlock(&head->lock);
746 /* No definite answer... Walk to established hash table */
747 ret = __tcp_v4_check_established(sk, snum, NULL);
754 /* This will initiate an outgoing connection. */
755 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
757 struct inet_opt *inet = inet_sk(sk);
758 struct tcp_opt *tp = tcp_sk(sk);
759 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
765 if (addr_len < sizeof(struct sockaddr_in))
768 if (usin->sin_family != AF_INET)
769 return -EAFNOSUPPORT;
771 nexthop = daddr = usin->sin_addr.s_addr;
772 if (inet->opt && inet->opt->srr) {
775 nexthop = inet->opt->faddr;
778 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
779 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
781 inet->sport, usin->sin_port, sk);
785 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
790 if (!inet->opt || !inet->opt->srr)
794 inet->saddr = rt->rt_src;
795 inet->rcv_saddr = inet->saddr;
797 if (tp->ts_recent_stamp && inet->daddr != daddr) {
798 /* Reset inherited state */
800 tp->ts_recent_stamp = 0;
804 if (sysctl_tcp_tw_recycle &&
805 !tp->ts_recent_stamp && rt->rt_dst == daddr) {
806 struct inet_peer *peer = rt_get_peer(rt);
808 /* VJ's idea. We save the last timestamp seen from
809 * the destination in the peer table when entering TIME-WAIT state,
810 * and initialize ts_recent from it when trying a new connection.
813 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
814 tp->ts_recent_stamp = peer->tcp_ts_stamp;
815 tp->ts_recent = peer->tcp_ts;
819 inet->dport = usin->sin_port;
822 tp->ext_header_len = 0;
824 tp->ext_header_len = inet->opt->optlen;
828 /* Socket identity is still unknown (sport may be zero).
829 * However we set state to SYN-SENT and, without releasing the socket
830 * lock, select a source port, enter ourselves into the hash tables and
831 * complete initialization after this.
833 tcp_set_state(sk, TCP_SYN_SENT);
834 err = tcp_v4_hash_connect(sk);
838 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
842 /* OK, now commit destination to socket. */
843 __sk_dst_set(sk, &rt->u.dst);
844 tcp_v4_setup_caps(sk, &rt->u.dst);
845 tp->ext2_header_len = rt->u.dst.header_len;
848 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
853 inet->id = tp->write_seq ^ jiffies;
855 err = tcp_connect(sk);
863 /* This unhashes the socket and releases the local port, if necessary. */
864 tcp_set_state(sk, TCP_CLOSE);
866 sk->sk_route_caps = 0;
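/* Interface index the packet arrived on, as recorded in its route. */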
871 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
873 return ((struct rtable *)skb->dst)->rt_iif;
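/* Hash a (remote address, remote port) pair into the listener's SYN queue;
 * rnd (the listener's hash_rnd) perturbs the hash so chain placement is
 * unpredictable to remote senders. */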
876 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
878 return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
881 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
882 struct open_request ***prevp,
884 __u32 raddr, __u32 laddr)
886 struct tcp_listen_opt *lopt = tp->listen_opt;
887 struct open_request *req, **prev;
889 for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
890 (req = *prev) != NULL;
891 prev = &req->dl_next) {
892 if (req->rmt_port == rport &&
893 req->af.v4_req.rmt_addr == raddr &&
894 req->af.v4_req.loc_addr == laddr &&
895 TCP_INET_FAMILY(req->class->family)) {
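/* Queue a new open_request on the listener's SYN table under syn_wait_lock
 * and set its initial retransmission deadline. */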
905 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
907 struct tcp_opt *tp = tcp_sk(sk);
908 struct tcp_listen_opt *lopt = tp->listen_opt;
909 u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
911 req->expires = jiffies + TCP_TIMEOUT_INIT;
914 req->dl_next = lopt->syn_table[h];
916 write_lock(&tp->syn_wait_lock);
917 lopt->syn_table[h] = req;
918 write_unlock(&tp->syn_wait_lock);
920 #ifdef CONFIG_ACCEPT_QUEUES
921 tcp_synq_added(sk, req);
929 * This routine does path mtu discovery as defined in RFC1191.
931 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
934 struct dst_entry *dst;
935 struct inet_opt *inet = inet_sk(sk);
936 struct tcp_opt *tp = tcp_sk(sk);
938 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
939 * sent out by Linux are always < 576 bytes so they should go through
942 if (sk->sk_state == TCP_LISTEN)
945 /* We don't check in the dst entry if pmtu discovery is forbidden
946 * on this route. We just assume that no packet-too-big packets
947 * are sent back when pmtu discovery is not active.
948 * There is a small race when the user changes this flag in the
949 * route, but I think that's acceptable.
951 if ((dst = __sk_dst_check(sk, 0)) == NULL)
954 dst->ops->update_pmtu(dst, mtu);
956 /* Something is about to go wrong... Remember the soft error
957 * in case this connection is not able to recover.
959 if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
960 sk->sk_err_soft = EMSGSIZE;
964 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
965 tp->pmtu_cookie > mtu) {
966 tcp_sync_mss(sk, mtu);
968 /* Resend the TCP packet because it's
969 * clear that the old packet has been
970 * dropped. This is the new "fast" path mtu discovery.
973 tcp_simple_retransmit(sk);
974 } /* else let the usual retransmit timer handle it */
978 * This routine is called by the ICMP module when it gets some
979 * sort of error condition. If err < 0 then the socket should
980 * be closed and the error returned to the user. If err > 0
981 * it's just the icmp type << 8 | icmp code. After adjustment
982 * header points to the first 8 bytes of the tcp header. We need
983 * to find the appropriate port.
985 * The locking strategy used here is very "optimistic". When
986 * someone else accesses the socket the ICMP is just dropped
987 * and for some paths there is no check at all.
988 * A more general error queue to queue errors for later handling
989 * is probably better.
993 void tcp_v4_err(struct sk_buff *skb, u32 info)
995 struct iphdr *iph = (struct iphdr *)skb->data;
996 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
998 struct inet_opt *inet;
999 int type = skb->h.icmph->type;
1000 int code = skb->h.icmph->code;
1005 if (skb->len < (iph->ihl << 2) + 8) {
1006 ICMP_INC_STATS_BH(IcmpInErrors);
1010 sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1011 th->source, tcp_v4_iif(skb));
1013 ICMP_INC_STATS_BH(IcmpInErrors);
1016 if (sk->sk_state == TCP_TIME_WAIT) {
1017 tcp_tw_put((struct tcp_tw_bucket *)sk);
1022 /* If too many ICMPs get dropped on busy
1023 * servers this needs to be solved differently.
1025 if (sock_owned_by_user(sk))
1026 NET_INC_STATS_BH(LockDroppedIcmps);
1028 if (sk->sk_state == TCP_CLOSE)
1032 seq = ntohl(th->seq);
1033 if (sk->sk_state != TCP_LISTEN &&
1034 !between(seq, tp->snd_una, tp->snd_nxt)) {
1035 NET_INC_STATS(OutOfWindowIcmps);
1040 case ICMP_SOURCE_QUENCH:
1041 /* This is deprecated, but if someone generated it,
1042 * we have no reason to ignore it.
1044 if (!sock_owned_by_user(sk))
1047 case ICMP_PARAMETERPROB:
1050 case ICMP_DEST_UNREACH:
1051 if (code > NR_ICMP_UNREACH)
1054 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1055 if (!sock_owned_by_user(sk))
1056 do_pmtu_discovery(sk, iph, info);
1060 err = icmp_err_convert[code].errno;
1062 case ICMP_TIME_EXCEEDED:
1069 switch (sk->sk_state) {
1070 struct open_request *req, **prev;
1072 if (sock_owned_by_user(sk))
1075 req = tcp_v4_search_req(tp, &prev, th->dest,
1076 iph->daddr, iph->saddr);
1080 /* ICMPs are not backlogged, hence we cannot get
1081 an established socket here.
1085 if (seq != req->snt_isn) {
1086 NET_INC_STATS_BH(OutOfWindowIcmps);
1091 * Still in SYN_RECV, just remove it silently.
1092 * There is no good way to pass the error to the newly
1093 * created socket, and POSIX does not want network
1094 * errors returned from accept().
1096 tcp_synq_drop(sk, req, prev);
1100 case TCP_SYN_RECV: /* Cannot happen.
1101 It can, e.g., if SYNs crossed.
1103 if (!sock_owned_by_user(sk)) {
1104 TCP_INC_STATS_BH(TcpAttemptFails);
1107 sk->sk_error_report(sk);
1111 sk->sk_err_soft = err;
1116 /* If we've already connected we will keep trying
1117 * until we time out, or the user gives up.
1119 * rfc1122 4.2.3.9 allows considering as hard errors
1120 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
1121 * but it is obsoleted by pmtu discovery).
1123 * Note that in the modern internet, where routing is unreliable
1124 * and broken firewalls sit in every dark corner sending random
1125 * errors ordered by their masters, even these two messages finally lose
1126 * their original sense (even Linux sends invalid PORT_UNREACHs)
1128 * Now we are in compliance with RFCs.
1133 if (!sock_owned_by_user(sk) && inet->recverr) {
1135 sk->sk_error_report(sk);
1136 } else { /* Only an error on timeout */
1137 sk->sk_err_soft = err;
1145 /* This routine computes an IPv4 TCP checksum. */
1146 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1147 struct sk_buff *skb)
1149 struct inet_opt *inet = inet_sk(sk);
1151 if (skb->ip_summed == CHECKSUM_HW) {
1152 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1153 skb->csum = offsetof(struct tcphdr, check);
1155 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1156 csum_partial((char *)th,
1163 * This routine will send an RST to the other tcp.
1165 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)?
1167 * Answer: if a packet caused the RST, it is not for a socket
1168 * existing in our system; if it is matched to a socket,
1169 * it is just a duplicate segment or a bug in the other side's TCP.
1170 * So we build the reply based only on the parameters
1171 * that arrived with the segment.
1172 * Exception: precedence violation. We do not implement it in any case.
1175 static void tcp_v4_send_reset(struct sk_buff *skb)
1177 struct tcphdr *th = skb->h.th;
1179 struct ip_reply_arg arg;
1181 /* Never send a reset in response to a reset. */
1185 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1188 /* Swap the send and the receive. */
1189 memset(&rth, 0, sizeof(struct tcphdr));
1190 rth.dest = th->source;
1191 rth.source = th->dest;
1192 rth.doff = sizeof(struct tcphdr) / 4;
1196 rth.seq = th->ack_seq;
1199 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1200 skb->len - (th->doff << 2));
1203 memset(&arg, 0, sizeof arg);
1204 arg.iov[0].iov_base = (unsigned char *)&rth;
1205 arg.iov[0].iov_len = sizeof rth;
1206 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1207 skb->nh.iph->saddr, /*XXX*/
1208 sizeof(struct tcphdr), IPPROTO_TCP, 0);
1209 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1211 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1213 TCP_INC_STATS_BH(TcpOutSegs);
1214 TCP_INC_STATS_BH(TcpOutRsts);
1217 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1218 outside socket context, is certainly ugly. What can I do?
1221 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1224 struct tcphdr *th = skb->h.th;
1229 struct ip_reply_arg arg;
1231 memset(&rep.th, 0, sizeof(struct tcphdr));
1232 memset(&arg, 0, sizeof arg);
1234 arg.iov[0].iov_base = (unsigned char *)&rep;
1235 arg.iov[0].iov_len = sizeof(rep.th);
1237 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1238 (TCPOPT_TIMESTAMP << 8) |
1240 rep.tsopt[1] = htonl(tcp_time_stamp);
1241 rep.tsopt[2] = htonl(ts);
1242 arg.iov[0].iov_len = sizeof(rep);
1245 /* Swap the send and the receive. */
1246 rep.th.dest = th->source;
1247 rep.th.source = th->dest;
1248 rep.th.doff = arg.iov[0].iov_len / 4;
1249 rep.th.seq = htonl(seq);
1250 rep.th.ack_seq = htonl(ack);
1252 rep.th.window = htons(win);
1254 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1255 skb->nh.iph->saddr, /*XXX*/
1256 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1257 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1259 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1261 TCP_INC_STATS_BH(TcpOutSegs);
1264 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1266 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1268 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1269 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1274 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1276 tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
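/* Build the route used to answer a connection request; if the saved IP
 * options carry a source route, the first hop from those options is used
 * as the destination. */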
1280 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1281 struct open_request *req)
1284 struct ip_options *opt = req->af.v4_req.opt;
1285 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1287 { .daddr = ((opt && opt->srr) ?
1289 req->af.v4_req.rmt_addr),
1290 .saddr = req->af.v4_req.loc_addr,
1291 .tos = RT_CONN_FLAGS(sk) } },
1292 .proto = IPPROTO_TCP,
1294 { .sport = inet_sk(sk)->sport,
1295 .dport = req->rmt_port } } };
1297 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1298 IP_INC_STATS_BH(OutNoRoutes);
1301 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1303 IP_INC_STATS_BH(OutNoRoutes);
1310 * Send a SYN-ACK after having received an ACK.
1311 * This still operates on an open_request only, not on a big
1314 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1315 struct dst_entry *dst)
1318 struct sk_buff * skb;
1320 /* First, grab a route. */
1321 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1324 skb = tcp_make_synack(sk, dst, req);
1327 struct tcphdr *th = skb->h.th;
1329 th->check = tcp_v4_check(th, skb->len,
1330 req->af.v4_req.loc_addr,
1331 req->af.v4_req.rmt_addr,
1332 csum_partial((char *)th, skb->len,
1335 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1336 req->af.v4_req.rmt_addr,
1337 req->af.v4_req.opt);
1338 if (err == NET_XMIT_CN)
1348 * IPv4 open_request destructor.
1350 static void tcp_v4_or_free(struct open_request *req)
1352 if (req->af.v4_req.opt)
1353 kfree(req->af.v4_req.opt);
1356 static inline void syn_flood_warning(struct sk_buff *skb)
1358 static unsigned long warntime;
1360 if (time_after(jiffies, (warntime + HZ * 60))) {
1363 "possible SYN flooding on port %d. Sending cookies.\n",
1364 ntohs(skb->h.th->dest));
1369 * Save and compile IPv4 options into the open_request if needed.
1371 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1372 struct sk_buff *skb)
1374 struct ip_options *opt = &(IPCB(skb)->opt);
1375 struct ip_options *dopt = NULL;
1377 if (opt && opt->optlen) {
1378 int opt_size = optlength(opt);
1379 dopt = kmalloc(opt_size, GFP_ATOMIC);
1381 if (ip_options_echo(dopt, skb)) {
1391 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1392 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1393 * It would be better to replace it with a global counter for all sockets
1394 * but then some measure against one socket starving all other sockets
1397 * It was 128 by default. Experiments with real servers show that
1398 * it is absolutely not enough even at 100 conn/sec. 256 cures most
1399 * of the problems. This value is adjusted to 128 for very small machines
1400 * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
1401 * Increasing it further requires changing the hash table size.
1403 int sysctl_max_syn_backlog = 256;
1405 struct or_calltable or_ipv4 = {
1407 .rtx_syn_ack = tcp_v4_send_synack,
1408 .send_ack = tcp_v4_or_send_ack,
1409 .destructor = tcp_v4_or_free,
1410 .send_reset = tcp_v4_send_reset,
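/* Process an incoming SYN on a listening socket: allocate an open_request,
 * parse the TCP options, choose an initial sequence number (falling back to
 * syncookies when the SYN queue overflows and they are enabled), send the
 * SYN-ACK and queue the request. */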
1413 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1416 struct open_request *req;
1417 __u32 saddr = skb->nh.iph->saddr;
1418 __u32 daddr = skb->nh.iph->daddr;
1419 __u32 isn = TCP_SKB_CB(skb)->when;
1420 struct dst_entry *dst = NULL;
1421 #ifdef CONFIG_ACCEPT_QUEUES
1424 #ifdef CONFIG_SYN_COOKIES
1425 int want_cookie = 0;
1427 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1430 /* Never answer SYNs sent to broadcast or multicast */
1431 if (((struct rtable *)skb->dst)->rt_flags &
1432 (RTCF_BROADCAST | RTCF_MULTICAST))
1435 /* TW buckets are converted to open requests without
1436 * limitations; they conserve resources and the peer is
1437 * evidently a real one.
1439 if (tcp_synq_is_full(sk) && !isn) {
1440 #ifdef CONFIG_SYN_COOKIES
1441 if (sysctl_tcp_syncookies) {
1448 #ifdef CONFIG_ACCEPT_QUEUES
1449 class = (skb->nfmark <= 0) ? 0 :
1450 ((skb->nfmark >= NUM_ACCEPT_QUEUES) ? 0: skb->nfmark);
1452 * Accept only if the class has shares set, or if the default class
1453 * (i.e. class 0) has shares
1455 if (!(tcp_sk(sk)->acceptq[class].aq_ratio)) {
1456 if (tcp_sk(sk)->acceptq[0].aq_ratio)
1463 /* Accept backlog is full. If we have already queued enough
1464 * warm entries in the syn queue, drop the request. It is better than
1465 * clogging the syn queue with openreqs with exponentially increasing
1468 #ifdef CONFIG_ACCEPT_QUEUES
1469 if (sk_acceptq_is_full(sk, class) && tcp_synq_young(sk, class) > 1)
1471 if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1475 req = tcp_openreq_alloc();
1479 tcp_clear_options(&tp);
1481 tp.user_mss = tcp_sk(sk)->user_mss;
1483 tcp_parse_options(skb, &tp, 0);
1486 tcp_clear_options(&tp);
1490 if (tp.saw_tstamp && !tp.rcv_tsval) {
1491 /* Some OSes (unknown ones, but I see them on a web server which
1492 * contains information interesting only for windows
1493 * users) do not send their stamp in the SYN. It is the easy case.
1494 * We simply do not advertise TS support.
1499 tp.tstamp_ok = tp.saw_tstamp;
1501 tcp_openreq_init(req, &tp, skb);
1502 #ifdef CONFIG_ACCEPT_QUEUES
1503 req->acceptq_class = class;
1504 req->acceptq_time_stamp = jiffies;
1506 req->af.v4_req.loc_addr = daddr;
1507 req->af.v4_req.rmt_addr = saddr;
1508 req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1509 req->class = &or_ipv4;
1511 TCP_ECN_create_request(req, skb->h.th);
1514 #ifdef CONFIG_SYN_COOKIES
1515 syn_flood_warning(skb);
1517 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1519 struct inet_peer *peer = NULL;
1521 /* VJ's idea. We save last timestamp seen
1522 * from the destination in peer table, when entering
1523 * state TIME-WAIT, and check against it before
1524 * accepting new connection request.
1526 * If "isn" is not zero, this request hit alive
1527 * timewait bucket, so that all the necessary checks
1528 * are made in the function processing timewait state.
1530 if (tp.saw_tstamp &&
1531 sysctl_tcp_tw_recycle &&
1532 (dst = tcp_v4_route_req(sk, req)) != NULL &&
1533 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1534 peer->v4daddr == saddr) {
1535 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1536 (s32)(peer->tcp_ts - req->ts_recent) >
1538 NET_INC_STATS_BH(PAWSPassiveRejected);
1543 /* Kill the following clause, if you dislike this way. */
1544 else if (!sysctl_tcp_syncookies &&
1545 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1546 (sysctl_max_syn_backlog >> 2)) &&
1547 (!peer || !peer->tcp_ts_stamp) &&
1548 (!dst || !dst_metric(dst, RTAX_RTT))) {
1549 /* Without syncookies the last quarter of
1550 * the backlog is filled with destinations
1551 * proven to be alive.
1552 * It means that we continue to communicate
1553 * with destinations already remembered
1554 * at the moment of the synflood.
1556 NETDEBUG(if (net_ratelimit()) \
1557 printk(KERN_DEBUG "TCP: drop open "
1558 "request from %u.%u."
1561 ntohs(skb->h.th->source)));
1566 isn = tcp_v4_init_sequence(sk, skb);
1570 if (tcp_v4_send_synack(sk, req, dst))
1574 tcp_openreq_free(req);
1576 tcp_v4_synq_add(sk, req);
1581 tcp_openreq_free(req);
1583 TCP_INC_STATS_BH(TcpAttemptFails);
1589 * The three way handshake has completed - we got a valid synack -
1590 * now create the new socket.
1592 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1593 struct open_request *req,
1594 struct dst_entry *dst)
1596 struct inet_opt *newinet;
1597 struct tcp_opt *newtp;
1600 #ifdef CONFIG_ACCEPT_QUEUES
1601 if (sk_acceptq_is_full(sk, req->acceptq_class))
1603 if (sk_acceptq_is_full(sk))
1607 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1610 newsk = tcp_create_openreq_child(sk, req, skb);
1614 newsk->sk_dst_cache = dst;
1615 tcp_v4_setup_caps(newsk, dst);
1617 newtp = tcp_sk(newsk);
1618 newinet = inet_sk(newsk);
1619 newinet->daddr = req->af.v4_req.rmt_addr;
1620 newinet->rcv_saddr = req->af.v4_req.loc_addr;
1621 newinet->saddr = req->af.v4_req.loc_addr;
1622 newinet->opt = req->af.v4_req.opt;
1623 req->af.v4_req.opt = NULL;
1624 newinet->mc_index = tcp_v4_iif(skb);
1625 newinet->mc_ttl = skb->nh.iph->ttl;
1626 newtp->ext_header_len = 0;
1628 newtp->ext_header_len = newinet->opt->optlen;
1629 newtp->ext2_header_len = dst->header_len;
1630 newinet->id = newtp->write_seq ^ jiffies;
1632 tcp_sync_mss(newsk, dst_pmtu(dst));
1633 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1634 tcp_initialize_rcv_mss(newsk);
1636 __tcp_v4_hash(newsk, 0);
1637 __tcp_inherit_port(sk, newsk);
1642 NET_INC_STATS_BH(ListenOverflows);
1644 NET_INC_STATS_BH(ListenDrops);
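/* For a segment hitting a listening socket, look for a matching pending
 * open_request first, then for an already-established (or TIME-WAIT) socket,
 * and finally fall back to syncookie validation for bare ACKs. */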
1649 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1651 struct tcphdr *th = skb->h.th;
1652 struct iphdr *iph = skb->nh.iph;
1653 struct tcp_opt *tp = tcp_sk(sk);
1655 struct open_request **prev;
1656 /* Find possible connection requests. */
1657 struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1658 iph->saddr, iph->daddr);
1660 return tcp_check_req(sk, skb, req, prev);
1662 nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1669 if (nsk->sk_state != TCP_TIME_WAIT) {
1673 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1677 #ifdef CONFIG_SYN_COOKIES
1678 if (!th->rst && !th->syn && th->ack)
1679 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
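/* Initialise checksum state for an incoming segment: trust a hardware
 * checksum when it verifies, fully check short packets, and leave longer
 * ones to be completed later during the copy. */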
1684 static int tcp_v4_checksum_init(struct sk_buff *skb)
1686 if (skb->ip_summed == CHECKSUM_HW) {
1687 skb->ip_summed = CHECKSUM_UNNECESSARY;
1688 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1689 skb->nh.iph->daddr, skb->csum))
1692 NETDEBUG(if (net_ratelimit())
1693 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1694 skb->ip_summed = CHECKSUM_NONE;
1696 if (skb->len <= 76) {
1697 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1699 skb_checksum(skb, 0, skb->len, 0)))
1701 skb->ip_summed = CHECKSUM_UNNECESSARY;
1703 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1705 skb->nh.iph->daddr, 0);
1711 /* The socket must have its spinlock held when we get
1714 * We have a potential double-lock case here, so even when
1715 * doing backlog processing we use the BH locking scheme.
1716 * This is because we cannot sleep with the original spinlock
1719 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1721 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1722 TCP_CHECK_TIMER(sk);
1723 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1725 TCP_CHECK_TIMER(sk);
1729 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1732 if (sk->sk_state == TCP_LISTEN) {
1733 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1738 if (tcp_child_process(sk, nsk, skb))
1744 TCP_CHECK_TIMER(sk);
1745 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1747 TCP_CHECK_TIMER(sk);
1751 tcp_v4_send_reset(skb);
1754 /* Be careful here. If this function gets more complicated and
1755 * gcc suffers from register pressure on the x86, sk (in %ebx)
1756 * might be destroyed here. This current version compiles correctly,
1757 * but you have been warned.
1762 TCP_INC_STATS_BH(TcpInErrs);
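/* Main receive entry point for IPv4 TCP: validate the header, fill in the
 * skb control block, look up the owning socket and hand the segment to the
 * proper handler (via the prequeue or backlog when the socket is locked by
 * a user), dealing with TIME-WAIT sockets and unmatched segments separately. */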
1770 int tcp_v4_rcv(struct sk_buff *skb)
1776 if (skb->pkt_type != PACKET_HOST)
1779 /* Count it even if it's bad */
1780 TCP_INC_STATS_BH(TcpInSegs);
1782 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1787 if (th->doff < sizeof(struct tcphdr) / 4)
1789 if (!pskb_may_pull(skb, th->doff * 4))
1792 /* An explanation is required here, I think.
1793 * Packet length and doff are validated by header prediction,
1794 * provided the case of th->doff==0 is eliminated.
1795 * So, we defer the checks. */
1796 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1797 tcp_v4_checksum_init(skb) < 0))
1801 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1802 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1803 skb->len - th->doff * 4);
1804 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1805 TCP_SKB_CB(skb)->when = 0;
1806 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1807 TCP_SKB_CB(skb)->sacked = 0;
1809 sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1810 skb->nh.iph->daddr, ntohs(th->dest),
1817 /* Silently drop if the context is not entitled to read the
1820 if (sk->sk_xid && sk->sk_xid != skb->xid)
1823 if (sk->sk_state == TCP_TIME_WAIT)
1826 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1827 goto discard_and_relse;
1829 if (sk_filter(sk, skb, 0))
1830 goto discard_and_relse;
1836 if (!sock_owned_by_user(sk)) {
1837 if (!tcp_prequeue(sk, skb))
1838 ret = tcp_v4_do_rcv(sk, skb);
1840 sk_add_backlog(sk, skb);
1848 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1851 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1853 TCP_INC_STATS_BH(TcpInErrs);
1855 tcp_v4_send_reset(skb);
1859 /* Discard frame. */
1868 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1869 tcp_tw_put((struct tcp_tw_bucket *) sk);
1873 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1874 TCP_INC_STATS_BH(TcpInErrs);
1875 tcp_tw_put((struct tcp_tw_bucket *) sk);
1878 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1879 skb, th, skb->len)) {
1881 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1885 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1886 tcp_tw_put((struct tcp_tw_bucket *)sk);
1890 /* Fall through to ACK */
1893 tcp_v4_timewait_ack(sk, skb);
1897 case TCP_TW_SUCCESS:;
1902 /* With per-bucket locks this operation is not atomic, so
1903 * this version is not worse.
1905 static void __tcp_v4_rehash(struct sock *sk)
1907 sk->sk_prot->unhash(sk);
1908 sk->sk_prot->hash(sk);
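/* Re-query the route for the current destination and move the socket to the
 * newly selected source address, rehashing it under its new identity (used
 * from tcp_v4_rebuild_header() when ip_dynaddr allows address changes). */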
1911 static int tcp_v4_reselect_saddr(struct sock *sk)
1913 struct inet_opt *inet = inet_sk(sk);
1916 __u32 old_saddr = inet->saddr;
1918 __u32 daddr = inet->daddr;
1920 if (inet->opt && inet->opt->srr)
1921 daddr = inet->opt->faddr;
1923 /* Query new route. */
1924 err = ip_route_connect(&rt, daddr, 0,
1925 RT_TOS(inet->tos) | sk->sk_localroute,
1926 sk->sk_bound_dev_if,
1928 inet->sport, inet->dport, sk);
1932 __sk_dst_set(sk, &rt->u.dst);
1933 tcp_v4_setup_caps(sk, &rt->u.dst);
1934 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1936 new_saddr = rt->rt_src;
1938 if (new_saddr == old_saddr)
1941 if (sysctl_ip_dynaddr > 1) {
1942 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1943 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1945 NIPQUAD(new_saddr));
1948 inet->saddr = new_saddr;
1949 inet->rcv_saddr = new_saddr;
1951 /* XXX The only ugly spot where we need to
1952 * XXX really change the socket's identity after
1953 * XXX it has entered the hashes. -DaveM
1955 * Besides that, it does not check for connection
1956 * uniqueness. Wait for trouble.
1958 __tcp_v4_rehash(sk);
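/* Revalidate the socket's cached route before transmission, rebuilding it
 * from the stored addresses when it has been invalidated and, as a last
 * resort, reselecting the source address when ip_dynaddr allows it. */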
1962 int tcp_v4_rebuild_header(struct sock *sk)
1964 struct inet_opt *inet = inet_sk(sk);
1965 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1969 /* Route is OK, nothing to do. */
1974 daddr = inet->daddr;
1975 if (inet->opt && inet->opt->srr)
1976 daddr = inet->opt->faddr;
1979 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1982 .saddr = inet->saddr,
1983 .tos = RT_CONN_FLAGS(sk) } },
1984 .proto = IPPROTO_TCP,
1986 { .sport = inet->sport,
1987 .dport = inet->dport } } };
1989 err = ip_route_output_flow(&rt, &fl, sk, 0);
1992 __sk_dst_set(sk, &rt->u.dst);
1993 tcp_v4_setup_caps(sk, &rt->u.dst);
1994 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1998 /* Routing failed... */
1999 sk->sk_route_caps = 0;
2001 if (!sysctl_ip_dynaddr ||
2002 sk->sk_state != TCP_SYN_SENT ||
2003 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
2004 (err = tcp_v4_reselect_saddr(sk)) != 0)
2005 sk->sk_err_soft = -err;
2010 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
2012 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
2013 struct inet_opt *inet = inet_sk(sk);
2015 sin->sin_family = AF_INET;
2016 sin->sin_addr.s_addr = inet->daddr;
2017 sin->sin_port = inet->dport;
2020 /* VJ's idea. Save last timestamp seen from this destination
2021 * and hold it at least for normal timewait interval to use for duplicate
2022 * segment detection in subsequent connections, before they enter synchronized
2026 int tcp_v4_remember_stamp(struct sock *sk)
2028 struct inet_opt *inet = inet_sk(sk);
2029 struct tcp_opt *tp = tcp_sk(sk);
2030 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2031 struct inet_peer *peer = NULL;
2034 if (!rt || rt->rt_dst != inet->daddr) {
2035 peer = inet_getpeer(inet->daddr, 1);
2039 rt_bind_peer(rt, 1);
2044 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2045 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2046 peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2047 peer->tcp_ts_stamp = tp->ts_recent_stamp;
2048 peer->tcp_ts = tp->ts_recent;
2058 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2060 struct inet_peer *peer = NULL;
2062 peer = inet_getpeer(tw->tw_daddr, 1);
2065 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2066 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2067 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2068 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2069 peer->tcp_ts = tw->tw_ts_recent;
2078 struct tcp_func ipv4_specific = {
2079 .queue_xmit = ip_queue_xmit,
2080 .send_check = tcp_v4_send_check,
2081 .rebuild_header = tcp_v4_rebuild_header,
2082 .conn_request = tcp_v4_conn_request,
2083 .syn_recv_sock = tcp_v4_syn_recv_sock,
2084 .remember_stamp = tcp_v4_remember_stamp,
2085 .net_header_len = sizeof(struct iphdr),
2086 .setsockopt = ip_setsockopt,
2087 .getsockopt = ip_getsockopt,
2088 .addr2sockaddr = v4_addr2sockaddr,
2089 .sockaddr_len = sizeof(struct sockaddr_in),
2092 /* NOTE: A lot of things are set to zero explicitly by the call to
2093 * sk_alloc(), so they need not be done here.
2095 static int tcp_v4_init_sock(struct sock *sk)
2097 struct tcp_opt *tp = tcp_sk(sk);
2099 skb_queue_head_init(&tp->out_of_order_queue);
2100 tcp_init_xmit_timers(sk);
2101 tcp_prequeue_init(tp);
2103 tp->rto = TCP_TIMEOUT_INIT;
2104 tp->mdev = TCP_TIMEOUT_INIT;
2106 /* So many TCP implementations out there (incorrectly) count the
2107 * initial SYN frame in their delayed-ACK and congestion control
2108 * algorithms that we must have the following bandaid to talk
2109 * efficiently to them. -DaveM
2113 /* See draft-stevens-tcpca-spec-01 for discussion of the
2114 * initialization of these values.
2116 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
2117 tp->snd_cwnd_clamp = ~0;
2118 tp->mss_cache = 536;
2120 tp->reordering = sysctl_tcp_reordering;
2122 sk->sk_state = TCP_CLOSE;
2124 sk->sk_write_space = sk_stream_write_space;
2125 sk->sk_use_write_queue = 1;
2127 tp->af_specific = &ipv4_specific;
2129 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2130 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2132 atomic_inc(&tcp_sockets_allocated);
2137 int tcp_v4_destroy_sock(struct sock *sk)
2139 struct tcp_opt *tp = tcp_sk(sk);
2141 tcp_clear_xmit_timers(sk);
2143 /* Clean up the write buffer. */
2144 sk_stream_writequeue_purge(sk);
2146 /* Cleans up our, hopefully empty, out_of_order_queue. */
2147 __skb_queue_purge(&tp->out_of_order_queue);
2149 /* Clean the prequeue; it really must be empty */
2150 __skb_queue_purge(&tp->ucopy.prequeue);
2152 /* Clean up a referenced TCP bind bucket. */
2156 atomic_dec(&tcp_sockets_allocated);
2161 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2163 #ifdef CONFIG_PROC_FS
2164 /* Proc filesystem TCP sock list dumping. */
2166 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2168 return hlist_empty(head) ? NULL :
2169 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2172 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2174 return tw->tw_node.next ?
2175 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
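/* /proc iteration helper: advance through the listening hash, descending
 * into each listener's SYN queue (TCP_SEQ_STATE_OPENREQ) and skipping
 * sockets not visible to the current context or not of the wanted family. */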
2178 static void *listening_get_next(struct seq_file *seq, void *cur)
2181 struct hlist_node *node;
2182 struct sock *sk = cur;
2183 struct tcp_iter_state* st = seq->private;
2187 sk = sk_head(&tcp_listening_hash[0]);
2193 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2194 struct open_request *req = cur;
2196 tp = tcp_sk(st->syn_wait_sk);
2200 if (!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
2202 if (req->class->family == st->family) {
2208 if (++st->sbucket >= TCP_SYNQ_HSIZE)
2211 req = tp->listen_opt->syn_table[st->sbucket];
2213 sk = sk_next(st->syn_wait_sk);
2214 st->state = TCP_SEQ_STATE_LISTENING;
2215 read_unlock_bh(&tp->syn_wait_lock);
2219 sk_for_each_from(sk, node) {
2220 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2222 if (sk->sk_family == st->family) {
2227 read_lock_bh(&tp->syn_wait_lock);
2228 if (tp->listen_opt && tp->listen_opt->qlen) {
2229 st->uid = sock_i_uid(sk);
2230 st->syn_wait_sk = sk;
2231 st->state = TCP_SEQ_STATE_OPENREQ;
2235 read_unlock_bh(&tp->syn_wait_lock);
2237 if (++st->bucket < TCP_LHTABLE_SIZE) {
2238 sk = sk_head(&tcp_listening_hash[st->bucket]);
2246 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2248 void *rc = listening_get_next(seq, NULL);
2250 while (rc && *pos) {
2251 rc = listening_get_next(seq, rc);
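/* /proc iteration helper: find the first entry in the established hash,
 * scanning each bucket's live chain and then its TIME-WAIT chain. */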
2257 static void *established_get_first(struct seq_file *seq)
2259 struct tcp_iter_state* st = seq->private;
2262 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2264 struct hlist_node *node;
2265 struct tcp_tw_bucket *tw;
2267 read_lock(&tcp_ehash[st->bucket].lock);
2268 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2269 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2271 if (sk->sk_family != st->family)
2276 st->state = TCP_SEQ_STATE_TIME_WAIT;
2277 tw_for_each(tw, node,
2278 &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2279 if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2281 if (tw->tw_family != st->family)
2286 read_unlock(&tcp_ehash[st->bucket].lock);
2287 st->state = TCP_SEQ_STATE_ESTABLISHED;
2293 static void *established_get_next(struct seq_file *seq, void *cur)
2295 struct sock *sk = cur;
2296 struct tcp_tw_bucket *tw;
2297 struct hlist_node *node;
2298 struct tcp_iter_state* st = seq->private;
2302 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2306 while (tw && tw->tw_family != st->family &&
2307 !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH)) {
2314 read_unlock(&tcp_ehash[st->bucket].lock);
2315 st->state = TCP_SEQ_STATE_ESTABLISHED;
2316 if (++st->bucket < tcp_ehash_size) {
2317 read_lock(&tcp_ehash[st->bucket].lock);
2318 sk = sk_head(&tcp_ehash[st->bucket].chain);
2326 sk_for_each_from(sk, node) {
2327 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2329 if (sk->sk_family == st->family)
2333 st->state = TCP_SEQ_STATE_TIME_WAIT;
2334 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2342 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2344 void *rc = established_get_first(seq);
2347 rc = established_get_next(seq, rc);
2353 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2356 struct tcp_iter_state* st = seq->private;
2359 st->state = TCP_SEQ_STATE_LISTENING;
2360 rc = listening_get_idx(seq, &pos);
2363 tcp_listen_unlock();
2365 st->state = TCP_SEQ_STATE_ESTABLISHED;
2366 rc = established_get_idx(seq, pos);
2372 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2374 struct tcp_iter_state* st = seq->private;
2375 st->state = TCP_SEQ_STATE_LISTENING;
2377 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2380 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2383 struct tcp_iter_state* st;
2385 if (v == SEQ_START_TOKEN) {
2386 rc = tcp_get_idx(seq, 0);
2391 switch (st->state) {
2392 case TCP_SEQ_STATE_OPENREQ:
2393 case TCP_SEQ_STATE_LISTENING:
2394 rc = listening_get_next(seq, v);
2396 tcp_listen_unlock();
2398 st->state = TCP_SEQ_STATE_ESTABLISHED;
2399 rc = established_get_first(seq);
2402 case TCP_SEQ_STATE_ESTABLISHED:
2403 case TCP_SEQ_STATE_TIME_WAIT:
2404 rc = established_get_next(seq, v);
2412 static void tcp_seq_stop(struct seq_file *seq, void *v)
2414 struct tcp_iter_state* st = seq->private;
2416 switch (st->state) {
2417 case TCP_SEQ_STATE_OPENREQ:
2419 struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2420 read_unlock_bh(&tp->syn_wait_lock);
2422 case TCP_SEQ_STATE_LISTENING:
2423 if (v != SEQ_START_TOKEN)
2424 tcp_listen_unlock();
2426 case TCP_SEQ_STATE_TIME_WAIT:
2427 case TCP_SEQ_STATE_ESTABLISHED:
2429 read_unlock(&tcp_ehash[st->bucket].lock);
2435 static int tcp_seq_open(struct inode *inode, struct file *file)
2437 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2438 struct seq_file *seq;
2439 struct tcp_iter_state *s;
2442 if (unlikely(afinfo == NULL))
2445 s = kmalloc(sizeof(*s), GFP_KERNEL);
2448 memset(s, 0, sizeof(*s));
2449 s->family = afinfo->family;
2450 s->seq_ops.start = tcp_seq_start;
2451 s->seq_ops.next = tcp_seq_next;
2452 s->seq_ops.show = afinfo->seq_show;
2453 s->seq_ops.stop = tcp_seq_stop;
2455 rc = seq_open(file, &s->seq_ops);
2458 seq = file->private_data;
2467 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2470 struct proc_dir_entry *p;
2474 afinfo->seq_fops->owner = afinfo->owner;
2475 afinfo->seq_fops->open = tcp_seq_open;
2476 afinfo->seq_fops->read = seq_read;
2477 afinfo->seq_fops->llseek = seq_lseek;
2478 afinfo->seq_fops->release = seq_release_private;
2480 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2488 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2492 proc_net_remove(afinfo->name);
2493 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
2496 static void get_openreq4(struct sock *sk, struct open_request *req,
2497 char *tmpbuf, int i, int uid)
2499 int ttd = req->expires - jiffies;
2501 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2502 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2504 req->af.v4_req.loc_addr,
2505 ntohs(inet_sk(sk)->sport),
2506 req->af.v4_req.rmt_addr,
2507 ntohs(req->rmt_port),
2509 0, 0, /* could print option size, but that is af dependent. */
2510 1, /* timers active (only the expire timer) */
2511 jiffies_to_clock_t(ttd),
2514 0, /* non standard timer */
2515 0, /* open_requests have no inode */
2516 atomic_read(&sk->sk_refcnt),
2520 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2523 unsigned long timer_expires;
2524 struct tcp_opt *tp = tcp_sk(sp);
2525 struct inet_opt *inet = inet_sk(sp);
2526 unsigned int dest = inet->daddr;
2527 unsigned int src = inet->rcv_saddr;
2528 __u16 destp = ntohs(inet->dport);
2529 __u16 srcp = ntohs(inet->sport);
2531 if (tp->pending == TCP_TIME_RETRANS) {
2533 timer_expires = tp->timeout;
2534 } else if (tp->pending == TCP_TIME_PROBE0) {
2536 timer_expires = tp->timeout;
2537 } else if (timer_pending(&sp->sk_timer)) {
2539 timer_expires = sp->sk_timer.expires;
2542 timer_expires = jiffies;
2545 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2546 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2547 i, src, srcp, dest, destp, sp->sk_state,
2548 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2550 jiffies_to_clock_t(timer_expires - jiffies),
2555 atomic_read(&sp->sk_refcnt), sp,
2556 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2558 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2561 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2563 unsigned int dest, src;
2565 int ttd = tw->tw_ttd - jiffies;
2570 dest = tw->tw_daddr;
2571 src = tw->tw_rcv_saddr;
2572 destp = ntohs(tw->tw_dport);
2573 srcp = ntohs(tw->tw_sport);
2575 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2576 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2577 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2578 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2579 atomic_read(&tw->tw_refcnt), tw);
2584 static int tcp4_seq_show(struct seq_file *seq, void *v)
2586 struct tcp_iter_state* st;
2587 char tmpbuf[TMPSZ + 1];
2589 if (v == SEQ_START_TOKEN) {
2590 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2591 " sl local_address rem_address st tx_queue "
2592 "rx_queue tr tm->when retrnsmt uid timeout "
2598 switch (st->state) {
2599 case TCP_SEQ_STATE_LISTENING:
2600 case TCP_SEQ_STATE_ESTABLISHED:
2601 get_tcp4_sock(v, tmpbuf, st->num);
2603 case TCP_SEQ_STATE_OPENREQ:
2604 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2606 case TCP_SEQ_STATE_TIME_WAIT:
2607 get_timewait4_sock(v, tmpbuf, st->num);
2610 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2615 static struct file_operations tcp4_seq_fops;
2616 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2617 .owner = THIS_MODULE,
2620 .seq_show = tcp4_seq_show,
2621 .seq_fops = &tcp4_seq_fops,
2624 int __init tcp4_proc_init(void)
2626 return tcp_proc_register(&tcp4_seq_afinfo);
2629 void tcp4_proc_exit(void)
2631 tcp_proc_unregister(&tcp4_seq_afinfo);
2633 #endif /* CONFIG_PROC_FS */
2635 struct proto tcp_prot = {
2638 .connect = tcp_v4_connect,
2639 .disconnect = tcp_disconnect,
2640 .accept = tcp_accept,
2642 .init = tcp_v4_init_sock,
2643 .destroy = tcp_v4_destroy_sock,
2644 .shutdown = tcp_shutdown,
2645 .setsockopt = tcp_setsockopt,
2646 .getsockopt = tcp_getsockopt,
2647 .sendmsg = tcp_sendmsg,
2648 .recvmsg = tcp_recvmsg,
2649 .backlog_rcv = tcp_v4_do_rcv,
2650 .hash = tcp_v4_hash,
2651 .unhash = tcp_unhash,
2652 .get_port = tcp_v4_get_port,
2653 .enter_memory_pressure = tcp_enter_memory_pressure,
2654 .sockets_allocated = &tcp_sockets_allocated,
2655 .memory_allocated = &tcp_memory_allocated,
2656 .memory_pressure = &tcp_memory_pressure,
2657 .sysctl_mem = sysctl_tcp_mem,
2658 .sysctl_wmem = sysctl_tcp_wmem,
2659 .sysctl_rmem = sysctl_tcp_rmem,
2660 .max_header = MAX_TCP_HEADER,
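/* Create the kernel-internal control socket used for sending RSTs and ACKs
 * outside socket context, and unhash it so it never sees incoming packets. */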
2665 void __init tcp_v4_init(struct net_proto_family *ops)
2667 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2669 panic("Failed to create the TCP control socket.\n");
2670 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2671 inet_sk(tcp_socket->sk)->uc_ttl = -1;
2673 /* Unhash it so that IP input processing does not even
2674 * see it; we do not wish this socket to see incoming packets.
2677 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2680 EXPORT_SYMBOL(ipv4_specific);
2681 EXPORT_SYMBOL(tcp_bind_hash);
2682 EXPORT_SYMBOL(tcp_bucket_create);
2683 EXPORT_SYMBOL(tcp_hashinfo);
2684 EXPORT_SYMBOL(tcp_inherit_port);
2685 EXPORT_SYMBOL(tcp_listen_wlock);
2686 EXPORT_SYMBOL(tcp_port_rover);
2687 EXPORT_SYMBOL(tcp_prot);
2688 EXPORT_SYMBOL(tcp_put_port);
2689 EXPORT_SYMBOL(tcp_unhash);
2690 EXPORT_SYMBOL(tcp_v4_conn_request);
2691 EXPORT_SYMBOL(tcp_v4_connect);
2692 EXPORT_SYMBOL(tcp_v4_do_rcv);
2693 EXPORT_SYMBOL(tcp_v4_lookup_listener);
2694 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2695 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2696 EXPORT_SYMBOL(tcp_v4_send_check);
2697 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2699 #ifdef CONFIG_PROC_FS
2700 EXPORT_SYMBOL(tcp_proc_register);
2701 EXPORT_SYMBOL(tcp_proc_unregister);
2703 #ifdef CONFIG_SYSCTL
2704 EXPORT_SYMBOL(sysctl_local_port_range);
2705 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2706 EXPORT_SYMBOL(sysctl_tcp_low_latency);