/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *		IPv4 specific functions
 *
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				open_request handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen	:	Fix new listen.
 *	Andi Kleen	:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/config.h>

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/inet_common.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

extern int sysctl_ip_dynaddr;
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;
/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
		       struct sk_buff *skb);

struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
	.__tcp_lhash_lock	= RW_LOCK_UNLOCKED,
	.__tcp_lhash_users	= ATOMIC_INIT(0),
	.__tcp_lhash_wait
		= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
	.__tcp_portalloc_lock	= SPIN_LOCK_UNLOCKED
};
/*
 * This array holds the first and last local port number.
 * For high-usage systems, use sysctl to change this to
 * 32768-61000
 */
int sysctl_local_port_range[2] = { 1024, 4999 };
int tcp_port_rover = 1024 - 1;
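/*
 * Illustrative sketch (not part of this file): the range above is
 * exported to userspace as /proc/sys/net/ipv4/ip_local_port_range.
 * A minimal userspace program widening it for a busy server could
 * look like the following; the values are just an example.
 */
#if 0	/* userspace example, never compiled here */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/ip_local_port_range", "w");

	if (!f) {
		perror("ip_local_port_range");
		return 1;
	}
	/* first and last local port number, matching the array above */
	fprintf(f, "32768 61000\n");
	fclose(f);
	return 0;
}
#endif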
static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
				 __u32 faddr, __u16 fport)
{
	int h = (laddr ^ lport) ^ (faddr ^ fport);

	/* fold the high-order bits down before masking */
	h ^= h >> 16;
	h ^= h >> 8;
	return h & (tcp_ehash_size - 1);
}

static __inline__ int tcp_sk_hashfn(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	__u32 laddr = inet->rcv_saddr;
	__u16 lport = inet->num;
	__u32 faddr = inet->daddr;
	__u16 fport = inet->dport;

	return tcp_hashfn(laddr, lport, faddr, fport);
}
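/*
 * A minimal userspace sketch of the hashing scheme above, assuming a
 * power-of-two table size so that the final mask behaves like a
 * modulo. The fold steps push entropy from the high bits into the
 * low bits that survive the mask. Names and values here are made up
 * for illustration.
 */
#if 0	/* illustrative only */
#include <stdio.h>

static unsigned int toy_ehashfn(unsigned int laddr, unsigned short lport,
				unsigned int faddr, unsigned short fport,
				unsigned int table_size /* power of two */)
{
	unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);

	h ^= h >> 16;	/* fold the high half down */
	h ^= h >> 8;	/* again, so the mask sees all 32 bits */
	return h & (table_size - 1);
}

int main(void)
{
	/* hypothetical 10.0.0.1:80 <-> 10.0.0.2:12345, 512 buckets */
	printf("%u\n", toy_ehashfn(0x0a000001, 80, 0x0a000002, 12345, 512));
	return 0;
}
#endif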
/* Allocate and initialize a new TCP local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
					  unsigned short snum)
{
	struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
						      SLAB_ATOMIC);
	if (tb) {
		tb->port = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(tcp_bucket_cachep, tb);
	}
}
/* Caller must disable local BH processing. */
static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
{
	struct tcp_bind_hashbucket *head =
				&tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
	struct tcp_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = tcp_sk(sk)->bind_hash;
	sk_add_bind_node(child, &tb->owners);
	tcp_sk(child)->bind_hash = tb;
	spin_unlock(&head->lock);
}

inline void tcp_inherit_port(struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__tcp_inherit_port(sk, child);
	local_bh_enable();
}
void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
		   unsigned short snum)
{
	inet_sk(sk)->num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tcp_sk(sk)->bind_hash = tb;
}
static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
{
	const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !tcp_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	struct tcp_bind_hashbucket *head;
	struct hlist_node *node;
	struct tcp_bind_bucket *tb;
	int ret;

	local_bh_disable();
	if (!snum) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		int rover;

		spin_lock(&tcp_portalloc_lock);
		rover = tcp_port_rover;
		do {
			rover++;
			if (rover < low || rover > high)
				rover = low;
			head = &tcp_bhash[tcp_bhashfn(rover)];
			spin_lock(&head->lock);
			tb_for_each(tb, node, &head->chain)
				if (tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
		} while (--remaining > 0);
		tcp_port_rover = rover;
		spin_unlock(&tcp_portalloc_lock);

		/* Exhausted local port range during search? */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
		tb = NULL;
		goto tb_not_found;
	} else {
		head = &tcp_bhash[tcp_bhashfn(snum)];
		spin_lock(&head->lock);
		tb_for_each(tb, node, &head->chain)
			if (tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse > 1)
			goto success;
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
			goto success;
		} else {
			ret = 1;
			if (tcp_bind_conflict(sk, tb))
				goto fail_unlock;
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!tcp_sk(sk)->bind_hash)
		tcp_bind_hash(sk, tb, snum);
	BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
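/*
 * Userspace view of the allocator above (a sketch, not kernel code):
 * binding with sin_port == 0 asks tcp_v4_get_port() to pick a free
 * port from sysctl_local_port_range; getsockname() reveals the choice.
 */
#if 0	/* userspace example, never compiled here */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in a;
	socklen_t alen = sizeof(a);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&a, 0, sizeof(a));
	a.sin_family = AF_INET;
	a.sin_port = 0;			/* snum == 0: pick any free port */
	bind(fd, (struct sockaddr *)&a, sizeof(a));
	getsockname(fd, (struct sockaddr *)&a, &alen);
	printf("kernel chose port %d\n", ntohs(a.sin_port));
	close(fd);
	return 0;
}
#endif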
/* Get rid of any references to a local port held by the
 * given sock.
 */
static void __tcp_put_port(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
	struct tcp_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = tcp_sk(sk)->bind_hash;
	__sk_del_bind_node(sk);
	tcp_sk(sk)->bind_hash = NULL;
	inet->num = 0;
	tcp_bucket_destroy(tb);
	spin_unlock(&head->lock);
}

void tcp_put_port(struct sock *sk)
{
	local_bh_disable();
	__tcp_put_port(sk);
	local_bh_enable();
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */
void tcp_listen_wlock(void)
{
	write_lock(&tcp_lhash_lock);

	if (atomic_read(&tcp_lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&tcp_lhash_wait,
						  &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&tcp_lhash_users))
				break;
			write_unlock_bh(&tcp_lhash_lock);
			schedule();
			write_lock_bh(&tcp_lhash_lock);
		}

		finish_wait(&tcp_lhash_wait, &wait);
	}
}
static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
		lock = &tcp_lhash_lock;
		tcp_listen_wlock();
	} else {
		list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
		lock = &tcp_ehash[sk->sk_hashent].lock;
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&tcp_lhash_wait);
}

static void tcp_v4_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__tcp_v4_hash(sk, 1);
		local_bh_enable();
	}
}
void tcp_unhash(struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto ende;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		tcp_listen_wlock();
		lock = &tcp_lhash_lock;
	} else {
		struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
		lock = &head->lock;
		write_lock_bh(&head->lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);

 ende:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&tcp_lhash_wait);
}
/* Don't inline this cruft.  Here are some nice properties to
 * exploit here.  The BSD API does not allow a listening TCP
 * to specify the remote port nor the remote address for the
 * connection.  So always assume those are both wildcarded
 * during the search since they can never be otherwise.
 */
static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
					     unsigned short hnum, int dif)
{
	struct sock *result = NULL, *sk;
	struct hlist_node *node;
	int score, hiscore = 0;

	sk_for_each(sk, node, head) {
		struct inet_opt *inet = inet_sk(sk);

		if (inet->num == hnum && !ipv6_only_sock(sk)) {
			__u32 rcv_saddr = inet->rcv_saddr;

			score = (sk->sk_family == PF_INET ? 1 : 0);
			if (rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 5)
				return sk;
			if (score > hiscore) {
				hiscore = score;
				result = sk;
			}
		}
	}
	return result;
}
/* Optimize the common listener case. */
inline struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
					   int dif)
{
	struct sock *sk = NULL;
	struct hlist_head *head;

	read_lock(&tcp_lhash_lock);
	head = &tcp_listening_hash[tcp_lhashfn(hnum)];
	if (!hlist_empty(head)) {
		struct inet_opt *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if)
			goto sherry_cache;
		sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&tcp_lhash_lock);
	return sk;
}
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
						       u32 daddr, u16 hnum,
						       int dif)
{
	struct tcp_ehash_bucket *head;
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	int hash = tcp_hashfn(daddr, hnum, saddr, sport);
	head = &tcp_ehash[hash];
	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
		if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(&head->lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}
static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
					   u32 daddr, u16 hnum, int dif)
{
	struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
						      daddr, hnum, dif);

	return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
}

inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
				  u16 dport, int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
	local_bh_enable();

	return sk;
}
static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	return secure_tcp_sequence_number(skb->nh.iph->daddr,
					  skb->nh.iph->saddr,
					  skb->h.th->dest,
					  skb->h.th->source);
}
/* called with local bh disabled */
static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
				      struct tcp_tw_bucket **twp)
{
	struct inet_opt *inet = inet_sk(sk);
	u32 daddr = inet->rcv_saddr;
	u32 saddr = inet->daddr;
	int dif = sk->sk_bound_dev_if;
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
	__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
	int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
	struct tcp_ehash_bucket *head = &tcp_ehash[hash];
	struct sock *sk2;
	struct hlist_node *node;
	struct tcp_tw_bucket *tw;

	write_lock(&head->lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
		tw = (struct tcp_tw_bucket *)sk2;

		if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
			struct tcp_opt *tp = tcp_sk(sk);

			/* With PAWS, it is safe from the viewpoint
			   of data integrity. Even without PAWS it
			   is safe provided sequence spaces do not
			   overlap i.e. at data rates <= 80Mbit/sec.

			   Actually, the idea is close to VJ's one,
			   only timestamp cache is held not per host,
			   but per port pair and TW bucket is used
			   as state holder.

			   If TW bucket has been already destroyed we
			   fall back to VJ's scheme and use initial
			   timestamp retrieved from peer table.
			 */
			if (tw->tw_ts_recent_stamp &&
			    (!twp || (sysctl_tcp_tw_reuse &&
				      xtime.tv_sec -
				      tw->tw_ts_recent_stamp > 1))) {
				if ((tp->write_seq =
					tw->tw_snd_nxt + 65535 + 2) == 0)
					tp->write_seq = 1;
				tp->ts_recent	    = tw->tw_ts_recent;
				tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
				sock_hold(sk2);
				goto unique;
			} else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see
	 * in the hash table a socket with a funny identity. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hashent = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(&head->lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(TimeWaitRecycled);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		tcp_tw_deschedule(tw);
		NET_INC_STATS_BH(TimeWaitRecycled);

		tcp_tw_put(tw);
	}

	return 0;

not_unique:
	write_unlock(&head->lock);
	return -EADDRNOTAVAIL;
}
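/*
 * Note: the timestamp-based recycling above is gated by the
 * sysctl_tcp_tw_reuse knob declared near the top of this file; it is
 * exposed to userspace as /proc/sys/net/ipv4/tcp_tw_reuse and starts
 * out zero, so TIME-WAIT port pairs are normally *not* reused.
 */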
/*
 * Bind a port for a connect operation and hash it.
 */
static int tcp_v4_hash_connect(struct sock *sk)
{
	unsigned short snum = inet_sk(sk)->num;
	struct tcp_bind_hashbucket *head;
	struct tcp_bind_bucket *tb;
	int ret;

	if (!snum) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		int rover;
		struct hlist_node *node;
		struct tcp_tw_bucket *tw = NULL;

		local_bh_disable();

		/* TODO. Actually it is not such a bad idea to remove
		 * tcp_portalloc_lock before next submission to Linus.
		 * As soon as we touch this place at all it is time to think.
		 *
		 * Now it protects single _advisory_ variable tcp_port_rover,
		 * hence it is mostly useless.
		 * Code will work nicely if we just delete it, but
		 * I am afraid that in the contended case it will work no better
		 * or even worse: another cpu will just hit the same bucket
		 * and spin there.
		 * So some cpu salt could remove both contention and
		 * memory pingpong. Any ideas how to do this in a nice way?
		 */
		spin_lock(&tcp_portalloc_lock);
		rover = tcp_port_rover;

		do {
			rover++;
			if ((rover < low) || (rover > high))
				rover = low;
			head = &tcp_bhash[tcp_bhashfn(rover)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			tb_for_each(tb, node, &head->chain) {
				if (tb->port == rover) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!__tcp_v4_check_established(sk,
									rover,
									&tw))
						goto ok;
					goto next_port;
				}
			}

			tb = tcp_bucket_create(head, rover);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		} while (--remaining > 0);
		tcp_port_rover = rover;
		spin_unlock(&tcp_portalloc_lock);

		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		/* All locks still held and bhs disabled */
		tcp_port_rover = rover;
		spin_unlock(&tcp_portalloc_lock);

		tcp_bind_hash(sk, tb, rover);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(rover);
			__tcp_v4_hash(sk, 0);
		}
		spin_unlock(&head->lock);

		if (tw) {
			tcp_tw_deschedule(tw);
			tcp_tw_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &tcp_bhash[tcp_bhashfn(snum)];
	tb = tcp_sk(sk)->bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		__tcp_v4_hash(sk, 0);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = __tcp_v4_check_established(sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->ts_recent	    = 0;
		tp->ts_recent_stamp = 0;
		tp->write_seq	    = 0;
	}

	if (sysctl_tcp_tw_recycle &&
	    !tp->ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state TIME-WAIT
		 * and initialize ts_recent from it, when trying new connection.
		 */
		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->ts_recent_stamp = peer->tcp_ts_stamp;
			tp->ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	tp->ext_header_len = 0;
	if (inet->opt)
		tp->ext_header_len = inet->opt->optlen;

	tp->mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = tcp_v4_hash_connect(sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	__sk_dst_set(sk, &rt->u.dst);
	tcp_v4_setup_caps(sk, &rt->u.dst);
	tp->ext2_header_len = rt->u.dst.header_len;

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/* This unhashes the socket and releases the local port, if necessary. */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	return err;
}
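/*
 * For reference, a minimal userspace caller that ends up in
 * tcp_v4_connect() (a sketch only; 192.0.2.1:80 is a documentation
 * address, not anything this file defines):
 */
#if 0	/* userspace example, never compiled here */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;	/* anything else => -EAFNOSUPPORT */
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/* addr_len must cover a full sockaddr_in, as checked above */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}
#endif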
static __inline__ int tcp_v4_iif(struct sk_buff *skb)
{
	return ((struct rtable *)skb->dst)->rt_iif;
}

static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
{
	return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
}
static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
					      struct open_request ***prevp,
					      __u16 rport,
					      __u32 raddr, __u32 laddr)
{
	struct tcp_listen_opt *lopt = tp->listen_opt;
	struct open_request *req, **prev;

	for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		if (req->rmt_port == rport &&
		    req->af.v4_req.rmt_addr == raddr &&
		    req->af.v4_req.loc_addr == laddr &&
		    TCP_INET_FAMILY(req->class->family)) {
			BUG_TRAP(!req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct tcp_listen_opt *lopt = tp->listen_opt;
	u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);

	req->expires = jiffies + TCP_TIMEOUT_INIT;
	req->retrans = 0;
	req->sk = NULL;
	req->dl_next = lopt->syn_table[h];

	write_lock(&tp->syn_wait_lock);
	lopt->syn_table[h] = req;
	write_unlock(&tp->syn_wait_lock);

	tcp_synq_added(sk);
}
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
				     u32 mtu)
{
	struct dst_entry *dst;
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection is not able to recover.
	 */
	if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_pmtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    tp->pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
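/*
 * The pmtudisc check above corresponds to the IP_MTU_DISCOVER socket
 * option. A userspace sketch (illustrative only): with IP_PMTUDISC_DO
 * the DF bit is set, and an oversized send can fail with EMSGSIZE once
 * an ICMP frag-needed has been processed by the path above.
 */
#if 0	/* userspace example, never compiled here */
#include <netinet/in.h>
#include <sys/socket.h>

static void force_pmtu_discovery(int fd)
{
	int val = IP_PMTUDISC_DO;	/* never fragment; rely on PMTUD */

	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
}
#endif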
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_opt *tp;
	struct inet_opt *inet;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(IcmpInErrors);
		return;
	}

	sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
			   th->source, tcp_v4_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(IcmpInErrors);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		tcp_tw_put((struct tcp_tw_bucket *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LockDroppedIcmps);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS(OutOfWindowIcmps);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* This is deprecated, but if someone generated it,
		 * we have no reasons to ignore it.
		 */
		if (!sock_owned_by_user(sk))
			tcp_enter_cwr(tp);
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct open_request *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = tcp_v4_search_req(tp, &prev, th->dest,
					iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != req->snt_isn) {
			NET_INC_STATS_BH(OutOfWindowIcmps);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		tcp_synq_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TcpAttemptFails);
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
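/*
 * The sk_err / sk_err_soft split above is what userspace observes
 * through SO_ERROR: hard errors abort the connection and are reported
 * immediately, soft errors only surface if the connection eventually
 * times out. A hedged sketch of the usual SO_ERROR idiom:
 */
#if 0	/* userspace example, never compiled here */
#include <sys/socket.h>

static int pending_sock_err(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	/* returns and clears the pending error, e.g. ECONNREFUSED
	 * derived from an ICMP port-unreachable as handled above */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return -1;
	return err;
}
#endif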
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
		       struct sk_buff *skb)
{
	struct inet_opt *inet = inet_sk(sk);

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcphdr rth;
	struct ip_reply_arg arg;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rth, 0, sizeof(struct tcphdr));
	rth.dest   = th->source;
	rth.source = th->dest;
	rth.doff   = sizeof(struct tcphdr) / 4;
	rth.rst    = 1;

	if (th->ack) {
		rth.seq = th->ack_seq;
	} else {
		rth.ack = 1;
		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				    skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof arg);
	arg.iov[0].iov_base = (unsigned char *)&rth;
	arg.iov[0].iov_len  = sizeof rth;
	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

	TCP_INC_STATS_BH(TcpOutSegs);
	TCP_INC_STATS_BH(TcpOutRsts);
}
/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside of socket context, is certainly ugly. What can I do?
 */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th;
	struct {
		struct tcphdr th;
		u32 tsopt[3];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof arg);

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				     (TCPOPT_TIMESTAMP << 8) |
				     TCPOLEN_TIMESTAMP);
		rep.tsopt[1] = htonl(tcp_time_stamp);
		rep.tsopt[2] = htonl(ts);
		arg.iov[0].iov_len = sizeof(rep);
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TcpOutSegs);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

	tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
			tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);

	tcp_tw_put(tw);
}

static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
{
	tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}
static struct dst_entry* tcp_v4_route_req(struct sock *sk,
					  struct open_request *req)
{
	struct rtable *rt;
	struct ip_options *opt = req->af.v4_req.opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  req->af.v4_req.rmt_addr),
					.saddr = req->af.v4_req.loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = IPPROTO_TCP,
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = req->rmt_port } } };

	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(IpOutNoRoutes);
		return NULL;
	}
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		IP_INC_STATS_BH(IpOutNoRoutes);
		return NULL;
	}
	return &rt->u.dst;
}
/*
 *	Send a SYN-ACK after having received an ACK.
 *	This still operates on an open_request only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
			      struct dst_entry *dst)
{
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
		goto out;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 req->af.v4_req.loc_addr,
					 req->af.v4_req.rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
					    req->af.v4_req.rmt_addr,
					    req->af.v4_req.opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}
/*
 *	IPv4 open_request destructor.
 */
static void tcp_v4_or_free(struct open_request *req)
{
	if (req->af.v4_req.opt)
		kfree(req->af.v4_req.opt);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(skb->h.th->dest));
	}
}
/*
 * Save and compile IPv4 options into the open_request if needed.
 */
static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
						     struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * It was 128 by default. Experiments with real servers show that
 * it is absolutely not enough even at 100conn/sec. 256 cures most
 * of problems. This value is adjusted to 128 for very small machines
 * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
 * Further increasing requires changing the hash table size.
 */
int sysctl_max_syn_backlog = 256;
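/*
 * sysctl_max_syn_backlog is runtime-tunable; a sketch of bumping it on
 * a busy server through the standard proc interface for this knob:
 */
#if 0	/* userspace example, never compiled here */
#include <stdio.h>

static void raise_syn_backlog(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_max_syn_backlog", "w");

	if (f) {
		fprintf(f, "1024\n");	/* the >=256Mb default, per above */
		fclose(f);
	}
}
#endif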
struct or_calltable or_ipv4 = {
	.family		=	PF_INET,
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_or_send_ack,
	.destructor	=	tcp_v4_or_free,
	.send_reset	=	tcp_v4_send_reset,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt tp;
	struct open_request *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently a real one.
	 */
	if (tcp_synq_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
		goto drop;

	req = tcp_openreq_alloc();
	if (!req)
		goto drop;

	tcp_clear_options(&tp);
	tp.mss_clamp = 536;
	tp.user_mss = tcp_sk(sk)->user_mss;

	tcp_parse_options(skb, &tp, 0);

	if (want_cookie) {
		tcp_clear_options(&tp);
		tp.saw_tstamp = 0;
	}

	if (tp.saw_tstamp && !tp.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on a web server,
		 * which contains information interesting only for windows'
		 * users) do not send their stamp in SYN. It is an easy case.
		 * We simply do not advertise TS support.
		 */
		tp.saw_tstamp = 0;
		tp.tstamp_ok  = 0;
	}
	tp.tstamp_ok = tp.saw_tstamp;

	tcp_openreq_init(req, &tp, skb);

	req->af.v4_req.loc_addr = daddr;
	req->af.v4_req.rmt_addr = saddr;
	req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
	req->class = &or_ipv4;

	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tp.saw_tstamp &&
		    sysctl_tcp_tw_recycle &&
		    (dst = tcp_v4_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(PAWSPassiveRejected);
				goto drop_and_free;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * at the moment of the synflood.
			 */
			NETDEBUG(if (net_ratelimit()) \
				 printk(KERN_DEBUG "TCP: drop open "
						   "request from %u.%u."
						   "%u.%u/%u\n", \
					NIPQUAD(saddr),
					ntohs(skb->h.th->source)));
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(sk, skb);
	}
	req->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		tcp_openreq_free(req);
	} else {
		tcp_v4_synq_add(sk, req);
	}
	return 0;

drop_and_free:
	tcp_openreq_free(req);
drop:
	TCP_INC_STATS_BH(TcpAttemptFails);
	return 0;
}
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct open_request *req,
				  struct dst_entry *dst)
{
	struct inet_opt *newinet;
	struct tcp_opt *newtp;
	struct sock *newsk;

	if (tcp_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_dst_cache = dst;
	tcp_v4_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	newinet->daddr	      = req->af.v4_req.rmt_addr;
	newinet->rcv_saddr    = req->af.v4_req.loc_addr;
	newinet->saddr	      = req->af.v4_req.loc_addr;
	newinet->opt	      = req->af.v4_req.opt;
	req->af.v4_req.opt    = NULL;
	newinet->mc_index     = tcp_v4_iif(skb);
	newinet->mc_ttl	      = skb->nh.iph->ttl;
	newtp->ext_header_len = 0;
	if (newinet->opt)
		newtp->ext_header_len = newinet->opt->optlen;
	newtp->ext2_header_len = dst->header_len;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_sync_mss(newsk, dst_pmtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	__tcp_v4_hash(newsk, 0);
	__tcp_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(ListenOverflows);
exit:
	NET_INC_STATS_BH(ListenDrops);
	dst_release(dst);
	return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct iphdr *iph = skb->nh.iph;
	struct tcp_opt *tp = tcp_sk(sk);
	struct sock *nsk;
	struct open_request **prev;
	/* Find possible connection requests. */
	struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
						     iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
					  th->source,
					  skb->nh.iph->daddr,
					  ntohs(th->dest),
					  tcp_v4_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		tcp_tw_put((struct tcp_tw_bucket *)nsk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				  skb->nh.iph->daddr, skb->csum))
			return 0;

		NETDEBUG(if (net_ratelimit())
				printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
		skb->ip_summed = CHECKSUM_NONE;
	}
	if (skb->len <= 76) {
		if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				 skb->nh.iph->daddr,
				 skb_checksum(skb, 0, skb->len, 0)))
			return -1;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
					  skb->nh.iph->saddr,
					  skb->nh.iph->daddr, 0);
	}
	return 0;
}
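/*
 * For readers unfamiliar with tcp_v4_check(): the TCP checksum is a
 * 16-bit one's-complement sum over a pseudo-header plus the segment.
 * A self-contained userspace sketch of the summing and folding steps
 * (not the kernel implementation, which lives in the csum_* helpers):
 */
#if 0	/* illustrative only */
#include <stddef.h>
#include <stdint.h>

static uint16_t toy_csum(const uint8_t *data, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)			/* pad an odd trailing byte */
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif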
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TcpInErrs);
	goto discard;
}
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TcpInSegs);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb) < 0))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
			     skb->nh.iph->daddr, ntohs(th->dest),
			     tcp_v4_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
	sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TcpInErrs);
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		tcp_tw_put((struct tcp_tw_bucket *) sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TcpInErrs);
		tcp_tw_put((struct tcp_tw_bucket *) sk);
		goto discard_it;
	}
	switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
					   skb, th, skb->len)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
							  ntohs(th->dest),
							  tcp_v4_iif(skb));
		if (sk2) {
			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
			tcp_tw_put((struct tcp_tw_bucket *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static void __tcp_v4_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

static int tcp_v4_reselect_saddr(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	int err;
	struct rtable *rt;
	__u32 old_saddr = inet->saddr;
	__u32 new_saddr;
	__u32 daddr = inet->daddr;

	if (inet->opt && inet->opt->srr)
		daddr = inet->opt->faddr;

	/* Query new route. */
	err = ip_route_connect(&rt, daddr, 0,
			       RT_TOS(inet->tos) | sk->sk_localroute,
			       sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, inet->dport, sk);
	if (err)
		return err;

	__sk_dst_set(sk, &rt->u.dst);
	tcp_v4_setup_caps(sk, &rt->u.dst);
	tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;

	new_saddr = rt->rt_src;

	if (new_saddr == old_saddr)
		return 0;

	if (sysctl_ip_dynaddr > 1) {
		printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
				 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
		       NIPQUAD(old_saddr),
		       NIPQUAD(new_saddr));
	}

	inet->saddr = new_saddr;
	inet->rcv_saddr = new_saddr;

	/* XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	__tcp_v4_rehash(sk);
	return 0;
}
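/*
 * Note: the rehash-on-new-source-address path above only runs when the
 * ip_dynaddr sysctl (declared extern at the top of this file) is
 * non-zero, and values above 1 additionally enable the debug printk.
 * Userspace toggles it via /proc/sys/net/ipv4/ip_dynaddr.
 */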
int tcp_v4_rebuild_header(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
	u32 daddr;
	int err;

	/* Route is OK, nothing to do. */
	if (rt)
		return 0;

	/* Reroute. */
	daddr = inet->daddr;
	if (inet->opt && inet->opt->srr)
		daddr = inet->opt->faddr;

	{
		struct flowi fl = { .oif = sk->sk_bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = inet->saddr,
						.tos = RT_CONN_FLAGS(sk) } },
				    .proto = IPPROTO_TCP,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = inet->dport } } };

		err = ip_route_output_flow(&rt, &fl, sk, 0);
	}
	if (!err) {
		__sk_dst_set(sk, &rt->u.dst);
		tcp_v4_setup_caps(sk, &rt->u.dst);
		tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
		return 0;
	}

	/* Routing failed... */

	sk->sk_route_caps = 0;

	if (!sysctl_ip_dynaddr ||
	    sk->sk_state != TCP_SYN_SENT ||
	    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
	    (err = tcp_v4_reselect_saddr(sk)) != 0)
		sk->sk_err_soft = -err;

	return err;
}
static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
	struct inet_opt *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->daddr;
	sin->sin_port		= inet->dport;
}
/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->ts_recent_stamp;
			peer->tcp_ts = tp->ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}
int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
{
	struct inet_peer *peer = NULL;

	peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
struct tcp_func ipv4_specific = {
	.queue_xmit	=	ip_queue_xmit,
	.send_check	=	tcp_v4_send_check,
	.rebuild_header	=	tcp_v4_rebuild_header,
	.conn_request	=	tcp_v4_conn_request,
	.syn_recv_sock	=	tcp_v4_syn_recv_sock,
	.remember_stamp	=	tcp_v4_remember_stamp,
	.net_header_len	=	sizeof(struct iphdr),
	.setsockopt	=	ip_setsockopt,
	.getsockopt	=	ip_getsockopt,
	.addr2sockaddr	=	v4_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in),
};
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	tp->rto  = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = tcp_write_space;
	sk->sk_use_write_queue = 1;

	tp->af_specific = &ipv4_specific;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
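/*
 * sk_sndbuf/sk_rcvbuf above start from the middle ("default") entries
 * of the tcp_wmem/tcp_rmem sysctl triples. A sketch of inspecting them
 * from userspace through the standard proc interface:
 */
#if 0	/* userspace example, never compiled here */
#include <stdio.h>

static void show_tcp_rmem(void)
{
	char line[128];
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_rmem", "r");

	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("tcp_rmem (min default max): %s", line);
		fclose(f);
	}
}
#endif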
static int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	/* Clean up the write buffer. */
	tcp_writequeue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Clean up the prequeue; it must really be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (tcp_sk(sk)->bind_hash)
		tcp_put_port(sk);

	/* If sendmsg cached page exists, toss it. */
	if (inet_sk(sk)->sndmsg_page)
		__free_page(inet_sk(sk)->sndmsg_page);

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL :
		list_entry(head->first, struct tcp_tw_bucket, tw_node);
}

static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
{
	return tw->tw_node.next ?
		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_opt *tp;
	struct hlist_node *node;
	struct sock *sk = cur;
	struct tcp_iter_state* st = seq->private;

	if (!sk) {
		st->bucket = 0;
		sk = sk_head(&tcp_listening_hash[0]);
		goto get_sk;
	}

	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct open_request *req = cur;

		tp = tcp_sk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->class->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= TCP_SYNQ_HSIZE)
				break;
get_req:
			req = tp->listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&tp->syn_wait_lock);
	} else {
		tp = tcp_sk(sk);
		read_lock_bh(&tp->syn_wait_lock);
		if (tp->listen_opt && tp->listen_opt->qlen)
			goto start_req;
		read_unlock_bh(&tp->syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		tp = tcp_sk(sk);
		read_lock_bh(&tp->syn_wait_lock);
		if (tp->listen_opt && tp->listen_opt->qlen) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&tp->syn_wait_lock);
	}
	if (++st->bucket < TCP_LHTABLE_SIZE) {
		sk = sk_head(&tcp_listening_hash[st->bucket]);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state* st = seq->private;
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_node *node;
		struct tcp_tw_bucket *tw;

		read_lock(&tcp_ehash[st->bucket].lock);
		sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
			if (sk->sk_family != st->family)
				continue;
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		tw_for_each(tw, node,
			    &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
			if (tw->tw_family != st->family)
				continue;
			rc = tw;
			goto out;
		}
		read_unlock(&tcp_ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct tcp_tw_bucket *tw;
	struct hlist_node *node;
	struct tcp_iter_state* st = seq->private;

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && tw->tw_family != st->family) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		read_unlock(&tcp_ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (++st->bucket < tcp_ehash_size) {
			read_lock(&tcp_ehash[st->bucket].lock);
			sk = sk_head(&tcp_ehash[st->bucket].chain);
		} else {
			cur = NULL;
			goto out;
		}
	} else
		sk = sk_next(sk);

	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family)
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state* st = seq->private;

	tcp_listen_lock();
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		tcp_listen_unlock();
		local_bh_disable();
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state* st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state* st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			tcp_listen_unlock();
			local_bh_disable();
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
			read_unlock_bh(&tp->syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			tcp_listen_unlock();
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			read_unlock(&tcp_ehash[st->bucket].lock);
		local_bh_enable();
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	struct tcp_iter_state *s;
	int rc;

	if (unlikely(afinfo == NULL))
		return -EINVAL;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	memset(s, 0, sizeof(*s));
	s->family		= afinfo->family;
	s->seq_ops.start	= tcp_seq_start;
	s->seq_ops.next		= tcp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= tcp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;
	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= tcp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
static void get_openreq4(struct sock *sk, struct open_request *req,
			 char *tmpbuf, int i, int uid)
{
	int ttd = req->expires - jiffies;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08X %08X %5d %8d %u %d %p",
		i,
		req->af.v4_req.loc_addr,
		ntohs(inet_sk(sk)->sport),
		req->af.v4_req.rmt_addr,
		ntohs(req->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_opt *tp = tcp_sk(sp);
	struct inet_opt *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (tp->pending == TCP_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= tp->timeout;
	} else if (tp->pending == TCP_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= tp->timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %u %u %u %u %d",
		i, src, srcp, dest, destp, sp->sk_state,
		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		tp->retransmits,
		sock_i_uid(sp),
		tp->probes_out,
		sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
}
static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st;
	char tmpbuf[TMPSZ + 1];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, tmpbuf, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, tmpbuf, st->num);
		break;
	}
	seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
out:
	return 0;
}
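/*
 * The rows emitted above appear in /proc/net/tcp. A hedged userspace
 * sketch that prints the state column (field 4, hexadecimal) of every
 * row, matching the header written at the top of tcp4_seq_show():
 */
#if 0	/* userspace example, never compiled here */
#include <stdio.h>

int main(void)
{
	char line[512], local[64], remote[64];
	unsigned int sl, state;
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);		/* skip the header line */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%u: %63s %63s %x",
			   &sl, local, remote, &state) == 4)
			printf("%s -> %s state %02x\n", local, remote, state);
	}
	fclose(f);
	return 0;
}
#endif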
static struct file_operations tcp4_seq_fops;
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp",
	.family		= AF_INET,
	.seq_show	= tcp4_seq_show,
	.seq_fops	= &tcp4_seq_fops,
};

int __init tcp4_proc_init(void)
{
	return tcp_proc_register(&tcp4_seq_afinfo);
}

void tcp4_proc_exit(void)
{
	tcp_proc_unregister(&tcp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name		=	"TCP",
	.close		=	tcp_close,
	.connect	=	tcp_v4_connect,
	.disconnect	=	tcp_disconnect,
	.accept		=	tcp_accept,
	.ioctl		=	tcp_ioctl,
	.init		=	tcp_v4_init_sock,
	.destroy	=	tcp_v4_destroy_sock,
	.shutdown	=	tcp_shutdown,
	.setsockopt	=	tcp_setsockopt,
	.getsockopt	=	tcp_getsockopt,
	.sendmsg	=	tcp_sendmsg,
	.recvmsg	=	tcp_recvmsg,
	.backlog_rcv	=	tcp_v4_do_rcv,
	.hash		=	tcp_v4_hash,
	.unhash		=	tcp_unhash,
	.get_port	=	tcp_v4_get_port,
};
void __init tcp_v4_init(struct net_proto_family *ops)
{
	int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
	if (err < 0)
		panic("Failed to create the TCP control socket.\n");
	tcp_socket->sk->sk_allocation = GFP_ATOMIC;
	inet_sk(tcp_socket->sk)->uc_ttl = -1;

	/* Unhash it so that IP input processing does not even
	 * see it, we do not wish this socket to see incoming
	 * packets.
	 */
	tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_bind_hash);
EXPORT_SYMBOL(tcp_bucket_create);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_inherit_port);
EXPORT_SYMBOL(tcp_listen_wlock);
EXPORT_SYMBOL(tcp_port_rover);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_put_port);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_lookup_listener);
EXPORT_SYMBOL(tcp_v4_rebuild_header);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_local_port_range);
EXPORT_SYMBOL(sysctl_max_syn_backlog);
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#endif