2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * open_request handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
42 * Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after year
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
51 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
69 #include <net/inet_common.h>
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
78 extern int sysctl_ip_dynaddr;
79 int sysctl_tcp_tw_reuse;
80 int sysctl_tcp_low_latency;
82 /* Check TCP sequence numbers in ICMP packets. */
83 #define ICMP_MIN_LENGTH 8
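/* Descriptive note: an ICMP error quotes the offending IP header plus at
 * least the first 8 bytes of its payload, which for TCP is exactly enough
 * to recover the source/destination ports and the sequence number used by
 * tcp_v4_err() below.
 */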
85 /* Socket used for sending RSTs */
86 static struct socket *tcp_socket;
88 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
91 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
92 .__tcp_lhash_lock = RW_LOCK_UNLOCKED,
93 .__tcp_lhash_users = ATOMIC_INIT(0),
95 = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
96 .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
100 * This array holds the first and last local port number.
101 * For high-usage systems, use sysctl to change this to
104 int sysctl_local_port_range[2] = { 1024, 4999 };
105 int tcp_port_rover = 1024 - 1;
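/* Illustrative only (not part of this file): the range above is exposed as
 * the ip_local_port_range sysctl, so an administrator on a high-usage
 * system would typically widen it with something like
 *
 *	echo "32768 61000" > /proc/sys/net/ipv4/ip_local_port_range
 *
 * The example values are arbitrary; only the sysctl path is assumed here.
 */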
107 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
108 __u32 faddr, __u16 fport)
110 int h = (laddr ^ lport) ^ (faddr ^ fport);
113 return h & (tcp_ehash_size - 1);
116 static __inline__ int tcp_sk_hashfn(struct sock *sk)
118 struct inet_opt *inet = inet_sk(sk);
119 __u32 laddr = inet->rcv_saddr;
120 __u16 lport = inet->num;
121 __u32 faddr = inet->daddr;
122 __u16 fport = inet->dport;
124 return tcp_hashfn(laddr, lport, faddr, fport);
127 /* Allocate and initialize a new TCP local port bind bucket.
128 * The bindhash mutex for snum's hash chain must be held here.
130 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
133 struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
138 INIT_HLIST_HEAD(&tb->owners);
139 hlist_add_head(&tb->node, &head->chain);
144 /* Caller must hold hashbucket lock for this tb with local BH disabled */
145 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
147 if (hlist_empty(&tb->owners)) {
148 __hlist_del(&tb->node);
149 kmem_cache_free(tcp_bucket_cachep, tb);
153 /* Caller must disable local BH processing. */
154 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
156 struct tcp_bind_hashbucket *head =
157 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
158 struct tcp_bind_bucket *tb;
160 spin_lock(&head->lock);
161 tb = tcp_sk(sk)->bind_hash;
162 sk_add_bind_node(child, &tb->owners);
163 tcp_sk(child)->bind_hash = tb;
164 spin_unlock(&head->lock);
167 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
170 __tcp_inherit_port(sk, child);
174 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
177 inet_sk(sk)->num = snum;
178 sk_add_bind_node(sk, &tb->owners);
179 tcp_sk(sk)->bind_hash = tb;
182 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
184 const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
186 struct hlist_node *node;
187 int reuse = sk->sk_reuse;
189 sk_for_each_bound(sk2, node, &tb->owners) {
191 !tcp_v6_ipv6only(sk2) &&
192 (!sk->sk_bound_dev_if ||
193 !sk2->sk_bound_dev_if ||
194 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
195 if (!reuse || !sk2->sk_reuse ||
196 sk2->sk_state == TCP_LISTEN) {
197 const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
198 if (!sk2_rcv_saddr || !sk_rcv_saddr ||
199 sk2_rcv_saddr == sk_rcv_saddr)
207 /* Obtain a reference to a local port for the given sock,
208 * if snum is zero it means select any available local port.
210 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
212 struct tcp_bind_hashbucket *head;
213 struct hlist_node *node;
214 struct tcp_bind_bucket *tb;
219 int low = sysctl_local_port_range[0];
220 int high = sysctl_local_port_range[1];
221 int remaining = (high - low) + 1;
224 spin_lock(&tcp_portalloc_lock);
225 rover = tcp_port_rover;
228 if (rover < low || rover > high)
230 head = &tcp_bhash[tcp_bhashfn(rover)];
231 spin_lock(&head->lock);
232 tb_for_each(tb, node, &head->chain)
233 if (tb->port == rover)
237 spin_unlock(&head->lock);
238 } while (--remaining > 0);
239 tcp_port_rover = rover;
240 spin_unlock(&tcp_portalloc_lock);
242 /* Exhausted local port range during search? */
247 /* OK, here is the one we will use. HEAD is
248 * non-NULL and we hold its mutex.
252 head = &tcp_bhash[tcp_bhashfn(snum)];
253 spin_lock(&head->lock);
254 tb_for_each(tb, node, &head->chain)
255 if (tb->port == snum)
261 if (!hlist_empty(&tb->owners)) {
262 if (sk->sk_reuse > 1)
264 if (tb->fastreuse > 0 &&
265 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
269 if (tcp_bind_conflict(sk, tb))
275 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
277 if (hlist_empty(&tb->owners)) {
278 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
282 } else if (tb->fastreuse &&
283 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
286 if (!tcp_sk(sk)->bind_hash)
287 tcp_bind_hash(sk, tb, snum);
288 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
292 spin_unlock(&head->lock);
298 /* Get rid of any references to a local port held by the
301 static void __tcp_put_port(struct sock *sk)
303 struct inet_opt *inet = inet_sk(sk);
304 struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
305 struct tcp_bind_bucket *tb;
307 spin_lock(&head->lock);
308 tb = tcp_sk(sk)->bind_hash;
309 __sk_del_bind_node(sk);
310 tcp_sk(sk)->bind_hash = NULL;
312 tcp_bucket_destroy(tb);
313 spin_unlock(&head->lock);
316 void tcp_put_port(struct sock *sk)
323 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
324 * Look, when several writers sleep and reader wakes them up, all but one
325 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
326 * this, _but_ remember, it adds useless work on UP machines (wake up each
327 * exclusive lock release). It should be ifdefed really.
330 void tcp_listen_wlock(void)
332 write_lock(&tcp_lhash_lock);
334 if (atomic_read(&tcp_lhash_users)) {
338 prepare_to_wait_exclusive(&tcp_lhash_wait,
339 &wait, TASK_UNINTERRUPTIBLE);
340 if (!atomic_read(&tcp_lhash_users))
342 write_unlock_bh(&tcp_lhash_lock);
344 write_lock_bh(&tcp_lhash_lock);
347 finish_wait(&tcp_lhash_wait, &wait);
351 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
353 struct hlist_head *list;
356 BUG_TRAP(sk_unhashed(sk));
357 if (listen_possible && sk->sk_state == TCP_LISTEN) {
358 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
359 lock = &tcp_lhash_lock;
362 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
363 lock = &tcp_ehash[sk->sk_hashent].lock;
366 __sk_add_node(sk, list);
367 sock_prot_inc_use(sk->sk_prot);
369 if (listen_possible && sk->sk_state == TCP_LISTEN)
370 wake_up(&tcp_lhash_wait);
373 static void tcp_v4_hash(struct sock *sk)
375 if (sk->sk_state != TCP_CLOSE) {
377 __tcp_v4_hash(sk, 1);
382 void tcp_unhash(struct sock *sk)
389 if (sk->sk_state == TCP_LISTEN) {
392 lock = &tcp_lhash_lock;
394 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
396 write_lock_bh(&head->lock);
399 if (__sk_del_node_init(sk))
400 sock_prot_dec_use(sk->sk_prot);
401 write_unlock_bh(lock);
404 if (sk->sk_state == TCP_LISTEN)
405 wake_up(&tcp_lhash_wait);
408 /* Don't inline this cruft. There are some nice properties to
409 * exploit here. The BSD API does not allow a listening TCP
410 * to specify the remote port nor the remote address for the
411 * connection. So always assume those are both wildcarded
412 * during the search since they can never be otherwise.
414 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
415 unsigned short hnum, int dif)
417 struct sock *result = NULL, *sk;
418 struct hlist_node *node;
422 sk_for_each(sk, node, head) {
423 struct inet_opt *inet = inet_sk(sk);
425 if (inet->num == hnum && !ipv6_only_sock(sk)) {
426 __u32 rcv_saddr = inet->rcv_saddr;
428 score = (sk->sk_family == PF_INET ? 1 : 0);
430 if (rcv_saddr != daddr)
434 if (sk->sk_bound_dev_if) {
435 if (sk->sk_bound_dev_if != dif)
441 if (score > hiscore) {
450 /* Optimize the common listener case. */
451 struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif)
453 struct sock *sk = NULL;
454 struct hlist_head *head;
456 read_lock(&tcp_lhash_lock);
457 head = &tcp_listening_hash[tcp_lhashfn(hnum)];
458 if (!hlist_empty(head)) {
459 struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
461 if (inet->num == hnum && !sk->sk_node.next &&
462 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
463 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
464 !sk->sk_bound_dev_if)
466 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
472 read_unlock(&tcp_lhash_lock);
476 EXPORT_SYMBOL_GPL(tcp_v4_lookup_listener);
478 /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
479 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
481 * Local BH must be disabled here.
484 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
488 struct tcp_ehash_bucket *head;
489 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
490 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
492 struct hlist_node *node;
493 /* Optimize here for direct hit, only listening connections can
494 * have wildcards anyway.
496 int hash = tcp_hashfn(daddr, hnum, saddr, sport);
497 head = &tcp_ehash[hash];
498 read_lock(&head->lock);
499 sk_for_each(sk, node, &head->chain) {
500 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
501 goto hit; /* You sunk my battleship! */
504 /* Must check for a TIME_WAIT'er before going to listener hash. */
505 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
506 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
511 read_unlock(&head->lock);
518 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
519 u32 daddr, u16 hnum, int dif)
521 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
524 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
527 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
533 sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
539 EXPORT_SYMBOL_GPL(tcp_v4_lookup);
541 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
543 return secure_tcp_sequence_number(skb->nh.iph->daddr,
549 /* called with local bh disabled */
550 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
551 struct tcp_tw_bucket **twp)
553 struct inet_opt *inet = inet_sk(sk);
554 u32 daddr = inet->rcv_saddr;
555 u32 saddr = inet->daddr;
556 int dif = sk->sk_bound_dev_if;
557 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
558 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
559 int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
560 struct tcp_ehash_bucket *head = &tcp_ehash[hash];
562 struct hlist_node *node;
563 struct tcp_tw_bucket *tw;
565 write_lock(&head->lock);
567 /* Check TIME-WAIT sockets first. */
568 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
569 tw = (struct tcp_tw_bucket *)sk2;
571 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
572 struct tcp_opt *tp = tcp_sk(sk);
574 /* With PAWS, it is safe from the viewpoint
575 of data integrity. Even without PAWS it
576 is safe provided sequence spaces do not
577 overlap i.e. at data rates <= 80Mbit/sec.
579 Actually, the idea is close to VJ's one,
580 only the timestamp cache is held not per host,
581 but per port pair, and the TW bucket is used
584 If the TW bucket has already been destroyed we
585 fall back to VJ's scheme and use the initial
586 timestamp retrieved from the peer table.
588 if (tw->tw_ts_recent_stamp &&
589 (!twp || (sysctl_tcp_tw_reuse &&
591 tw->tw_ts_recent_stamp > 1))) {
593 tw->tw_snd_nxt + 65535 + 2) == 0)
595 tp->ts_recent = tw->tw_ts_recent;
596 tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
605 /* And established part... */
606 sk_for_each(sk2, node, &head->chain) {
607 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
612 /* Must record num and sport now. Otherwise we will see
613 * a socket with a funny identity in the hash table. */
615 inet->sport = htons(lport);
616 sk->sk_hashent = hash;
617 BUG_TRAP(sk_unhashed(sk));
618 __sk_add_node(sk, &head->chain);
619 sock_prot_inc_use(sk->sk_prot);
620 write_unlock(&head->lock);
624 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
626 /* Silly. Should hash-dance instead... */
627 tcp_tw_deschedule(tw);
628 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
636 write_unlock(&head->lock);
637 return -EADDRNOTAVAIL;
641 * Bind a port for a connect operation and hash it.
643 static int tcp_v4_hash_connect(struct sock *sk)
645 unsigned short snum = inet_sk(sk)->num;
646 struct tcp_bind_hashbucket *head;
647 struct tcp_bind_bucket *tb;
652 int low = sysctl_local_port_range[0];
653 int high = sysctl_local_port_range[1];
654 int remaining = (high - low) + 1;
655 struct hlist_node *node;
656 struct tcp_tw_bucket *tw = NULL;
660 * TODO. Actually it is not such a bad idea to remove
661 * tcp_portalloc_lock before next submission to Linus.
662 * As soon as we touch this place at all it is time to think.
664 * Now it protects single _advisory_ variable tcp_port_rover,
665 * hence it is mostly useless.
666 * Code will work nicely if we just delete it, but
667 * I am afraid that in the contended case it will work no better or
668 * even worse: another cpu will just hit the same bucket
670 * So some cpu salt could remove both contention and
671 * memory pingpong. Any ideas how to do this in a nice way?
673 spin_lock(&tcp_portalloc_lock);
674 rover = tcp_port_rover;
678 if ((rover < low) || (rover > high))
680 head = &tcp_bhash[tcp_bhashfn(rover)];
681 spin_lock(&head->lock);
683 /* Does not bother with rcv_saddr checks,
684 * because the established check is already
687 tb_for_each(tb, node, &head->chain) {
688 if (tb->port == rover) {
689 BUG_TRAP(!hlist_empty(&tb->owners));
690 if (tb->fastreuse >= 0)
692 if (!__tcp_v4_check_established(sk,
700 tb = tcp_bucket_create(head, rover);
702 spin_unlock(&head->lock);
709 spin_unlock(&head->lock);
710 } while (--remaining > 0);
711 tcp_port_rover = rover;
712 spin_unlock(&tcp_portalloc_lock);
716 return -EADDRNOTAVAIL;
719 /* All locks still held and bhs disabled */
720 tcp_port_rover = rover;
721 spin_unlock(&tcp_portalloc_lock);
723 tcp_bind_hash(sk, tb, rover);
724 if (sk_unhashed(sk)) {
725 inet_sk(sk)->sport = htons(rover);
726 __tcp_v4_hash(sk, 0);
728 spin_unlock(&head->lock);
731 tcp_tw_deschedule(tw);
739 head = &tcp_bhash[tcp_bhashfn(snum)];
740 tb = tcp_sk(sk)->bind_hash;
741 spin_lock_bh(&head->lock);
742 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
743 __tcp_v4_hash(sk, 0);
744 spin_unlock_bh(&head->lock);
747 spin_unlock(&head->lock);
748 /* No definite answer... Walk to established hash table */
749 ret = __tcp_v4_check_established(sk, snum, NULL);
756 /* This will initiate an outgoing connection. */
757 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
759 struct inet_opt *inet = inet_sk(sk);
760 struct tcp_opt *tp = tcp_sk(sk);
761 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
767 if (addr_len < sizeof(struct sockaddr_in))
770 if (usin->sin_family != AF_INET)
771 return -EAFNOSUPPORT;
773 nexthop = daddr = usin->sin_addr.s_addr;
774 if (inet->opt && inet->opt->srr) {
777 nexthop = inet->opt->faddr;
780 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
781 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
783 inet->sport, usin->sin_port, sk);
787 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
792 if (!inet->opt || !inet->opt->srr)
796 inet->saddr = rt->rt_src;
797 inet->rcv_saddr = inet->saddr;
799 if (tp->ts_recent_stamp && inet->daddr != daddr) {
800 /* Reset inherited state */
802 tp->ts_recent_stamp = 0;
806 if (sysctl_tcp_tw_recycle &&
807 !tp->ts_recent_stamp && rt->rt_dst == daddr) {
808 struct inet_peer *peer = rt_get_peer(rt);
810 /* VJ's idea. We save the last timestamp seen from
811 * the destination in the peer table when entering state TIME-WAIT,
812 * and initialize ts_recent from it when trying a new connection.
815 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
816 tp->ts_recent_stamp = peer->tcp_ts_stamp;
817 tp->ts_recent = peer->tcp_ts;
821 inet->dport = usin->sin_port;
824 tp->ext_header_len = 0;
826 tp->ext_header_len = inet->opt->optlen;
830 /* Socket identity is still unknown (sport may be zero).
831 * However, we set the state to SYN-SENT and, without releasing the socket
832 * lock, select a source port, enter ourselves into the hash tables and
833 * complete initialization after this.
835 tcp_set_state(sk, TCP_SYN_SENT);
836 err = tcp_v4_hash_connect(sk);
840 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
844 /* OK, now commit destination to socket. */
845 __sk_dst_set(sk, &rt->u.dst);
846 tcp_v4_setup_caps(sk, &rt->u.dst);
847 tp->ext2_header_len = rt->u.dst.header_len;
850 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
855 inet->id = tp->write_seq ^ jiffies;
857 err = tcp_connect(sk);
865 /* This unhashes the socket and releases the local port, if necessary. */
866 tcp_set_state(sk, TCP_CLOSE);
868 sk->sk_route_caps = 0;
873 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
875 return ((struct rtable *)skb->dst)->rt_iif;
878 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
880 return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
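/* Descriptive note: the SYN queue hash mixes the peer's address and port
 * with lopt->hash_rnd, a per-listener random value, via jhash_2words(), so
 * a remote sender cannot predict which of the TCP_SYNQ_HSIZE buckets a
 * forged SYN will land in and degrade lookups into one long chain.
 */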
883 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
884 struct open_request ***prevp,
886 __u32 raddr, __u32 laddr)
888 struct tcp_listen_opt *lopt = tp->listen_opt;
889 struct open_request *req, **prev;
891 for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
892 (req = *prev) != NULL;
893 prev = &req->dl_next) {
894 if (req->rmt_port == rport &&
895 req->af.v4_req.rmt_addr == raddr &&
896 req->af.v4_req.loc_addr == laddr &&
897 TCP_INET_FAMILY(req->class->family)) {
907 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
909 struct tcp_opt *tp = tcp_sk(sk);
910 struct tcp_listen_opt *lopt = tp->listen_opt;
911 u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
913 req->expires = jiffies + TCP_TIMEOUT_INIT;
916 req->dl_next = lopt->syn_table[h];
918 write_lock(&tp->syn_wait_lock);
919 lopt->syn_table[h] = req;
920 write_unlock(&tp->syn_wait_lock);
927 * This routine does path mtu discovery as defined in RFC1191.
929 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
932 struct dst_entry *dst;
933 struct inet_opt *inet = inet_sk(sk);
934 struct tcp_opt *tp = tcp_sk(sk);
936 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
937 * sent out by Linux are always < 576 bytes, so they should go through
940 if (sk->sk_state == TCP_LISTEN)
943 /* We don't check in the dst entry if pmtu discovery is forbidden
944 * on this route. We just assume that no packet-too-big packets
945 * are sent back when pmtu discovery is not active.
946 * There is a small race when the user changes this flag in the
947 * route, but I think that's acceptable.
949 if ((dst = __sk_dst_check(sk, 0)) == NULL)
952 dst->ops->update_pmtu(dst, mtu);
954 /* Something is about to go wrong... Remember the soft error
955 * for the case that this connection will not be able to recover.
957 if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
958 sk->sk_err_soft = EMSGSIZE;
962 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
963 tp->pmtu_cookie > mtu) {
964 tcp_sync_mss(sk, mtu);
966 /* Resend the TCP packet because it's
967 * clear that the old packet has been
968 * dropped. This is the new "fast" path mtu
971 tcp_simple_retransmit(sk);
972 } /* else let the usual retransmit timer handle it */
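/* A worked example of the clamp above, under the assumption that the cached
 * path MTU (tp->pmtu_cookie) was 1500: if the quoted ICMP FRAG_NEEDED
 * reports mtu = 1400, tcp_sync_mss() shrinks the cached MSS accordingly and
 * tcp_simple_retransmit() resends the queued segments that no longer fit,
 * instead of waiting for the retransmit timer.
 */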
976 * This routine is called by the ICMP module when it gets some
977 * sort of error condition. If err < 0 then the socket should
978 * be closed and the error returned to the user. If err > 0
979 * it's just the icmp type << 8 | icmp code. After adjustment
980 * header points to the first 8 bytes of the tcp header. We need
981 * to find the appropriate port.
983 * The locking strategy used here is very "optimistic". When
984 * someone else accesses the socket the ICMP is just dropped
985 * and for some paths there is no check at all.
986 * A more general error queue to queue errors for later handling
987 * is probably better.
991 void tcp_v4_err(struct sk_buff *skb, u32 info)
993 struct iphdr *iph = (struct iphdr *)skb->data;
994 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
996 struct inet_opt *inet;
997 int type = skb->h.icmph->type;
998 int code = skb->h.icmph->code;
1003 if (skb->len < (iph->ihl << 2) + 8) {
1004 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1008 sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1009 th->source, tcp_v4_iif(skb));
1011 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1014 if (sk->sk_state == TCP_TIME_WAIT) {
1015 tcp_tw_put((struct tcp_tw_bucket *)sk);
1020 /* If too many ICMPs get dropped on busy
1021 * servers this needs to be solved differently.
1023 if (sock_owned_by_user(sk))
1024 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1026 if (sk->sk_state == TCP_CLOSE)
1030 seq = ntohl(th->seq);
1031 if (sk->sk_state != TCP_LISTEN &&
1032 !between(seq, tp->snd_una, tp->snd_nxt)) {
1033 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1038 case ICMP_SOURCE_QUENCH:
1039 /* Just silently ignore these. */
1041 case ICMP_PARAMETERPROB:
1044 case ICMP_DEST_UNREACH:
1045 if (code > NR_ICMP_UNREACH)
1048 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1049 if (!sock_owned_by_user(sk))
1050 do_pmtu_discovery(sk, iph, info);
1054 err = icmp_err_convert[code].errno;
1056 case ICMP_TIME_EXCEEDED:
1063 switch (sk->sk_state) {
1064 struct open_request *req, **prev;
1066 if (sock_owned_by_user(sk))
1069 req = tcp_v4_search_req(tp, &prev, th->dest,
1070 iph->daddr, iph->saddr);
1074 /* ICMPs are not backlogged, hence we cannot get
1075 an established socket here.
1079 if (seq != req->snt_isn) {
1080 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
1085 * Still in SYN_RECV, just remove it silently.
1086 * There is no good way to pass the error to the newly
1087 * created socket, and POSIX does not want network
1088 * errors returned from accept().
1090 tcp_synq_drop(sk, req, prev);
1094 case TCP_SYN_RECV: /* Cannot happen.
1095 It can happen, e.g., if SYNs crossed.
1097 if (!sock_owned_by_user(sk)) {
1098 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1101 sk->sk_error_report(sk);
1105 sk->sk_err_soft = err;
1110 /* If we've already connected we will keep trying
1111 * until we time out, or the user gives up.
1113 * rfc1122 4.2.3.9 allows us to treat as hard errors
1114 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
1115 * but it is obsoleted by pmtu discovery).
1117 * Note that in the modern internet, where routing is unreliable
1118 * and broken firewalls sit in every dark corner sending random
1119 * errors ordered by their masters, even these two messages finally lose
1120 * their original sense (even Linux sends invalid PORT_UNREACHs)
1122 * Now we are in compliance with RFCs.
1127 if (!sock_owned_by_user(sk) && inet->recverr) {
1129 sk->sk_error_report(sk);
1130 } else { /* Only an error on timeout */
1131 sk->sk_err_soft = err;
1139 /* This routine computes an IPv4 TCP checksum. */
1140 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1141 struct sk_buff *skb)
1143 struct inet_opt *inet = inet_sk(sk);
1145 if (skb->ip_summed == CHECKSUM_HW) {
1146 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1147 skb->csum = offsetof(struct tcphdr, check);
1149 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1150 csum_partial((char *)th,
1157 * This routine will send an RST to the other tcp.
1159 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
1161 * Answer: if a packet caused an RST, it is not for a socket
1162 * existing in our system; if it is matched to a socket,
1163 * it is just a duplicate segment or a bug in the other side's TCP.
1164 * So we build the reply based only on the parameters
1165 * that arrived with the segment.
1166 * Exception: precedence violation. We do not implement it in any case.
1169 static void tcp_v4_send_reset(struct sk_buff *skb)
1171 struct tcphdr *th = skb->h.th;
1173 struct ip_reply_arg arg;
1175 /* Never send a reset in response to a reset. */
1179 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1182 /* Swap the send and the receive. */
1183 memset(&rth, 0, sizeof(struct tcphdr));
1184 rth.dest = th->source;
1185 rth.source = th->dest;
1186 rth.doff = sizeof(struct tcphdr) / 4;
1190 rth.seq = th->ack_seq;
1193 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1194 skb->len - (th->doff << 2));
1197 memset(&arg, 0, sizeof arg);
1198 arg.iov[0].iov_base = (unsigned char *)&rth;
1199 arg.iov[0].iov_len = sizeof rth;
1200 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1201 skb->nh.iph->saddr, /*XXX*/
1202 sizeof(struct tcphdr), IPPROTO_TCP, 0);
1203 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1205 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1207 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1208 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
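/* Descriptive note on the sequence numbers chosen above: per RFC 793, if the
 * offending segment carried an ACK the reset is sent with seq equal to that
 * ack_seq and no ACK flag; otherwise seq is zero and ack_seq acknowledges
 * everything the segment occupied (SYN + FIN + payload), as computed for
 * rth.ack_seq above.
 */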
1211 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
1212 outside socket context, is certainly ugly. What can I do?
1215 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1218 struct tcphdr *th = skb->h.th;
1223 struct ip_reply_arg arg;
1225 memset(&rep.th, 0, sizeof(struct tcphdr));
1226 memset(&arg, 0, sizeof arg);
1228 arg.iov[0].iov_base = (unsigned char *)&rep;
1229 arg.iov[0].iov_len = sizeof(rep.th);
1231 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1232 (TCPOPT_TIMESTAMP << 8) |
1234 rep.tsopt[1] = htonl(tcp_time_stamp);
1235 rep.tsopt[2] = htonl(ts);
1236 arg.iov[0].iov_len = sizeof(rep);
1239 /* Swap the send and the receive. */
1240 rep.th.dest = th->source;
1241 rep.th.source = th->dest;
1242 rep.th.doff = arg.iov[0].iov_len / 4;
1243 rep.th.seq = htonl(seq);
1244 rep.th.ack_seq = htonl(ack);
1246 rep.th.window = htons(win);
1248 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1249 skb->nh.iph->saddr, /*XXX*/
1250 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1251 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1253 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1255 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1258 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1260 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1262 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1263 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1268 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1270 tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1274 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1275 struct open_request *req)
1278 struct ip_options *opt = req->af.v4_req.opt;
1279 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1281 { .daddr = ((opt && opt->srr) ?
1283 req->af.v4_req.rmt_addr),
1284 .saddr = req->af.v4_req.loc_addr,
1285 .tos = RT_CONN_FLAGS(sk) } },
1286 .proto = IPPROTO_TCP,
1288 { .sport = inet_sk(sk)->sport,
1289 .dport = req->rmt_port } } };
1291 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1292 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1295 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1297 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1304 * Send a SYN-ACK after having received a SYN.
1305 * This still operates on an open_request only, not on a big
1308 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1309 struct dst_entry *dst)
1312 struct sk_buff * skb;
1314 /* First, grab a route. */
1315 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1318 skb = tcp_make_synack(sk, dst, req);
1321 struct tcphdr *th = skb->h.th;
1323 th->check = tcp_v4_check(th, skb->len,
1324 req->af.v4_req.loc_addr,
1325 req->af.v4_req.rmt_addr,
1326 csum_partial((char *)th, skb->len,
1329 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1330 req->af.v4_req.rmt_addr,
1331 req->af.v4_req.opt);
1332 if (err == NET_XMIT_CN)
1342 * IPv4 open_request destructor.
1344 static void tcp_v4_or_free(struct open_request *req)
1346 if (req->af.v4_req.opt)
1347 kfree(req->af.v4_req.opt);
1350 static inline void syn_flood_warning(struct sk_buff *skb)
1352 static unsigned long warntime;
1354 if (time_after(jiffies, (warntime + HZ * 60))) {
1357 "possible SYN flooding on port %d. Sending cookies.\n",
1358 ntohs(skb->h.th->dest));
1363 * Save and compile IPv4 options into the open_request if needed.
1365 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1366 struct sk_buff *skb)
1368 struct ip_options *opt = &(IPCB(skb)->opt);
1369 struct ip_options *dopt = NULL;
1371 if (opt && opt->optlen) {
1372 int opt_size = optlength(opt);
1373 dopt = kmalloc(opt_size, GFP_ATOMIC);
1375 if (ip_options_echo(dopt, skb)) {
1385 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
1386 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
1387 * It would be better to replace it with a global counter for all sockets
1388 * but then some measure against one socket starving all other sockets
1391 * It was 128 by default. Experiments with real servers show that
1392 * it is absolutely not enough even at 100 conn/sec. 256 cures most
1393 * of the problems. This value is adjusted to 128 for very small machines
1394 * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
1395 * Increasing it further requires changing the hash table size.
1397 int sysctl_max_syn_backlog = 256;
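/* Illustrative only (not part of this file): the default above can be
 * overridden at run time through the tcp_max_syn_backlog sysctl, e.g.
 *
 *	echo 1024 > /proc/sys/net/ipv4/tcp_max_syn_backlog
 *
 * The value 1024 is just an example for a well-provisioned server.
 */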
1399 struct or_calltable or_ipv4 = {
1401 .rtx_syn_ack = tcp_v4_send_synack,
1402 .send_ack = tcp_v4_or_send_ack,
1403 .destructor = tcp_v4_or_free,
1404 .send_reset = tcp_v4_send_reset,
1407 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1410 struct open_request *req;
1411 __u32 saddr = skb->nh.iph->saddr;
1412 __u32 daddr = skb->nh.iph->daddr;
1413 __u32 isn = TCP_SKB_CB(skb)->when;
1414 struct dst_entry *dst = NULL;
1415 #ifdef CONFIG_SYN_COOKIES
1416 int want_cookie = 0;
1418 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1421 /* Never answer SYNs sent to broadcast or multicast */
1422 if (((struct rtable *)skb->dst)->rt_flags &
1423 (RTCF_BROADCAST | RTCF_MULTICAST))
1426 /* TW buckets are converted to open requests without
1427 * limitation; they conserve resources and the peer is
1428 * evidently a real one.
1430 if (tcp_synq_is_full(sk) && !isn) {
1431 #ifdef CONFIG_SYN_COOKIES
1432 if (sysctl_tcp_syncookies) {
1439 /* Accept backlog is full. If we have already queued enough
1440 * warm entries in the syn queue, drop the request. It is better than
1441 * clogging the syn queue with openreqs with exponentially increasing
1444 if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1447 req = tcp_openreq_alloc();
1451 tcp_clear_options(&tp);
1453 tp.user_mss = tcp_sk(sk)->user_mss;
1455 tcp_parse_options(skb, &tp, 0);
1458 tcp_clear_options(&tp);
1462 if (tp.saw_tstamp && !tp.rcv_tsval) {
1463 /* Some OSes (unknown ones, but I see them on web servers, which
1464 * contain information interesting only for Windows
1465 * users) do not send their timestamp in the SYN. It is an easy case.
1466 * We simply do not advertise TS support.
1471 tp.tstamp_ok = tp.saw_tstamp;
1473 tcp_openreq_init(req, &tp, skb);
1475 req->af.v4_req.loc_addr = daddr;
1476 req->af.v4_req.rmt_addr = saddr;
1477 req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1478 req->class = &or_ipv4;
1480 TCP_ECN_create_request(req, skb->h.th);
1483 #ifdef CONFIG_SYN_COOKIES
1484 syn_flood_warning(skb);
1486 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1488 struct inet_peer *peer = NULL;
1490 /* VJ's idea. We save the last timestamp seen
1491 * from the destination in the peer table when entering
1492 * state TIME-WAIT, and check against it before
1493 * accepting a new connection request.
1495 * If "isn" is not zero, this request hit an alive
1496 * timewait bucket, so all the necessary checks
1497 * are made in the function processing the timewait state.
1499 if (tp.saw_tstamp &&
1500 sysctl_tcp_tw_recycle &&
1501 (dst = tcp_v4_route_req(sk, req)) != NULL &&
1502 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1503 peer->v4daddr == saddr) {
1504 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1505 (s32)(peer->tcp_ts - req->ts_recent) >
1507 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1512 /* Kill the following clause, if you dislike this way. */
1513 else if (!sysctl_tcp_syncookies &&
1514 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1515 (sysctl_max_syn_backlog >> 2)) &&
1516 (!peer || !peer->tcp_ts_stamp) &&
1517 (!dst || !dst_metric(dst, RTAX_RTT))) {
1518 /* Without syncookies, the last quarter of
1519 * the backlog is filled with destinations
1520 * proven to be alive.
1521 * It means that we continue to communicate
1522 * with destinations already remembered
1523 * at the moment of the synflood.
1525 NETDEBUG(if (net_ratelimit()) \
1526 printk(KERN_DEBUG "TCP: drop open "
1527 "request from %u.%u."
1530 ntohs(skb->h.th->source)));
1535 isn = tcp_v4_init_sequence(sk, skb);
1539 if (tcp_v4_send_synack(sk, req, dst))
1543 tcp_openreq_free(req);
1545 tcp_v4_synq_add(sk, req);
1550 tcp_openreq_free(req);
1552 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1558 * The three way handshake has completed - we got a valid ACK -
1559 * now create the new socket.
1561 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1562 struct open_request *req,
1563 struct dst_entry *dst)
1565 struct inet_opt *newinet;
1566 struct tcp_opt *newtp;
1569 if (sk_acceptq_is_full(sk))
1572 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1575 newsk = tcp_create_openreq_child(sk, req, skb);
1579 newsk->sk_dst_cache = dst;
1580 tcp_v4_setup_caps(newsk, dst);
1582 newtp = tcp_sk(newsk);
1583 newinet = inet_sk(newsk);
1584 newinet->daddr = req->af.v4_req.rmt_addr;
1585 newinet->rcv_saddr = req->af.v4_req.loc_addr;
1586 newinet->saddr = req->af.v4_req.loc_addr;
1587 newinet->opt = req->af.v4_req.opt;
1588 req->af.v4_req.opt = NULL;
1589 newinet->mc_index = tcp_v4_iif(skb);
1590 newinet->mc_ttl = skb->nh.iph->ttl;
1591 newtp->ext_header_len = 0;
1593 newtp->ext_header_len = newinet->opt->optlen;
1594 newtp->ext2_header_len = dst->header_len;
1595 newinet->id = newtp->write_seq ^ jiffies;
1597 tcp_sync_mss(newsk, dst_pmtu(dst));
1598 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1599 tcp_initialize_rcv_mss(newsk);
1601 __tcp_v4_hash(newsk, 0);
1602 __tcp_inherit_port(sk, newsk);
1607 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1609 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1614 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1616 struct tcphdr *th = skb->h.th;
1617 struct iphdr *iph = skb->nh.iph;
1618 struct tcp_opt *tp = tcp_sk(sk);
1620 struct open_request **prev;
1621 /* Find possible connection requests. */
1622 struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1623 iph->saddr, iph->daddr);
1625 return tcp_check_req(sk, skb, req, prev);
1627 nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1634 if (nsk->sk_state != TCP_TIME_WAIT) {
1638 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1642 #ifdef CONFIG_SYN_COOKIES
1643 if (!th->rst && !th->syn && th->ack)
1644 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1649 static int tcp_v4_checksum_init(struct sk_buff *skb)
1651 if (skb->ip_summed == CHECKSUM_HW) {
1652 skb->ip_summed = CHECKSUM_UNNECESSARY;
1653 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1654 skb->nh.iph->daddr, skb->csum))
1657 NETDEBUG(if (net_ratelimit())
1658 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1659 skb->ip_summed = CHECKSUM_NONE;
1661 if (skb->len <= 76) {
1662 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1664 skb_checksum(skb, 0, skb->len, 0)))
1666 skb->ip_summed = CHECKSUM_UNNECESSARY;
1668 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1670 skb->nh.iph->daddr, 0);
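/* Descriptive note: the three outcomes above are (1) a hardware-verified
 * checksum is accepted as CHECKSUM_UNNECESSARY, (2) short segments
 * (<= 76 bytes) are verified in software right away, and (3) longer segments
 * store a partial checksum in skb->csum so the copy-to-user path can finish
 * the verification later.
 */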
1676 /* The socket must have its spinlock held when we get
1679 * We have a potential double-lock case here, so even when
1680 * doing backlog processing we use the BH locking scheme.
1681 * This is because we cannot sleep with the original spinlock
1684 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1686 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1687 TCP_CHECK_TIMER(sk);
1688 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1690 TCP_CHECK_TIMER(sk);
1694 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1697 if (sk->sk_state == TCP_LISTEN) {
1698 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1703 if (tcp_child_process(sk, nsk, skb))
1709 TCP_CHECK_TIMER(sk);
1710 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1712 TCP_CHECK_TIMER(sk);
1716 tcp_v4_send_reset(skb);
1719 /* Be careful here. If this function gets more complicated and
1720 * gcc suffers from register pressure on the x86, sk (in %ebx)
1721 * might be destroyed here. This current version compiles correctly,
1722 * but you have been warned.
1727 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1735 int tcp_v4_rcv(struct sk_buff *skb)
1741 if (skb->pkt_type != PACKET_HOST)
1744 /* Count it even if it's bad */
1745 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1747 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1752 if (th->doff < sizeof(struct tcphdr) / 4)
1754 if (!pskb_may_pull(skb, th->doff * 4))
1757 /* An explanation is required here, I think.
1758 * Packet length and doff are validated by header prediction,
1759 * provided the case of th->doff == 0 is eliminated.
1760 * So, we defer the checks. */
1761 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1762 tcp_v4_checksum_init(skb) < 0))
1766 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1767 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1768 skb->len - th->doff * 4);
1769 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1770 TCP_SKB_CB(skb)->when = 0;
1771 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1772 TCP_SKB_CB(skb)->sacked = 0;
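/* Descriptive note: end_seq above adds th->syn and th->fin to the payload
 * length because SYN and FIN each occupy one unit of sequence space, so the
 * control block describes exactly the range [seq, end_seq) this segment
 * consumes.
 */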
1774 sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1775 skb->nh.iph->daddr, ntohs(th->dest),
1782 if (sk->sk_state == TCP_TIME_WAIT)
1785 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1786 goto discard_and_relse;
1788 if (sk_filter(sk, skb, 0))
1789 goto discard_and_relse;
1795 if (!sock_owned_by_user(sk)) {
1796 if (!tcp_prequeue(sk, skb))
1797 ret = tcp_v4_do_rcv(sk, skb);
1799 sk_add_backlog(sk, skb);
1807 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1810 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1812 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1814 tcp_v4_send_reset(skb);
1818 /* Discard frame. */
1827 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1828 tcp_tw_put((struct tcp_tw_bucket *) sk);
1832 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1833 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1834 tcp_tw_put((struct tcp_tw_bucket *) sk);
1837 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1838 skb, th, skb->len)) {
1840 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1844 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1845 tcp_tw_put((struct tcp_tw_bucket *)sk);
1849 /* Fall through to ACK */
1852 tcp_v4_timewait_ack(sk, skb);
1856 case TCP_TW_SUCCESS:;
1861 /* With per-bucket locks this operation is not atomic, so
1862 * this version is no worse.
1864 static void __tcp_v4_rehash(struct sock *sk)
1866 sk->sk_prot->unhash(sk);
1867 sk->sk_prot->hash(sk);
1870 static int tcp_v4_reselect_saddr(struct sock *sk)
1872 struct inet_opt *inet = inet_sk(sk);
1875 __u32 old_saddr = inet->saddr;
1877 __u32 daddr = inet->daddr;
1879 if (inet->opt && inet->opt->srr)
1880 daddr = inet->opt->faddr;
1882 /* Query new route. */
1883 err = ip_route_connect(&rt, daddr, 0,
1884 RT_TOS(inet->tos) | sk->sk_localroute,
1885 sk->sk_bound_dev_if,
1887 inet->sport, inet->dport, sk);
1891 __sk_dst_set(sk, &rt->u.dst);
1892 tcp_v4_setup_caps(sk, &rt->u.dst);
1893 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1895 new_saddr = rt->rt_src;
1897 if (new_saddr == old_saddr)
1900 if (sysctl_ip_dynaddr > 1) {
1901 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1902 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1904 NIPQUAD(new_saddr));
1907 inet->saddr = new_saddr;
1908 inet->rcv_saddr = new_saddr;
1910 /* XXX The only ugly spot where we need to
1911 * XXX really change the socket's identity after
1912 * XXX it has entered the hashes. -DaveM
1914 * Besides that, it does not check for connection
1915 * uniqueness. Wait for troubles.
1917 __tcp_v4_rehash(sk);
1921 int tcp_v4_rebuild_header(struct sock *sk)
1923 struct inet_opt *inet = inet_sk(sk);
1924 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1928 /* Route is OK, nothing to do. */
1933 daddr = inet->daddr;
1934 if (inet->opt && inet->opt->srr)
1935 daddr = inet->opt->faddr;
1938 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1941 .saddr = inet->saddr,
1942 .tos = RT_CONN_FLAGS(sk) } },
1943 .proto = IPPROTO_TCP,
1945 { .sport = inet->sport,
1946 .dport = inet->dport } } };
1948 err = ip_route_output_flow(&rt, &fl, sk, 0);
1951 __sk_dst_set(sk, &rt->u.dst);
1952 tcp_v4_setup_caps(sk, &rt->u.dst);
1953 tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
1957 /* Routing failed... */
1958 sk->sk_route_caps = 0;
1960 if (!sysctl_ip_dynaddr ||
1961 sk->sk_state != TCP_SYN_SENT ||
1962 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1963 (err = tcp_v4_reselect_saddr(sk)) != 0)
1964 sk->sk_err_soft = -err;
1969 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1971 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
1972 struct inet_opt *inet = inet_sk(sk);
1974 sin->sin_family = AF_INET;
1975 sin->sin_addr.s_addr = inet->daddr;
1976 sin->sin_port = inet->dport;
1979 /* VJ's idea. Save the last timestamp seen from this destination
1980 * and hold it at least for the normal timewait interval, to use for duplicate
1981 * segment detection in subsequent connections before they enter the synchronized
1985 int tcp_v4_remember_stamp(struct sock *sk)
1987 struct inet_opt *inet = inet_sk(sk);
1988 struct tcp_opt *tp = tcp_sk(sk);
1989 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1990 struct inet_peer *peer = NULL;
1993 if (!rt || rt->rt_dst != inet->daddr) {
1994 peer = inet_getpeer(inet->daddr, 1);
1998 rt_bind_peer(rt, 1);
2003 if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
2004 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2005 peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
2006 peer->tcp_ts_stamp = tp->ts_recent_stamp;
2007 peer->tcp_ts = tp->ts_recent;
2017 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2019 struct inet_peer *peer = NULL;
2021 peer = inet_getpeer(tw->tw_daddr, 1);
2024 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2025 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2026 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2027 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2028 peer->tcp_ts = tw->tw_ts_recent;
2037 struct tcp_func ipv4_specific = {
2038 .queue_xmit = ip_queue_xmit,
2039 .send_check = tcp_v4_send_check,
2040 .rebuild_header = tcp_v4_rebuild_header,
2041 .conn_request = tcp_v4_conn_request,
2042 .syn_recv_sock = tcp_v4_syn_recv_sock,
2043 .remember_stamp = tcp_v4_remember_stamp,
2044 .net_header_len = sizeof(struct iphdr),
2045 .setsockopt = ip_setsockopt,
2046 .getsockopt = ip_getsockopt,
2047 .addr2sockaddr = v4_addr2sockaddr,
2048 .sockaddr_len = sizeof(struct sockaddr_in),
2051 /* NOTE: A lot of things are set to zero explicitly by the call to
2052 * sk_alloc(), so they need not be done here.
2054 static int tcp_v4_init_sock(struct sock *sk)
2056 struct tcp_opt *tp = tcp_sk(sk);
2058 skb_queue_head_init(&tp->out_of_order_queue);
2059 tcp_init_xmit_timers(sk);
2060 tcp_prequeue_init(tp);
2062 tp->rto = TCP_TIMEOUT_INIT;
2063 tp->mdev = TCP_TIMEOUT_INIT;
2065 /* So many TCP implementations out there (incorrectly) count the
2066 * initial SYN frame in their delayed-ACK and congestion control
2067 * algorithms that we must have the following bandaid to talk
2068 * efficiently to them. -DaveM
2072 /* See draft-stevens-tcpca-spec-01 for discussion of the
2073 * initialization of these values.
2075 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
2076 tp->snd_cwnd_clamp = ~0;
2077 tp->mss_cache_std = tp->mss_cache = 536;
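/* Descriptive note: 536 is the classic default IPv4 MSS, i.e. the 576-byte
 * minimum reassembly buffer required by RFC 1122 minus 40 bytes of IP and
 * TCP headers; it is recomputed via tcp_sync_mss() once a route is attached.
 */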
2079 tp->reordering = sysctl_tcp_reordering;
2081 sk->sk_state = TCP_CLOSE;
2083 sk->sk_write_space = sk_stream_write_space;
2084 sk->sk_use_write_queue = 1;
2086 tp->af_specific = &ipv4_specific;
2088 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2089 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2091 atomic_inc(&tcp_sockets_allocated);
2096 int tcp_v4_destroy_sock(struct sock *sk)
2098 struct tcp_opt *tp = tcp_sk(sk);
2100 tcp_clear_xmit_timers(sk);
2102 /* Clean up the write buffer. */
2103 sk_stream_writequeue_purge(sk);
2105 /* Cleans up our, hopefully empty, out_of_order_queue. */
2106 __skb_queue_purge(&tp->out_of_order_queue);
2108 /* Clean the prequeue; it must really be empty */
2109 __skb_queue_purge(&tp->ucopy.prequeue);
2111 /* Clean up a referenced TCP bind bucket. */
2116 * If sendmsg cached page exists, toss it.
2118 if (sk->sk_sndmsg_page) {
2119 __free_page(sk->sk_sndmsg_page);
2120 sk->sk_sndmsg_page = NULL;
2123 atomic_dec(&tcp_sockets_allocated);
2128 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2130 #ifdef CONFIG_PROC_FS
2131 /* Proc filesystem TCP sock list dumping. */
2133 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2135 return hlist_empty(head) ? NULL :
2136 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2139 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2141 return tw->tw_node.next ?
2142 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2145 static void *listening_get_next(struct seq_file *seq, void *cur)
2148 struct hlist_node *node;
2149 struct sock *sk = cur;
2150 struct tcp_iter_state* st = seq->private;
2154 sk = sk_head(&tcp_listening_hash[0]);
2160 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2161 struct open_request *req = cur;
2163 tp = tcp_sk(st->syn_wait_sk);
2167 if (req->class->family == st->family) {
2173 if (++st->sbucket >= TCP_SYNQ_HSIZE)
2176 req = tp->listen_opt->syn_table[st->sbucket];
2178 sk = sk_next(st->syn_wait_sk);
2179 st->state = TCP_SEQ_STATE_LISTENING;
2180 read_unlock_bh(&tp->syn_wait_lock);
2183 read_lock_bh(&tp->syn_wait_lock);
2184 if (tp->listen_opt && tp->listen_opt->qlen)
2186 read_unlock_bh(&tp->syn_wait_lock);
2190 sk_for_each_from(sk, node) {
2191 if (sk->sk_family == st->family) {
2196 read_lock_bh(&tp->syn_wait_lock);
2197 if (tp->listen_opt && tp->listen_opt->qlen) {
2199 st->uid = sock_i_uid(sk);
2200 st->syn_wait_sk = sk;
2201 st->state = TCP_SEQ_STATE_OPENREQ;
2205 read_unlock_bh(&tp->syn_wait_lock);
2207 if (++st->bucket < TCP_LHTABLE_SIZE) {
2208 sk = sk_head(&tcp_listening_hash[st->bucket]);
2216 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2218 void *rc = listening_get_next(seq, NULL);
2220 while (rc && *pos) {
2221 rc = listening_get_next(seq, rc);
2227 static void *established_get_first(struct seq_file *seq)
2229 struct tcp_iter_state* st = seq->private;
2232 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2234 struct hlist_node *node;
2235 struct tcp_tw_bucket *tw;
2237 read_lock(&tcp_ehash[st->bucket].lock);
2238 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2239 if (sk->sk_family != st->family) {
2245 st->state = TCP_SEQ_STATE_TIME_WAIT;
2246 tw_for_each(tw, node,
2247 &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2248 if (tw->tw_family != st->family) {
2254 read_unlock(&tcp_ehash[st->bucket].lock);
2255 st->state = TCP_SEQ_STATE_ESTABLISHED;
2261 static void *established_get_next(struct seq_file *seq, void *cur)
2263 struct sock *sk = cur;
2264 struct tcp_tw_bucket *tw;
2265 struct hlist_node *node;
2266 struct tcp_iter_state* st = seq->private;
2270 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2274 while (tw && tw->tw_family != st->family) {
2281 read_unlock(&tcp_ehash[st->bucket].lock);
2282 st->state = TCP_SEQ_STATE_ESTABLISHED;
2283 if (++st->bucket < tcp_ehash_size) {
2284 read_lock(&tcp_ehash[st->bucket].lock);
2285 sk = sk_head(&tcp_ehash[st->bucket].chain);
2293 sk_for_each_from(sk, node) {
2294 if (sk->sk_family == st->family)
2298 st->state = TCP_SEQ_STATE_TIME_WAIT;
2299 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2307 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2309 void *rc = established_get_first(seq);
2312 rc = established_get_next(seq, rc);
2318 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2321 struct tcp_iter_state* st = seq->private;
2324 st->state = TCP_SEQ_STATE_LISTENING;
2325 rc = listening_get_idx(seq, &pos);
2328 tcp_listen_unlock();
2330 st->state = TCP_SEQ_STATE_ESTABLISHED;
2331 rc = established_get_idx(seq, pos);
2337 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2339 struct tcp_iter_state* st = seq->private;
2340 st->state = TCP_SEQ_STATE_LISTENING;
2342 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2345 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2348 struct tcp_iter_state* st;
2350 if (v == SEQ_START_TOKEN) {
2351 rc = tcp_get_idx(seq, 0);
2356 switch (st->state) {
2357 case TCP_SEQ_STATE_OPENREQ:
2358 case TCP_SEQ_STATE_LISTENING:
2359 rc = listening_get_next(seq, v);
2361 tcp_listen_unlock();
2363 st->state = TCP_SEQ_STATE_ESTABLISHED;
2364 rc = established_get_first(seq);
2367 case TCP_SEQ_STATE_ESTABLISHED:
2368 case TCP_SEQ_STATE_TIME_WAIT:
2369 rc = established_get_next(seq, v);
2377 static void tcp_seq_stop(struct seq_file *seq, void *v)
2379 struct tcp_iter_state* st = seq->private;
2381 switch (st->state) {
2382 case TCP_SEQ_STATE_OPENREQ:
2384 struct tcp_opt *tp = tcp_sk(st->syn_wait_sk);
2385 read_unlock_bh(&tp->syn_wait_lock);
2387 case TCP_SEQ_STATE_LISTENING:
2388 if (v != SEQ_START_TOKEN)
2389 tcp_listen_unlock();
2391 case TCP_SEQ_STATE_TIME_WAIT:
2392 case TCP_SEQ_STATE_ESTABLISHED:
2394 read_unlock(&tcp_ehash[st->bucket].lock);
2400 static int tcp_seq_open(struct inode *inode, struct file *file)
2402 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2403 struct seq_file *seq;
2404 struct tcp_iter_state *s;
2407 if (unlikely(afinfo == NULL))
2410 s = kmalloc(sizeof(*s), GFP_KERNEL);
2413 memset(s, 0, sizeof(*s));
2414 s->family = afinfo->family;
2415 s->seq_ops.start = tcp_seq_start;
2416 s->seq_ops.next = tcp_seq_next;
2417 s->seq_ops.show = afinfo->seq_show;
2418 s->seq_ops.stop = tcp_seq_stop;
2420 rc = seq_open(file, &s->seq_ops);
2423 seq = file->private_data;
2432 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2435 struct proc_dir_entry *p;
2439 afinfo->seq_fops->owner = afinfo->owner;
2440 afinfo->seq_fops->open = tcp_seq_open;
2441 afinfo->seq_fops->read = seq_read;
2442 afinfo->seq_fops->llseek = seq_lseek;
2443 afinfo->seq_fops->release = seq_release_private;
2445 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2453 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2457 proc_net_remove(afinfo->name);
2458 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
2461 static void get_openreq4(struct sock *sk, struct open_request *req,
2462 char *tmpbuf, int i, int uid)
2464 int ttd = req->expires - jiffies;
2466 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2467 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2469 req->af.v4_req.loc_addr,
2470 ntohs(inet_sk(sk)->sport),
2471 req->af.v4_req.rmt_addr,
2472 ntohs(req->rmt_port),
2474 0, 0, /* could print option size, but that is af dependent. */
2475 1, /* timers active (only the expire timer) */
2476 jiffies_to_clock_t(ttd),
2479 0, /* non standard timer */
2480 0, /* open_requests have no inode */
2481 atomic_read(&sk->sk_refcnt),
2485 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2488 unsigned long timer_expires;
2489 struct tcp_opt *tp = tcp_sk(sp);
2490 struct inet_opt *inet = inet_sk(sp);
2491 unsigned int dest = inet->daddr;
2492 unsigned int src = inet->rcv_saddr;
2493 __u16 destp = ntohs(inet->dport);
2494 __u16 srcp = ntohs(inet->sport);
2496 if (tp->pending == TCP_TIME_RETRANS) {
2498 timer_expires = tp->timeout;
2499 } else if (tp->pending == TCP_TIME_PROBE0) {
2501 timer_expires = tp->timeout;
2502 } else if (timer_pending(&sp->sk_timer)) {
2504 timer_expires = sp->sk_timer.expires;
2507 timer_expires = jiffies;
2510 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2511 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2512 i, src, srcp, dest, destp, sp->sk_state,
2513 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2515 jiffies_to_clock_t(timer_expires - jiffies),
2520 atomic_read(&sp->sk_refcnt), sp,
2521 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2523 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2526 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2528 unsigned int dest, src;
2530 int ttd = tw->tw_ttd - jiffies;
2535 dest = tw->tw_daddr;
2536 src = tw->tw_rcv_saddr;
2537 destp = ntohs(tw->tw_dport);
2538 srcp = ntohs(tw->tw_sport);
2540 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2541 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2542 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2543 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2544 atomic_read(&tw->tw_refcnt), tw);
2549 static int tcp4_seq_show(struct seq_file *seq, void *v)
2551 struct tcp_iter_state* st;
2552 char tmpbuf[TMPSZ + 1];
2554 if (v == SEQ_START_TOKEN) {
2555 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2556 " sl local_address rem_address st tx_queue "
2557 "rx_queue tr tm->when retrnsmt uid timeout "
2563 switch (st->state) {
2564 case TCP_SEQ_STATE_LISTENING:
2565 case TCP_SEQ_STATE_ESTABLISHED:
2566 get_tcp4_sock(v, tmpbuf, st->num);
2568 case TCP_SEQ_STATE_OPENREQ:
2569 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2571 case TCP_SEQ_STATE_TIME_WAIT:
2572 get_timewait4_sock(v, tmpbuf, st->num);
2575 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
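/* Illustrative only: a line produced by tcp4_seq_show() above for a local
 * web server might look like
 *
 *	0: 0100007F:0050 00000000:0000 0A ...
 *
 * i.e. addresses and ports are printed as hex, with the IPv4 address shown
 * in host byte order (so 127.0.0.1 appears as 0100007F on little-endian
 * machines) and 0x0050 being port 80; the example values are made up.
 */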
2580 static struct file_operations tcp4_seq_fops;
2581 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2582 .owner = THIS_MODULE,
2585 .seq_show = tcp4_seq_show,
2586 .seq_fops = &tcp4_seq_fops,
2589 int __init tcp4_proc_init(void)
2591 return tcp_proc_register(&tcp4_seq_afinfo);
2594 void tcp4_proc_exit(void)
2596 tcp_proc_unregister(&tcp4_seq_afinfo);
2598 #endif /* CONFIG_PROC_FS */
2600 struct proto tcp_prot = {
2602 .owner = THIS_MODULE,
2604 .connect = tcp_v4_connect,
2605 .disconnect = tcp_disconnect,
2606 .accept = tcp_accept,
2608 .init = tcp_v4_init_sock,
2609 .destroy = tcp_v4_destroy_sock,
2610 .shutdown = tcp_shutdown,
2611 .setsockopt = tcp_setsockopt,
2612 .getsockopt = tcp_getsockopt,
2613 .sendmsg = tcp_sendmsg,
2614 .recvmsg = tcp_recvmsg,
2615 .backlog_rcv = tcp_v4_do_rcv,
2616 .hash = tcp_v4_hash,
2617 .unhash = tcp_unhash,
2618 .get_port = tcp_v4_get_port,
2619 .enter_memory_pressure = tcp_enter_memory_pressure,
2620 .sockets_allocated = &tcp_sockets_allocated,
2621 .memory_allocated = &tcp_memory_allocated,
2622 .memory_pressure = &tcp_memory_pressure,
2623 .sysctl_mem = sysctl_tcp_mem,
2624 .sysctl_wmem = sysctl_tcp_wmem,
2625 .sysctl_rmem = sysctl_tcp_rmem,
2626 .max_header = MAX_TCP_HEADER,
2627 .slab_obj_size = sizeof(struct tcp_sock),
2632 void __init tcp_v4_init(struct net_proto_family *ops)
2634 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2636 panic("Failed to create the TCP control socket.\n");
2637 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2638 inet_sk(tcp_socket->sk)->uc_ttl = -1;
2640 /* Unhash it so that IP input processing does not even
2641 * see it; we do not wish this socket to see incoming
2644 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2647 EXPORT_SYMBOL(ipv4_specific);
2648 EXPORT_SYMBOL(tcp_bind_hash);
2649 EXPORT_SYMBOL(tcp_bucket_create);
2650 EXPORT_SYMBOL(tcp_hashinfo);
2651 EXPORT_SYMBOL(tcp_inherit_port);
2652 EXPORT_SYMBOL(tcp_listen_wlock);
2653 EXPORT_SYMBOL(tcp_port_rover);
2654 EXPORT_SYMBOL(tcp_prot);
2655 EXPORT_SYMBOL(tcp_put_port);
2656 EXPORT_SYMBOL(tcp_unhash);
2657 EXPORT_SYMBOL(tcp_v4_conn_request);
2658 EXPORT_SYMBOL(tcp_v4_connect);
2659 EXPORT_SYMBOL(tcp_v4_do_rcv);
2660 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2661 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2662 EXPORT_SYMBOL(tcp_v4_send_check);
2663 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2665 #ifdef CONFIG_PROC_FS
2666 EXPORT_SYMBOL(tcp_proc_register);
2667 EXPORT_SYMBOL(tcp_proc_unregister);
2669 EXPORT_SYMBOL(sysctl_local_port_range);
2670 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2671 EXPORT_SYMBOL(sysctl_tcp_low_latency);