2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
* Implementation of the Transmission Control Protocol (TCP).
10 * IPv4 specific functions
15 * linux/ipv4/tcp_input.c
16 * linux/ipv4/tcp_output.c
18 * See tcp.c for author information
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
28 * David S. Miller : New socket lookup architecture.
29 * This code is dedicated to John Dyson.
30 * David S. Miller : Change semantics of established hash,
31 * half is devoted to TIME_WAIT sockets
32 * and the rest go in the other half.
33 * Andi Kleen : Add support for syncookies and fixed
34 * some bugs: ip options weren't passed to
35 * the TCP layer, missed a check for an
37 * Andi Kleen : Implemented fast path mtu discovery.
38 * Fixed many serious bugs in the
39 * open_request handling and moved
40 * most of it into the af independent code.
41 * Added tail drop and some other bugfixes.
* Added new listen semantics.
43 * Mike McLagan : Routing by source
44 * Juan Jose Ciarlante: ip_dynaddr bits
45 * Andi Kleen: various fixes.
46 * Vitaly E. Lavrov : Transparent proxy revived after year
48 * Andi Kleen : Fix new listen.
49 * Andi Kleen : Fix accept error reporting.
50 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
52 * a single port at the same time.
55 #include <linux/config.h>
57 #include <linux/types.h>
58 #include <linux/fcntl.h>
59 #include <linux/module.h>
60 #include <linux/random.h>
61 #include <linux/cache.h>
62 #include <linux/jhash.h>
63 #include <linux/init.h>
64 #include <linux/times.h>
69 #include <net/inet_common.h>
72 #include <linux/inet.h>
73 #include <linux/ipv6.h>
74 #include <linux/stddef.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/vserver/debug.h>
79 extern int sysctl_ip_dynaddr;
80 int sysctl_tcp_tw_reuse;
81 int sysctl_tcp_low_latency;
83 /* Check TCP sequence numbers in ICMP packets. */
84 #define ICMP_MIN_LENGTH 8
86 /* Socket used for sending RSTs */
87 static struct socket *tcp_socket;
89 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
92 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
93 .__tcp_lhash_lock = RW_LOCK_UNLOCKED,
94 .__tcp_lhash_users = ATOMIC_INIT(0),
96 = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
97 .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
/* This array holds the first and last local port number.
 * For high-usage systems, use sysctl to change this to
 * a wider range.
 */
105 int sysctl_local_port_range[2] = { 1024, 4999 };
106 int tcp_port_rover = 1024 - 1;
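/* tcp_port_rover remembers the last ephemeral port handed out by
 * tcp_v4_get_port() below; it deliberately starts at low - 1 so the
 * very first allocation can use port 1024 itself.
 */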
108 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
109 __u32 faddr, __u16 fport)
111 int h = (laddr ^ lport) ^ (faddr ^ fport);
114 return h & (tcp_ehash_size - 1);
117 static __inline__ int tcp_sk_hashfn(struct sock *sk)
119 struct inet_sock *inet = inet_sk(sk);
120 __u32 laddr = inet->rcv_saddr;
121 __u16 lport = inet->num;
122 __u32 faddr = inet->daddr;
123 __u16 fport = inet->dport;
125 return tcp_hashfn(laddr, lport, faddr, fport);
128 /* Allocate and initialize a new TCP local port bind bucket.
129 * The bindhash mutex for snum's hash chain must be held here.
131 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
134 struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
139 INIT_HLIST_HEAD(&tb->owners);
140 hlist_add_head(&tb->node, &head->chain);
145 /* Caller must hold hashbucket lock for this tb with local BH disabled */
146 void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
148 if (hlist_empty(&tb->owners)) {
149 __hlist_del(&tb->node);
150 kmem_cache_free(tcp_bucket_cachep, tb);
154 /* Caller must disable local BH processing. */
155 static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
157 struct tcp_bind_hashbucket *head =
158 &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
159 struct tcp_bind_bucket *tb;
161 spin_lock(&head->lock);
162 tb = tcp_sk(sk)->bind_hash;
163 sk_add_bind_node(child, &tb->owners);
164 tcp_sk(child)->bind_hash = tb;
165 spin_unlock(&head->lock);
168 inline void tcp_inherit_port(struct sock *sk, struct sock *child)
171 __tcp_inherit_port(sk, child);
175 void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
178 inet_sk(sk)->num = snum;
179 sk_add_bind_node(sk, &tb->owners);
180 tcp_sk(sk)->bind_hash = tb;
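/* Every bound socket hangs off a tcp_bind_bucket via tb->owners and
 * points back at it through tcp_sk(sk)->bind_hash, so tcp_bind_conflict()
 * below only has to walk the owners of a single bucket to decide
 * whether a new bind to the same port is allowed.
 */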
183 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
186 struct hlist_node *node;
187 int reuse = sk->sk_reuse;
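/* A conflict requires another socket on the same port whose bound
 * device does not rule out a match (either side unbound, or both
 * bound to the same ifindex).  SO_REUSEADDR on both sides excuses
 * everything except a listening socket; only then are the actual
 * addresses compared via nx_addr_conflict().
 */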
189 sk_for_each_bound(sk2, node, &tb->owners) {
191 !tcp_v6_ipv6only(sk2) &&
192 (!sk->sk_bound_dev_if ||
193 !sk2->sk_bound_dev_if ||
194 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
195 if (!reuse || !sk2->sk_reuse ||
196 sk2->sk_state == TCP_LISTEN) {
197 if (nx_addr_conflict(sk->sk_nx_info,
198 tcp_v4_rcv_saddr(sk), sk2))
/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
209 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
211 struct tcp_bind_hashbucket *head;
212 struct hlist_node *node;
213 struct tcp_bind_bucket *tb;
218 int low = sysctl_local_port_range[0];
219 int high = sysctl_local_port_range[1];
220 int remaining = (high - low) + 1;
223 spin_lock(&tcp_portalloc_lock);
224 if (tcp_port_rover < low)
227 rover = tcp_port_rover;
232 head = &tcp_bhash[tcp_bhashfn(rover)];
233 spin_lock(&head->lock);
234 tb_for_each(tb, node, &head->chain)
235 if (tb->port == rover)
239 spin_unlock(&head->lock);
240 } while (--remaining > 0);
241 tcp_port_rover = rover;
242 spin_unlock(&tcp_portalloc_lock);
244 /* Exhausted local port range during search? */
/* OK, here is the one we will use.  HEAD is
 * non-NULL and we hold its mutex.
 */
254 head = &tcp_bhash[tcp_bhashfn(snum)];
255 spin_lock(&head->lock);
256 tb_for_each(tb, node, &head->chain)
257 if (tb->port == snum)
263 if (!hlist_empty(&tb->owners)) {
264 if (sk->sk_reuse > 1)
266 if (tb->fastreuse > 0 &&
267 sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
271 if (tcp_bind_conflict(sk, tb))
277 if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
279 if (hlist_empty(&tb->owners)) {
280 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
284 } else if (tb->fastreuse &&
285 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
288 if (!tcp_sk(sk)->bind_hash)
289 tcp_bind_hash(sk, tb, snum);
290 BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
294 spin_unlock(&head->lock);
300 /* Get rid of any references to a local port held by the
303 static void __tcp_put_port(struct sock *sk)
305 struct inet_sock *inet = inet_sk(sk);
306 struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
307 struct tcp_bind_bucket *tb;
309 spin_lock(&head->lock);
310 tb = tcp_sk(sk)->bind_hash;
311 __sk_del_bind_node(sk);
312 tcp_sk(sk)->bind_hash = NULL;
314 tcp_bucket_destroy(tb);
315 spin_unlock(&head->lock);
318 void tcp_put_port(struct sock *sk)
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and a reader wakes them up, all but one
 * immediately hit the write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */
332 void tcp_listen_wlock(void)
334 write_lock(&tcp_lhash_lock);
336 if (atomic_read(&tcp_lhash_users)) {
340 prepare_to_wait_exclusive(&tcp_lhash_wait,
341 &wait, TASK_UNINTERRUPTIBLE);
342 if (!atomic_read(&tcp_lhash_users))
344 write_unlock_bh(&tcp_lhash_lock);
346 write_lock_bh(&tcp_lhash_lock);
349 finish_wait(&tcp_lhash_wait, &wait);
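/* Hashing policy: listening sockets go into tcp_listening_hash under
 * the global tcp_lhash_lock; everything else goes into the per-bucket
 * locked tcp_ehash, with the bucket index cached in sk->sk_hashent.
 */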
353 static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
355 struct hlist_head *list;
358 BUG_TRAP(sk_unhashed(sk));
359 if (listen_possible && sk->sk_state == TCP_LISTEN) {
360 list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
361 lock = &tcp_lhash_lock;
364 list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
365 lock = &tcp_ehash[sk->sk_hashent].lock;
368 __sk_add_node(sk, list);
369 sock_prot_inc_use(sk->sk_prot);
371 if (listen_possible && sk->sk_state == TCP_LISTEN)
372 wake_up(&tcp_lhash_wait);
375 static void tcp_v4_hash(struct sock *sk)
377 if (sk->sk_state != TCP_CLOSE) {
379 __tcp_v4_hash(sk, 1);
384 void tcp_unhash(struct sock *sk)
391 if (sk->sk_state == TCP_LISTEN) {
394 lock = &tcp_lhash_lock;
396 struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
398 write_lock_bh(&head->lock);
401 if (__sk_del_node_init(sk))
402 sock_prot_dec_use(sk->sk_prot);
403 write_unlock_bh(lock);
406 if (sk->sk_state == TCP_LISTEN)
407 wake_up(&tcp_lhash_wait);
/* Check if a given address matches for a tcp socket
 *
 * nxi:   the socket's nx_info, if any
 * addr:  the address to be verified
 * saddr: the socket's address
 */
418 static inline int tcp_addr_match (
423 if (addr && (saddr == addr))
426 return addr_in_nx_info(nxi, addr);
/* Don't inline this cruft.  There are some nice properties to
 * exploit here.  The BSD API does not allow a listening TCP
 * to specify the remote port nor the remote address for the
 * connection.  So always assume those are both wildcarded
 * during the search since they can never be otherwise.
 */
436 static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
437 unsigned short hnum, int dif)
439 struct sock *result = NULL, *sk;
440 struct hlist_node *node;
444 sk_for_each(sk, node, head) {
445 struct inet_sock *inet = inet_sk(sk);
447 if (inet->num == hnum && !ipv6_only_sock(sk)) {
448 __u32 rcv_saddr = inet->rcv_saddr;
450 score = (sk->sk_family == PF_INET ? 1 : 0);
451 if (tcp_addr_match(sk->sk_nx_info, daddr, rcv_saddr))
455 if (sk->sk_bound_dev_if) {
456 if (sk->sk_bound_dev_if != dif)
462 if (score > hiscore) {
471 /* Optimize the common listener case. */
472 struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif)
474 struct sock *sk = NULL;
475 struct hlist_head *head;
477 read_lock(&tcp_lhash_lock);
478 head = &tcp_listening_hash[tcp_lhashfn(hnum)];
479 if (!hlist_empty(head)) {
480 struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
482 if (inet->num == hnum && !sk->sk_node.next &&
483 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
484 tcp_addr_match(sk->sk_nx_info, daddr, inet->rcv_saddr) &&
485 !sk->sk_bound_dev_if)
487 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
493 read_unlock(&tcp_lhash_lock);
497 EXPORT_SYMBOL_GPL(tcp_v4_lookup_listener);
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
505 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
509 struct tcp_ehash_bucket *head;
510 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
511 __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
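/* The address cookie and the combined port word let TCP_IPV4_MATCH
 * compare both addresses and both ports with a couple of word
 * compares instead of four separate tests.
 */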
513 struct hlist_node *node;
/* Optimize here for direct hit, only listening connections can
 * have wildcards anyway.
 */
517 int hash = tcp_hashfn(daddr, hnum, saddr, sport);
518 head = &tcp_ehash[hash];
519 read_lock(&head->lock);
520 sk_for_each(sk, node, &head->chain) {
521 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
522 goto hit; /* You sunk my battleship! */
525 /* Must check for a TIME_WAIT'er before going to listener hash. */
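/* As the header notes, the second half of the ehash table (buckets
 * from head + tcp_ehash_size onward) is devoted to TIME_WAIT sockets,
 * so the TIME_WAIT walk just shifts the bucket pointer.
 */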
526 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
527 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
532 read_unlock(&head->lock);
539 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
540 u32 daddr, u16 hnum, int dif)
542 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
545 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
548 inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
554 sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
560 EXPORT_SYMBOL_GPL(tcp_v4_lookup);
562 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
564 return secure_tcp_sequence_number(skb->nh.iph->daddr,
570 /* called with local bh disabled */
571 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
572 struct tcp_tw_bucket **twp)
574 struct inet_sock *inet = inet_sk(sk);
575 u32 daddr = inet->rcv_saddr;
576 u32 saddr = inet->daddr;
577 int dif = sk->sk_bound_dev_if;
578 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
579 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
580 int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
581 struct tcp_ehash_bucket *head = &tcp_ehash[hash];
583 struct hlist_node *node;
584 struct tcp_tw_bucket *tw;
586 write_lock(&head->lock);
588 /* Check TIME-WAIT sockets first. */
589 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
590 tw = (struct tcp_tw_bucket *)sk2;
592 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
593 struct tcp_sock *tp = tcp_sk(sk);
/* With PAWS, it is safe from the viewpoint
   of data integrity.  Even without PAWS it
   is safe provided sequence spaces do not
   overlap, i.e. at data rates <= 80 Mbit/sec.

   Actually, the idea is close to VJ's: only
   the timestamp cache is held not per host
   but per port pair, and the TW bucket is used

   If the TW bucket has already been destroyed we
   fall back to VJ's scheme and use the initial
   timestamp retrieved from the peer table.
 */
609 if (tw->tw_ts_recent_stamp &&
610 (!twp || (sysctl_tcp_tw_reuse &&
612 tw->tw_ts_recent_stamp > 1))) {
614 tw->tw_snd_nxt + 65535 + 2) == 0)
616 tp->rx_opt.ts_recent = tw->tw_ts_recent;
617 tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
626 /* And established part... */
627 sk_for_each(sk2, node, &head->chain) {
628 if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
/* Must record num and sport now.  Otherwise we will see
 * a socket with a funny identity in the hash table.
 */
636 inet->sport = htons(lport);
637 sk->sk_hashent = hash;
638 BUG_TRAP(sk_unhashed(sk));
639 __sk_add_node(sk, &head->chain);
640 sock_prot_inc_use(sk->sk_prot);
641 write_unlock(&head->lock);
645 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
647 /* Silly. Should hash-dance instead... */
648 tcp_tw_deschedule(tw);
649 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
657 write_unlock(&head->lock);
658 return -EADDRNOTAVAIL;
661 static inline u32 connect_port_offset(const struct sock *sk)
663 const struct inet_sock *inet = inet_sk(sk);
665 return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr,
/*
 * Bind a port for a connect operation and hash it.
 */
672 static inline int tcp_v4_hash_connect(struct sock *sk)
674 unsigned short snum = inet_sk(sk)->num;
675 struct tcp_bind_hashbucket *head;
676 struct tcp_bind_bucket *tb;
680 int low = sysctl_local_port_range[0];
681 int high = sysctl_local_port_range[1];
682 int range = high - low;
686 u32 offset = hint + connect_port_offset(sk);
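/* connect_port_offset() is keyed on the connection's addresses via
 * secure_tcp_port_ephemeral(), so concurrent connects to different
 * peers start probing the local port range at different positions
 * instead of all fighting over the same rover value.
 */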
687 struct hlist_node *node;
688 struct tcp_tw_bucket *tw = NULL;
691 for (i = 1; i <= range; i++) {
692 port = low + (i + offset) % range;
693 head = &tcp_bhash[tcp_bhashfn(port)];
694 spin_lock(&head->lock);
/* Does not bother with rcv_saddr checks,
 * because the established check is already
 * unique enough.
 */
700 tb_for_each(tb, node, &head->chain) {
701 if (tb->port == port) {
702 BUG_TRAP(!hlist_empty(&tb->owners));
703 if (tb->fastreuse >= 0)
705 if (!__tcp_v4_check_established(sk,
713 tb = tcp_bucket_create(head, port);
715 spin_unlock(&head->lock);
722 spin_unlock(&head->lock);
726 return -EADDRNOTAVAIL;
731 /* Head lock still held and bh's disabled */
732 tcp_bind_hash(sk, tb, port);
733 if (sk_unhashed(sk)) {
734 inet_sk(sk)->sport = htons(port);
735 __tcp_v4_hash(sk, 0);
737 spin_unlock(&head->lock);
740 tcp_tw_deschedule(tw);
748 head = &tcp_bhash[tcp_bhashfn(snum)];
749 tb = tcp_sk(sk)->bind_hash;
750 spin_lock_bh(&head->lock);
751 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
752 __tcp_v4_hash(sk, 0);
753 spin_unlock_bh(&head->lock);
756 spin_unlock(&head->lock);
/* No definite answer... Walk the established hash table */
758 ret = __tcp_v4_check_established(sk, snum, NULL);
765 /* This will initiate an outgoing connection. */
766 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
768 struct inet_sock *inet = inet_sk(sk);
769 struct tcp_sock *tp = tcp_sk(sk);
770 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
776 if (addr_len < sizeof(struct sockaddr_in))
779 if (usin->sin_family != AF_INET)
780 return -EAFNOSUPPORT;
782 nexthop = daddr = usin->sin_addr.s_addr;
783 if (inet->opt && inet->opt->srr) {
786 nexthop = inet->opt->faddr;
789 tmp = ip_route_connect(&rt, nexthop, inet->saddr,
790 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
792 inet->sport, usin->sin_port, sk);
796 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
801 if (!inet->opt || !inet->opt->srr)
805 inet->saddr = rt->rt_src;
806 inet->rcv_saddr = inet->saddr;
808 if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
809 /* Reset inherited state */
810 tp->rx_opt.ts_recent = 0;
811 tp->rx_opt.ts_recent_stamp = 0;
815 if (sysctl_tcp_tw_recycle &&
816 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
817 struct inet_peer *peer = rt_get_peer(rt);
/* VJ's idea.  We save the last timestamp seen from
 * the destination in the peer table when entering TIME-WAIT state,
 * and initialize rx_opt.ts_recent from it when trying a new connection.
 */
824 if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
825 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
826 tp->rx_opt.ts_recent = peer->tcp_ts;
830 inet->dport = usin->sin_port;
833 tp->ext_header_len = 0;
835 tp->ext_header_len = inet->opt->optlen;
837 tp->rx_opt.mss_clamp = 536;
/* Socket identity is still unknown (sport may be zero).
 * However we set state to SYN-SENT and, without releasing the socket
 * lock, select a source port, enter ourselves into the hash tables and
 * complete initialization after this.
 */
844 tcp_set_state(sk, TCP_SYN_SENT);
845 err = tcp_v4_hash_connect(sk);
849 err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
853 /* OK, now commit destination to socket. */
854 __sk_dst_set(sk, &rt->u.dst);
855 tcp_v4_setup_caps(sk, &rt->u.dst);
858 tp->write_seq = secure_tcp_sequence_number(inet->saddr,
863 inet->id = tp->write_seq ^ jiffies;
865 err = tcp_connect(sk);
873 /* This unhashes the socket and releases the local port, if necessary. */
874 tcp_set_state(sk, TCP_CLOSE);
876 sk->sk_route_caps = 0;
881 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
883 return ((struct rtable *)skb->dst)->rt_iif;
886 static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
888 return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
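/* The syn queue hash mixes the remote address and port with a
 * per-listener random seed (lopt->hash_rnd), so a flooder cannot
 * aim all open_requests at a single chain.
 */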
891 static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
892 struct open_request ***prevp,
894 __u32 raddr, __u32 laddr)
896 struct tcp_listen_opt *lopt = tp->listen_opt;
897 struct open_request *req, **prev;
899 for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
900 (req = *prev) != NULL;
901 prev = &req->dl_next) {
902 if (req->rmt_port == rport &&
903 req->af.v4_req.rmt_addr == raddr &&
904 req->af.v4_req.loc_addr == laddr &&
905 TCP_INET_FAMILY(req->class->family)) {
915 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
917 struct tcp_sock *tp = tcp_sk(sk);
918 struct tcp_listen_opt *lopt = tp->listen_opt;
919 u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
921 req->expires = jiffies + TCP_TIMEOUT_INIT;
924 req->dl_next = lopt->syn_table[h];
926 write_lock(&tp->syn_wait_lock);
927 lopt->syn_table[h] = req;
928 write_unlock(&tp->syn_wait_lock);
/*
 * This routine does path mtu discovery as defined in RFC 1191.
 */
937 static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
940 struct dst_entry *dst;
941 struct inet_sock *inet = inet_sk(sk);
942 struct tcp_sock *tp = tcp_sk(sk);
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
 * sent out by Linux are always < 576 bytes, so they should go through).
 */
948 if (sk->sk_state == TCP_LISTEN)
/* We don't check in the dst entry if pmtu discovery is forbidden
 * on this route.  We just assume that no packet-too-big packets
 * are sent back when pmtu discovery is not active.
 * There is a small race when the user changes this flag in the
 * route, but I think that's acceptable.
 */
957 if ((dst = __sk_dst_check(sk, 0)) == NULL)
960 dst->ops->update_pmtu(dst, mtu);
/* Something is about to go wrong... Remember the soft error
 * for the case that this connection will not be able to recover.
 */
965 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
966 sk->sk_err_soft = EMSGSIZE;
970 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
971 tp->pmtu_cookie > mtu) {
972 tcp_sync_mss(sk, mtu);
/* Resend the TCP packet because it's
 * clear that the old packet has been
 * dropped. This is the new "fast" path mtu
 * discovery.
 */
979 tcp_simple_retransmit(sk);
980 } /* else let the usual retransmit timer handle it */
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic".  When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
999 void tcp_v4_err(struct sk_buff *skb, u32 info)
1001 struct iphdr *iph = (struct iphdr *)skb->data;
1002 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1003 struct tcp_sock *tp;
1004 struct inet_sock *inet;
1005 int type = skb->h.icmph->type;
1006 int code = skb->h.icmph->code;
1011 if (skb->len < (iph->ihl << 2) + 8) {
1012 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1016 sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
1017 th->source, tcp_v4_iif(skb));
1019 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
1022 if (sk->sk_state == TCP_TIME_WAIT) {
1023 tcp_tw_put((struct tcp_tw_bucket *)sk);
/* If too many ICMPs get dropped on busy
 * servers this needs to be solved differently.
 */
1031 if (sock_owned_by_user(sk))
1032 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
1034 if (sk->sk_state == TCP_CLOSE)
1038 seq = ntohl(th->seq);
1039 if (sk->sk_state != TCP_LISTEN &&
1040 !between(seq, tp->snd_una, tp->snd_nxt)) {
1041 NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
1046 case ICMP_SOURCE_QUENCH:
1047 /* Just silently ignore these. */
1049 case ICMP_PARAMETERPROB:
1052 case ICMP_DEST_UNREACH:
1053 if (code > NR_ICMP_UNREACH)
1056 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
1057 if (!sock_owned_by_user(sk))
1058 do_pmtu_discovery(sk, iph, info);
1062 err = icmp_err_convert[code].errno;
1064 case ICMP_TIME_EXCEEDED:
1071 switch (sk->sk_state) {
1072 struct open_request *req, **prev;
1074 if (sock_owned_by_user(sk))
1077 req = tcp_v4_search_req(tp, &prev, th->dest,
1078 iph->daddr, iph->saddr);
/* ICMPs are not backlogged, hence we cannot get
 * an established socket here.
 */
1087 if (seq != req->snt_isn) {
1088 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
/* Still in SYN_RECV, just remove it silently.
 * There is no good way to pass the error to the newly
 * created socket, and POSIX does not want network
 * errors returned from accept().
 */
1098 tcp_synq_drop(sk, req, prev);
case TCP_SYN_RECV:  /* Cannot happen normally.
		       It can, e.g., if SYNs crossed. */
1105 if (!sock_owned_by_user(sk)) {
1106 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
1109 sk->sk_error_report(sk);
1113 sk->sk_err_soft = err;
/* If we've already connected we will keep trying
 * until we time out, or the user gives up.
 *
 * RFC 1122 4.2.3.9 allows us to consider as hard errors
 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
 * but it is obsoleted by pmtu discovery).
 *
 * Note that in the modern internet, where routing is unreliable
 * and broken firewalls sit in every dark corner sending random
 * errors as ordered by their masters, even these two messages finally lose
 * their original sense (even Linux sends invalid PORT_UNREACHs).
 *
 * Now we are in compliance with RFCs.
 */
1135 if (!sock_owned_by_user(sk) && inet->recverr) {
1137 sk->sk_error_report(sk);
1138 } else { /* Only an error on timeout */
1139 sk->sk_err_soft = err;
1147 /* This routine computes an IPv4 TCP checksum. */
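/* With CHECKSUM_HW the pseudo-header sum is folded into th->check and
 * skb->csum records where the hardware must deposit the final sum;
 * otherwise the whole segment is summed in software right here.
 */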
1148 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
1149 struct sk_buff *skb)
1151 struct inet_sock *inet = inet_sk(sk);
1153 if (skb->ip_summed == CHECKSUM_HW) {
1154 th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
1155 skb->csum = offsetof(struct tcphdr, check);
1157 th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
1158 csum_partial((char *)th,
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 * Answer: if a packet caused an RST, it is not for a socket
 * existing in our system; if it is matched to a socket,
 * it is just a duplicate segment or a bug in the other side's TCP.
 * So we build the reply based only on parameters
 * that arrived with the segment.
 * Exception: precedence violation. We do not implement it in any case.
 */
1177 static void tcp_v4_send_reset(struct sk_buff *skb)
1179 struct tcphdr *th = skb->h.th;
1181 struct ip_reply_arg arg;
1183 /* Never send a reset in response to a reset. */
1187 if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
1190 /* Swap the send and the receive. */
1191 memset(&rth, 0, sizeof(struct tcphdr));
1192 rth.dest = th->source;
1193 rth.source = th->dest;
1194 rth.doff = sizeof(struct tcphdr) / 4;
1198 rth.seq = th->ack_seq;
1201 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
1202 skb->len - (th->doff << 2));
1205 memset(&arg, 0, sizeof arg);
1206 arg.iov[0].iov_base = (unsigned char *)&rth;
1207 arg.iov[0].iov_len = sizeof rth;
1208 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1209 skb->nh.iph->saddr, /*XXX*/
1210 sizeof(struct tcphdr), IPPROTO_TCP, 0);
1211 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1213 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
1215 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1216 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly.  What can I do?
 */
1223 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
1226 struct tcphdr *th = skb->h.th;
1231 struct ip_reply_arg arg;
1233 memset(&rep.th, 0, sizeof(struct tcphdr));
1234 memset(&arg, 0, sizeof arg);
1236 arg.iov[0].iov_base = (unsigned char *)&rep;
1237 arg.iov[0].iov_len = sizeof(rep.th);
1239 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1240 (TCPOPT_TIMESTAMP << 8) |
1242 rep.tsopt[1] = htonl(tcp_time_stamp);
1243 rep.tsopt[2] = htonl(ts);
1244 arg.iov[0].iov_len = sizeof(rep);
1247 /* Swap the send and the receive. */
1248 rep.th.dest = th->source;
1249 rep.th.source = th->dest;
1250 rep.th.doff = arg.iov[0].iov_len / 4;
1251 rep.th.seq = htonl(seq);
1252 rep.th.ack_seq = htonl(ack);
1254 rep.th.window = htons(win);
1256 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
1257 skb->nh.iph->saddr, /*XXX*/
1258 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1259 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1261 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
1263 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1266 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1268 struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
1270 tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
1271 tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
1276 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
1278 tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
1282 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
1283 struct open_request *req)
1286 struct ip_options *opt = req->af.v4_req.opt;
1287 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1289 { .daddr = ((opt && opt->srr) ?
1291 req->af.v4_req.rmt_addr),
1292 .saddr = req->af.v4_req.loc_addr,
1293 .tos = RT_CONN_FLAGS(sk) } },
1294 .proto = IPPROTO_TCP,
1296 { .sport = inet_sk(sk)->sport,
1297 .dport = req->rmt_port } } };
1299 if (ip_route_output_flow(&rt, &fl, sk, 0)) {
1300 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
1303 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
1305 IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
 * Send a SYN-ACK after having received a SYN.
 * This still operates on an open_request only, not on a big
 * socket.
 */
1316 static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
1317 struct dst_entry *dst)
1320 struct sk_buff * skb;
1322 /* First, grab a route. */
1323 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1326 skb = tcp_make_synack(sk, dst, req);
1329 struct tcphdr *th = skb->h.th;
1331 th->check = tcp_v4_check(th, skb->len,
1332 req->af.v4_req.loc_addr,
1333 req->af.v4_req.rmt_addr,
1334 csum_partial((char *)th, skb->len,
1337 err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
1338 req->af.v4_req.rmt_addr,
1339 req->af.v4_req.opt);
1340 if (err == NET_XMIT_CN)
/*
 * IPv4 open_request destructor.
 */
1352 static void tcp_v4_or_free(struct open_request *req)
1354 if (req->af.v4_req.opt)
1355 kfree(req->af.v4_req.opt);
1358 static inline void syn_flood_warning(struct sk_buff *skb)
1360 static unsigned long warntime;
1362 if (time_after(jiffies, (warntime + HZ * 60))) {
1365 "possible SYN flooding on port %d. Sending cookies.\n",
1366 ntohs(skb->h.th->dest));
/*
 * Save and compile IPv4 options into the open_request if needed.
 */
1373 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
1374 struct sk_buff *skb)
1376 struct ip_options *opt = &(IPCB(skb)->opt);
1377 struct ip_options *dopt = NULL;
1379 if (opt && opt->optlen) {
1380 int opt_size = optlength(opt);
1381 dopt = kmalloc(opt_size, GFP_ATOMIC);
1383 if (ip_options_echo(dopt, skb)) {
/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * It was 128 by default.  Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec.  256 cures most
 * of the problems.  This value is adjusted to 128 for very small machines
 * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
 * Further increase would require changing the hash table size.
 */
1405 int sysctl_max_syn_backlog = 256;
1407 struct or_calltable or_ipv4 = {
1409 .rtx_syn_ack = tcp_v4_send_synack,
1410 .send_ack = tcp_v4_or_send_ack,
1411 .destructor = tcp_v4_or_free,
1412 .send_reset = tcp_v4_send_reset,
1415 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1417 struct tcp_options_received tmp_opt;
1418 struct open_request *req;
1419 __u32 saddr = skb->nh.iph->saddr;
1420 __u32 daddr = skb->nh.iph->daddr;
1421 __u32 isn = TCP_SKB_CB(skb)->when;
1422 struct dst_entry *dst = NULL;
1423 #ifdef CONFIG_SYN_COOKIES
1424 int want_cookie = 0;
1426 #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
/* Never answer SYNs sent to broadcast or multicast */
1430 if (((struct rtable *)skb->dst)->rt_flags &
1431 (RTCF_BROADCAST | RTCF_MULTICAST))
/* TW buckets are converted to open requests without
 * limitation; they conserve resources and the peer is
 * evidently a real one.
 */
1438 if (tcp_synq_is_full(sk) && !isn) {
1439 #ifdef CONFIG_SYN_COOKIES
1440 if (sysctl_tcp_syncookies) {
/* Accept backlog is full. If we have already queued enough
 * warm entries in the syn queue, drop this request. It is better than
 * clogging the syn queue with openreqs with exponentially increasing
 * timeout.
 */
1452 if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
1455 req = tcp_openreq_alloc();
1459 tcp_clear_options(&tmp_opt);
1460 tmp_opt.mss_clamp = 536;
1461 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
1463 tcp_parse_options(skb, &tmp_opt, 0);
1466 tcp_clear_options(&tmp_opt);
1467 tmp_opt.saw_tstamp = 0;
1470 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
/* Some OSes (unknown ones, but I see them on a web server, which
 * contains information interesting only for windows'
 * users) do not send their stamp in SYN. It is an easy case:
 * we simply do not advertise TS support.
 */
1476 tmp_opt.saw_tstamp = 0;
1477 tmp_opt.tstamp_ok = 0;
1479 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1481 tcp_openreq_init(req, &tmp_opt, skb);
1483 req->af.v4_req.loc_addr = daddr;
1484 req->af.v4_req.rmt_addr = saddr;
1485 req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
1486 req->class = &or_ipv4;
1488 TCP_ECN_create_request(req, skb->h.th);
1491 #ifdef CONFIG_SYN_COOKIES
1492 syn_flood_warning(skb);
1494 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1496 struct inet_peer *peer = NULL;
/* VJ's idea. We save the last timestamp seen
 * from the destination in the peer table when entering
 * state TIME-WAIT, and check against it before
 * accepting a new connection request.
 *
 * If "isn" is not zero, this request hit an alive
 * timewait bucket, so all the necessary checks
 * are made in the function processing timewait state.
 */
1507 if (tmp_opt.saw_tstamp &&
1508 sysctl_tcp_tw_recycle &&
1509 (dst = tcp_v4_route_req(sk, req)) != NULL &&
1510 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1511 peer->v4daddr == saddr) {
1512 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1513 (s32)(peer->tcp_ts - req->ts_recent) >
1515 NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
1520 /* Kill the following clause, if you dislike this way. */
1521 else if (!sysctl_tcp_syncookies &&
1522 (sysctl_max_syn_backlog - tcp_synq_len(sk) <
1523 (sysctl_max_syn_backlog >> 2)) &&
1524 (!peer || !peer->tcp_ts_stamp) &&
1525 (!dst || !dst_metric(dst, RTAX_RTT))) {
/* Without syncookies the last quarter of the
 * backlog is filled with destinations
 * proven to be alive.
 * It means that we continue to communicate
 * with destinations already remembered
 * at the moment of the synflood.
 */
1533 NETDEBUG(if (net_ratelimit()) \
1534 printk(KERN_DEBUG "TCP: drop open "
1535 "request from %u.%u."
1538 ntohs(skb->h.th->source)));
1543 isn = tcp_v4_init_sequence(sk, skb);
1547 if (tcp_v4_send_synack(sk, req, dst))
1551 tcp_openreq_free(req);
1553 tcp_v4_synq_add(sk, req);
1558 tcp_openreq_free(req);
1560 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
1569 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1570 struct open_request *req,
1571 struct dst_entry *dst)
1573 struct inet_sock *newinet;
1574 struct tcp_sock *newtp;
1577 if (sk_acceptq_is_full(sk))
1580 if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
1583 newsk = tcp_create_openreq_child(sk, req, skb);
1587 newsk->sk_dst_cache = dst;
1588 tcp_v4_setup_caps(newsk, dst);
1590 newtp = tcp_sk(newsk);
1591 newinet = inet_sk(newsk);
1592 newinet->daddr = req->af.v4_req.rmt_addr;
1593 newinet->rcv_saddr = req->af.v4_req.loc_addr;
1594 newinet->saddr = req->af.v4_req.loc_addr;
1595 newinet->opt = req->af.v4_req.opt;
1596 req->af.v4_req.opt = NULL;
1597 newinet->mc_index = tcp_v4_iif(skb);
1598 newinet->mc_ttl = skb->nh.iph->ttl;
1599 newtp->ext_header_len = 0;
1601 newtp->ext_header_len = newinet->opt->optlen;
1602 newinet->id = newtp->write_seq ^ jiffies;
1604 tcp_sync_mss(newsk, dst_mtu(dst));
1605 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1606 tcp_initialize_rcv_mss(newsk);
1608 __tcp_v4_hash(newsk, 0);
1609 __tcp_inherit_port(sk, newsk);
1614 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1616 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1621 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1623 struct tcphdr *th = skb->h.th;
1624 struct iphdr *iph = skb->nh.iph;
1625 struct tcp_sock *tp = tcp_sk(sk);
1627 struct open_request **prev;
1628 /* Find possible connection requests. */
1629 struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
1630 iph->saddr, iph->daddr);
1632 return tcp_check_req(sk, skb, req, prev);
1634 nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
1641 if (nsk->sk_state != TCP_TIME_WAIT) {
1645 tcp_tw_put((struct tcp_tw_bucket *)nsk);
1649 #ifdef CONFIG_SYN_COOKIES
1650 if (!th->rst && !th->syn && th->ack)
1651 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
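/* Checksum strategy below: verify hardware-computed sums immediately;
 * for software sums, short packets (<= 76 bytes) are checked on the
 * spot, while longer ones only get the pseudo-header seeded into
 * skb->csum so a later copy-and-checksum pass can finish the job.
 */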
1656 static int tcp_v4_checksum_init(struct sk_buff *skb)
1658 if (skb->ip_summed == CHECKSUM_HW) {
1659 skb->ip_summed = CHECKSUM_UNNECESSARY;
1660 if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1661 skb->nh.iph->daddr, skb->csum))
1664 NETDEBUG(if (net_ratelimit())
1665 printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
1666 skb->ip_summed = CHECKSUM_NONE;
1668 if (skb->len <= 76) {
1669 if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
1671 skb_checksum(skb, 0, skb->len, 0)))
1673 skb->ip_summed = CHECKSUM_UNNECESSARY;
1675 skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
1677 skb->nh.iph->daddr, 0);
/* The socket must have its spinlock held when we get here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1691 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1693 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1694 TCP_CHECK_TIMER(sk);
1695 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1697 TCP_CHECK_TIMER(sk);
1701 if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
1704 if (sk->sk_state == TCP_LISTEN) {
1705 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1710 if (tcp_child_process(sk, nsk, skb))
1716 TCP_CHECK_TIMER(sk);
1717 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1719 TCP_CHECK_TIMER(sk);
1723 tcp_v4_send_reset(skb);
/* Be careful here. If this function gets more complicated and
 * gcc suffers from register pressure on the x86, sk (in %ebx)
 * might be destroyed here. This current version compiles correctly,
 * but you have been warned.
 */
1734 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1742 int tcp_v4_rcv(struct sk_buff *skb)
1748 if (skb->pkt_type != PACKET_HOST)
1751 /* Count it even if it's bad */
1752 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1754 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1759 if (th->doff < sizeof(struct tcphdr) / 4)
1761 if (!pskb_may_pull(skb, th->doff * 4))
/* An explanation is required here, I think.
 * Packet length and doff are validated by header prediction,
 * provided the case of th->doff == 0 is eliminated.
 * So, we defer the checks. */
1768 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1769 tcp_v4_checksum_init(skb) < 0))
1773 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1774 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1775 skb->len - th->doff * 4);
1776 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1777 TCP_SKB_CB(skb)->when = 0;
1778 TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
1779 TCP_SKB_CB(skb)->sacked = 0;
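/* end_seq above counts SYN and FIN as one unit of sequence space
 * each, on top of the payload length, since both flags consume a
 * sequence number of their own.
 */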
1781 sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
1782 skb->nh.iph->daddr, ntohs(th->dest),
1789 #if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
/* Silently drop if VNET is active and the context is not
 * entitled to read the packet.
 */
/* Transfer ownership of reusable TIME_WAIT buckets to
 * whomever VNET decided should own the packet.
 */
1797 if (sk->sk_state == TCP_TIME_WAIT)
1798 sk->sk_xid = skb->xid;
1800 if ((int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
1805 if (sk->sk_state == TCP_TIME_WAIT)
1808 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1809 goto discard_and_relse;
1811 if (sk_filter(sk, skb, 0))
1812 goto discard_and_relse;
1818 if (!sock_owned_by_user(sk)) {
1819 if (!tcp_prequeue(sk, skb))
1820 ret = tcp_v4_do_rcv(sk, skb);
1822 sk_add_backlog(sk, skb);
1830 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1833 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1835 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1836 #if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
1837 } else if (vnet_active && skb->sk) {
1838 /* VNET: Suppress RST if the port was bound to a (presumably raw) socket */
1841 tcp_v4_send_reset(skb);
1845 /* Discard frame. */
1854 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1855 tcp_tw_put((struct tcp_tw_bucket *) sk);
1859 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1860 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1861 tcp_tw_put((struct tcp_tw_bucket *) sk);
1864 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1865 skb, th, skb->len)) {
1867 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
1871 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1872 tcp_tw_put((struct tcp_tw_bucket *)sk);
1876 /* Fall through to ACK */
1879 tcp_v4_timewait_ack(sk, skb);
1883 case TCP_TW_SUCCESS:;
/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
1891 static void __tcp_v4_rehash(struct sock *sk)
1893 sk->sk_prot->unhash(sk);
1894 sk->sk_prot->hash(sk);
1897 static int tcp_v4_reselect_saddr(struct sock *sk)
1899 struct inet_sock *inet = inet_sk(sk);
1902 __u32 old_saddr = inet->saddr;
1904 __u32 daddr = inet->daddr;
1906 if (inet->opt && inet->opt->srr)
1907 daddr = inet->opt->faddr;
1909 /* Query new route. */
1910 err = ip_route_connect(&rt, daddr, 0,
1912 sk->sk_bound_dev_if,
1914 inet->sport, inet->dport, sk);
1918 __sk_dst_set(sk, &rt->u.dst);
1919 tcp_v4_setup_caps(sk, &rt->u.dst);
1921 new_saddr = rt->rt_src;
1923 if (new_saddr == old_saddr)
1926 if (sysctl_ip_dynaddr > 1) {
1927 printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
1928 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
1930 NIPQUAD(new_saddr));
1933 inet->saddr = new_saddr;
1934 inet->rcv_saddr = new_saddr;
/* XXX The one ugly spot where we need to
 * XXX really change the socket's identity after
 * XXX it has entered the hashes. -DaveM
 *
 * Besides that, it does not check for connection
 * uniqueness. Wait for troubles.
 */
1943 __tcp_v4_rehash(sk);
1947 int tcp_v4_rebuild_header(struct sock *sk)
1949 struct inet_sock *inet = inet_sk(sk);
1950 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1954 /* Route is OK, nothing to do. */
1959 daddr = inet->daddr;
1960 if (inet->opt && inet->opt->srr)
1961 daddr = inet->opt->faddr;
1964 struct flowi fl = { .oif = sk->sk_bound_dev_if,
1967 .saddr = inet->saddr,
1968 .tos = RT_CONN_FLAGS(sk) } },
1969 .proto = IPPROTO_TCP,
1971 { .sport = inet->sport,
1972 .dport = inet->dport } } };
1974 err = ip_route_output_flow(&rt, &fl, sk, 0);
1977 __sk_dst_set(sk, &rt->u.dst);
1978 tcp_v4_setup_caps(sk, &rt->u.dst);
1982 /* Routing failed... */
1983 sk->sk_route_caps = 0;
1985 if (!sysctl_ip_dynaddr ||
1986 sk->sk_state != TCP_SYN_SENT ||
1987 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1988 (err = tcp_v4_reselect_saddr(sk)) != 0)
1989 sk->sk_err_soft = -err;
1994 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1996 struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
1997 struct inet_sock *inet = inet_sk(sk);
1999 sin->sin_family = AF_INET;
2000 sin->sin_addr.s_addr = inet->daddr;
2001 sin->sin_port = inet->dport;
/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
2010 int tcp_v4_remember_stamp(struct sock *sk)
2012 struct inet_sock *inet = inet_sk(sk);
2013 struct tcp_sock *tp = tcp_sk(sk);
2014 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
2015 struct inet_peer *peer = NULL;
2018 if (!rt || rt->rt_dst != inet->daddr) {
2019 peer = inet_getpeer(inet->daddr, 1);
2023 rt_bind_peer(rt, 1);
2028 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
2029 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2030 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
2031 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
2032 peer->tcp_ts = tp->rx_opt.ts_recent;
2042 int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
2044 struct inet_peer *peer = NULL;
2046 peer = inet_getpeer(tw->tw_daddr, 1);
2049 if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
2050 (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
2051 peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
2052 peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
2053 peer->tcp_ts = tw->tw_ts_recent;
2062 struct tcp_func ipv4_specific = {
2063 .queue_xmit = ip_queue_xmit,
2064 .send_check = tcp_v4_send_check,
2065 .rebuild_header = tcp_v4_rebuild_header,
2066 .conn_request = tcp_v4_conn_request,
2067 .syn_recv_sock = tcp_v4_syn_recv_sock,
2068 .remember_stamp = tcp_v4_remember_stamp,
2069 .net_header_len = sizeof(struct iphdr),
2070 .setsockopt = ip_setsockopt,
2071 .getsockopt = ip_getsockopt,
2072 .addr2sockaddr = v4_addr2sockaddr,
2073 .sockaddr_len = sizeof(struct sockaddr_in),
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
2079 static int tcp_v4_init_sock(struct sock *sk)
2081 struct tcp_sock *tp = tcp_sk(sk);
2083 skb_queue_head_init(&tp->out_of_order_queue);
2084 tcp_init_xmit_timers(sk);
2085 tcp_prequeue_init(tp);
2087 tp->rto = TCP_TIMEOUT_INIT;
2088 tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the
 * initial SYN frame in their delayed-ACK and congestion control
 * algorithms that we must have the following bandaid to talk
 * efficiently to them. -DaveM
 */
/* See draft-stevens-tcpca-spec-01 for discussion of the
 * initialization of these values.
 */
2100 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
2101 tp->snd_cwnd_clamp = ~0;
2102 tp->mss_cache_std = tp->mss_cache = 536;
2104 tp->reordering = sysctl_tcp_reordering;
2106 sk->sk_state = TCP_CLOSE;
2108 sk->sk_write_space = sk_stream_write_space;
2109 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2111 tp->af_specific = &ipv4_specific;
2113 sk->sk_sndbuf = sysctl_tcp_wmem[1];
2114 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2116 atomic_inc(&tcp_sockets_allocated);
2121 int tcp_v4_destroy_sock(struct sock *sk)
2123 struct tcp_sock *tp = tcp_sk(sk);
2125 tcp_clear_xmit_timers(sk);
/* Clean up the write buffer. */
2128 sk_stream_writequeue_purge(sk);
2130 /* Cleans up our, hopefully empty, out_of_order_queue. */
2131 __skb_queue_purge(&tp->out_of_order_queue);
/* Clean up the prequeue; it really must be empty. */
2134 __skb_queue_purge(&tp->ucopy.prequeue);
2136 /* Clean up a referenced TCP bind bucket. */
/*
 * If sendmsg cached page exists, toss it.
 */
2143 if (sk->sk_sndmsg_page) {
2144 __free_page(sk->sk_sndmsg_page);
2145 sk->sk_sndmsg_page = NULL;
2148 atomic_dec(&tcp_sockets_allocated);
2153 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2155 #ifdef CONFIG_PROC_FS
2156 /* Proc filesystem TCP sock list dumping. */
2158 static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
2160 return hlist_empty(head) ? NULL :
2161 list_entry(head->first, struct tcp_tw_bucket, tw_node);
2164 static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
2166 return tw->tw_node.next ?
2167 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2170 static void *listening_get_next(struct seq_file *seq, void *cur)
2172 struct tcp_sock *tp;
2173 struct hlist_node *node;
2174 struct sock *sk = cur;
2175 struct tcp_iter_state* st = seq->private;
2179 sk = sk_head(&tcp_listening_hash[0]);
2185 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2186 struct open_request *req = cur;
2188 tp = tcp_sk(st->syn_wait_sk);
2192 vxdprintk(VXD_CBIT(net, 6),
2193 "sk,req: %p [#%d] (from %d)", req->sk,
2194 (req->sk)?req->sk->sk_xid:0, vx_current_xid());
2196 !vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
2198 if (req->class->family == st->family) {
2204 if (++st->sbucket >= TCP_SYNQ_HSIZE)
2207 req = tp->listen_opt->syn_table[st->sbucket];
2209 sk = sk_next(st->syn_wait_sk);
2210 st->state = TCP_SEQ_STATE_LISTENING;
2211 read_unlock_bh(&tp->syn_wait_lock);
2214 read_lock_bh(&tp->syn_wait_lock);
2215 if (tp->listen_opt && tp->listen_opt->qlen)
2217 read_unlock_bh(&tp->syn_wait_lock);
2221 sk_for_each_from(sk, node) {
2222 vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
2223 sk, sk->sk_xid, vx_current_xid());
2224 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2226 if (sk->sk_family == st->family) {
2231 read_lock_bh(&tp->syn_wait_lock);
2232 if (tp->listen_opt && tp->listen_opt->qlen) {
2234 st->uid = sock_i_uid(sk);
2235 st->syn_wait_sk = sk;
2236 st->state = TCP_SEQ_STATE_OPENREQ;
2240 read_unlock_bh(&tp->syn_wait_lock);
2242 if (++st->bucket < TCP_LHTABLE_SIZE) {
2243 sk = sk_head(&tcp_listening_hash[st->bucket]);
2251 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2253 void *rc = listening_get_next(seq, NULL);
2255 while (rc && *pos) {
2256 rc = listening_get_next(seq, rc);
2262 static void *established_get_first(struct seq_file *seq)
2264 struct tcp_iter_state* st = seq->private;
2267 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
2269 struct hlist_node *node;
2270 struct tcp_tw_bucket *tw;
2272 /* We can reschedule _before_ having picked the target: */
2273 cond_resched_softirq();
2275 read_lock(&tcp_ehash[st->bucket].lock);
2276 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
2277 vxdprintk(VXD_CBIT(net, 6),
2278 "sk,egf: %p [#%d] (from %d)",
2279 sk, sk->sk_xid, vx_current_xid());
2280 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2282 if (sk->sk_family != st->family)
2287 st->state = TCP_SEQ_STATE_TIME_WAIT;
2288 tw_for_each(tw, node,
2289 &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
2290 vxdprintk(VXD_CBIT(net, 6),
2291 "tw: %p [#%d] (from %d)",
2292 tw, tw->tw_xid, vx_current_xid());
2293 if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
2295 if (tw->tw_family != st->family)
2300 read_unlock(&tcp_ehash[st->bucket].lock);
2301 st->state = TCP_SEQ_STATE_ESTABLISHED;
2307 static void *established_get_next(struct seq_file *seq, void *cur)
2309 struct sock *sk = cur;
2310 struct tcp_tw_bucket *tw;
2311 struct hlist_node *node;
2312 struct tcp_iter_state* st = seq->private;
2316 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2320 while (tw && (tw->tw_family != st->family ||
2321 !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
2328 read_unlock(&tcp_ehash[st->bucket].lock);
2329 st->state = TCP_SEQ_STATE_ESTABLISHED;
2331 /* We can reschedule between buckets: */
2332 cond_resched_softirq();
2334 if (++st->bucket < tcp_ehash_size) {
2335 read_lock(&tcp_ehash[st->bucket].lock);
2336 sk = sk_head(&tcp_ehash[st->bucket].chain);
2344 sk_for_each_from(sk, node) {
2345 vxdprintk(VXD_CBIT(net, 6),
2346 "sk,egn: %p [#%d] (from %d)",
2347 sk, sk->sk_xid, vx_current_xid());
2348 if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
2350 if (sk->sk_family == st->family)
2354 st->state = TCP_SEQ_STATE_TIME_WAIT;
2355 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
2363 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2365 void *rc = established_get_first(seq);
2368 rc = established_get_next(seq, rc);
2374 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2377 struct tcp_iter_state* st = seq->private;
2380 st->state = TCP_SEQ_STATE_LISTENING;
2381 rc = listening_get_idx(seq, &pos);
2384 tcp_listen_unlock();
2386 st->state = TCP_SEQ_STATE_ESTABLISHED;
2387 rc = established_get_idx(seq, pos);
2393 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2395 struct tcp_iter_state* st = seq->private;
2396 st->state = TCP_SEQ_STATE_LISTENING;
2398 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2401 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2404 struct tcp_iter_state* st;
2406 if (v == SEQ_START_TOKEN) {
2407 rc = tcp_get_idx(seq, 0);
2412 switch (st->state) {
2413 case TCP_SEQ_STATE_OPENREQ:
2414 case TCP_SEQ_STATE_LISTENING:
2415 rc = listening_get_next(seq, v);
2417 tcp_listen_unlock();
2419 st->state = TCP_SEQ_STATE_ESTABLISHED;
2420 rc = established_get_first(seq);
2423 case TCP_SEQ_STATE_ESTABLISHED:
2424 case TCP_SEQ_STATE_TIME_WAIT:
2425 rc = established_get_next(seq, v);
2433 static void tcp_seq_stop(struct seq_file *seq, void *v)
2435 struct tcp_iter_state* st = seq->private;
2437 switch (st->state) {
2438 case TCP_SEQ_STATE_OPENREQ:
2440 struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
2441 read_unlock_bh(&tp->syn_wait_lock);
2443 case TCP_SEQ_STATE_LISTENING:
2444 if (v != SEQ_START_TOKEN)
2445 tcp_listen_unlock();
2447 case TCP_SEQ_STATE_TIME_WAIT:
2448 case TCP_SEQ_STATE_ESTABLISHED:
2450 read_unlock(&tcp_ehash[st->bucket].lock);
2456 static int tcp_seq_open(struct inode *inode, struct file *file)
2458 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2459 struct seq_file *seq;
2460 struct tcp_iter_state *s;
2463 if (unlikely(afinfo == NULL))
2466 s = kmalloc(sizeof(*s), GFP_KERNEL);
2469 memset(s, 0, sizeof(*s));
2470 s->family = afinfo->family;
2471 s->seq_ops.start = tcp_seq_start;
2472 s->seq_ops.next = tcp_seq_next;
2473 s->seq_ops.show = afinfo->seq_show;
2474 s->seq_ops.stop = tcp_seq_stop;
2476 rc = seq_open(file, &s->seq_ops);
2479 seq = file->private_data;
2488 int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
2491 struct proc_dir_entry *p;
2495 afinfo->seq_fops->owner = afinfo->owner;
2496 afinfo->seq_fops->open = tcp_seq_open;
2497 afinfo->seq_fops->read = seq_read;
2498 afinfo->seq_fops->llseek = seq_lseek;
2499 afinfo->seq_fops->release = seq_release_private;
2501 p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
2509 void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
2513 proc_net_remove(afinfo->name);
2514 memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
2517 static void get_openreq4(struct sock *sk, struct open_request *req,
2518 char *tmpbuf, int i, int uid)
2520 int ttd = req->expires - jiffies;
2522 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2523 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
2525 req->af.v4_req.loc_addr,
2526 ntohs(inet_sk(sk)->sport),
2527 req->af.v4_req.rmt_addr,
2528 ntohs(req->rmt_port),
2530 0, 0, /* could print option size, but that is af dependent. */
2531 1, /* timers active (only the expire timer) */
2532 jiffies_to_clock_t(ttd),
2535 0, /* non standard timer */
2536 0, /* open_requests have no inode */
2537 atomic_read(&sk->sk_refcnt),
2541 static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
2544 unsigned long timer_expires;
2545 struct tcp_sock *tp = tcp_sk(sp);
2546 struct inet_sock *inet = inet_sk(sp);
2547 unsigned int dest = inet->daddr;
2548 unsigned int src = inet->rcv_saddr;
2549 __u16 destp = ntohs(inet->dport);
2550 __u16 srcp = ntohs(inet->sport);
2552 if (tp->pending == TCP_TIME_RETRANS) {
2554 timer_expires = tp->timeout;
2555 } else if (tp->pending == TCP_TIME_PROBE0) {
2557 timer_expires = tp->timeout;
2558 } else if (timer_pending(&sp->sk_timer)) {
2560 timer_expires = sp->sk_timer.expires;
2563 timer_expires = jiffies;
2566 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2567 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
2568 i, src, srcp, dest, destp, sp->sk_state,
2569 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2571 jiffies_to_clock_t(timer_expires - jiffies),
2576 atomic_read(&sp->sk_refcnt), sp,
2577 tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2579 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
2582 static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
2584 unsigned int dest, src;
2586 int ttd = tw->tw_ttd - jiffies;
2591 dest = tw->tw_daddr;
2592 src = tw->tw_rcv_saddr;
2593 destp = ntohs(tw->tw_dport);
2594 srcp = ntohs(tw->tw_sport);
2596 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
2597 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
2598 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2599 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2600 atomic_read(&tw->tw_refcnt), tw);
2605 static int tcp4_seq_show(struct seq_file *seq, void *v)
2607 struct tcp_iter_state* st;
2608 char tmpbuf[TMPSZ + 1];
2610 if (v == SEQ_START_TOKEN) {
2611 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2612 " sl local_address rem_address st tx_queue "
2613 "rx_queue tr tm->when retrnsmt uid timeout "
2619 switch (st->state) {
2620 case TCP_SEQ_STATE_LISTENING:
2621 case TCP_SEQ_STATE_ESTABLISHED:
2622 get_tcp4_sock(v, tmpbuf, st->num);
2624 case TCP_SEQ_STATE_OPENREQ:
2625 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
2627 case TCP_SEQ_STATE_TIME_WAIT:
2628 get_timewait4_sock(v, tmpbuf, st->num);
2631 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
2636 static struct file_operations tcp4_seq_fops;
2637 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2638 .owner = THIS_MODULE,
2641 .seq_show = tcp4_seq_show,
2642 .seq_fops = &tcp4_seq_fops,
2645 int __init tcp4_proc_init(void)
2647 return tcp_proc_register(&tcp4_seq_afinfo);
2650 void tcp4_proc_exit(void)
2652 tcp_proc_unregister(&tcp4_seq_afinfo);
2654 #endif /* CONFIG_PROC_FS */
2656 struct proto tcp_prot = {
2658 .owner = THIS_MODULE,
2660 .connect = tcp_v4_connect,
2661 .disconnect = tcp_disconnect,
2662 .accept = tcp_accept,
2664 .init = tcp_v4_init_sock,
2665 .destroy = tcp_v4_destroy_sock,
2666 .shutdown = tcp_shutdown,
2667 .setsockopt = tcp_setsockopt,
2668 .getsockopt = tcp_getsockopt,
2669 .sendmsg = tcp_sendmsg,
2670 .recvmsg = tcp_recvmsg,
2671 .backlog_rcv = tcp_v4_do_rcv,
2672 .hash = tcp_v4_hash,
2673 .unhash = tcp_unhash,
2674 .get_port = tcp_v4_get_port,
2675 .enter_memory_pressure = tcp_enter_memory_pressure,
2676 .sockets_allocated = &tcp_sockets_allocated,
2677 .memory_allocated = &tcp_memory_allocated,
2678 .memory_pressure = &tcp_memory_pressure,
2679 .sysctl_mem = sysctl_tcp_mem,
2680 .sysctl_wmem = sysctl_tcp_wmem,
2681 .sysctl_rmem = sysctl_tcp_rmem,
2682 .max_header = MAX_TCP_HEADER,
2683 .obj_size = sizeof(struct tcp_sock),
2688 void __init tcp_v4_init(struct net_proto_family *ops)
2690 int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
2692 panic("Failed to create the TCP control socket.\n");
2693 tcp_socket->sk->sk_allocation = GFP_ATOMIC;
2694 inet_sk(tcp_socket->sk)->uc_ttl = -1;
/* Unhash it so that IP input processing does not even
 * see it; we do not wish this socket to see incoming packets.
 */
2700 tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
2703 EXPORT_SYMBOL(ipv4_specific);
2704 EXPORT_SYMBOL(tcp_bind_hash);
2705 EXPORT_SYMBOL(tcp_bucket_create);
2706 EXPORT_SYMBOL(tcp_hashinfo);
2707 EXPORT_SYMBOL(tcp_inherit_port);
2708 EXPORT_SYMBOL(tcp_listen_wlock);
2709 EXPORT_SYMBOL(tcp_port_rover);
2710 EXPORT_SYMBOL(tcp_prot);
2711 EXPORT_SYMBOL(tcp_put_port);
2712 EXPORT_SYMBOL(tcp_unhash);
2713 EXPORT_SYMBOL(tcp_v4_conn_request);
2714 EXPORT_SYMBOL(tcp_v4_connect);
2715 EXPORT_SYMBOL(tcp_v4_do_rcv);
2716 EXPORT_SYMBOL(tcp_v4_rebuild_header);
2717 EXPORT_SYMBOL(tcp_v4_remember_stamp);
2718 EXPORT_SYMBOL(tcp_v4_send_check);
2719 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2721 #ifdef CONFIG_PROC_FS
2722 EXPORT_SYMBOL(tcp_proc_register);
2723 EXPORT_SYMBOL(tcp_proc_unregister);
2725 EXPORT_SYMBOL(sysctl_local_port_range);
2726 EXPORT_SYMBOL(sysctl_max_syn_backlog);
2727 EXPORT_SYMBOL(sysctl_tcp_low_latency);
2728 EXPORT_SYMBOL(sysctl_tcp_tw_reuse);