/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/vserver/debug.h>
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock	= __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};

static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet_csk_bind_conflict);
}

static void tcp_v4_hash(struct sock *sk)
{
	inet_hash(&tcp_hashinfo, sk);
}

void tcp_unhash(struct sock *sk)
{
	inet_unhash(&tcp_hashinfo, sk);
}

static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	return secure_tcp_sequence_number(skb->nh.iph->daddr,
					  skb->nh.iph->saddr,
					  skb->h.th->dest,
					  skb->h.th->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
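/*
 * A note on the write_seq offset chosen above: tw_snd_nxt + 65535 + 2
 * presumably starts the new incarnation's sequence space beyond the
 * largest receive window the old connection could have advertised
 * without window scaling, so late duplicates from the previous
 * connection cannot land inside the new sequence space.
 */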
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/* This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet_too_big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
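/*
 * Worked example of the fast path above: a connection running with a
 * 1500 byte path MTU (1460 byte MSS after 40 bytes of IPv4+TCP
 * headers, options aside) receives ICMP_FRAG_NEEDED advertising 1400.
 * tcp_sync_mss() drops the effective MSS to about 1360 and
 * tcp_simple_retransmit() resends the queued data at the new size
 * immediately instead of waiting for the retransmit timer.
 */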
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
			 th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can f.e. if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else { /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = skb->nh.iph;
	th = skb->h.th;

	th->check = 0;
	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_HW;
	return 0;
}
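/*
 * In both CHECKSUM_HW cases above the checksum field is seeded with
 * the inverted pseudo-header sum, and skb->csum records the offset of
 * the check field inside the TCP header; the device (or the GSO
 * segmentation path) is then expected to fold the sum over the actual
 * data into that field on transmit.
 */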
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcphdr rth;
	struct ip_reply_arg arg;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rth, 0, sizeof(struct tcphdr));
	rth.dest   = th->source;
	rth.source = th->dest;
	rth.doff   = sizeof(struct tcphdr) / 4;
	rth.rst    = 1;

	if (th->ack) {
		rth.seq = th->ack_seq;
	} else {
		rth.ack = 1;
		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				    skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof arg);
	arg.iov[0].iov_base = (unsigned char *)&rth;
	arg.iov[0].iov_len  = sizeof rth;
	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}
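/*
 * The sequence numbers chosen above follow RFC 793: if the offending
 * segment carried an ACK, the RST is sent with that ACK value as its
 * own sequence number and no ACK bit; otherwise the RST carries
 * sequence number zero and ACKs exactly the sequence space the
 * segment consumed (SYN and FIN each counting for one).
 */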
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th;
	struct {
		struct tcphdr th;
		u32 tsopt[3];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof arg);

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				     (TCPOPT_TIMESTAMP << 8) |
				     TCPOLEN_TIMESTAMP);
		rep.tsopt[1] = htonl(tcp_time_stamp);
		rep.tsopt[2] = htonl(ts);
		arg.iov[0].iov_len = sizeof(rep);
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}
/*
 *	Send a SYN-ACK after having received an ACK.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto out;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(skb->h.th->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
struct request_sock_ops tcp_request_sock_ops = {
	.family		= PF_INET,
	.obj_size	= sizeof(struct tcp_request_sock),
	.rtx_syn_ack	= tcp_v4_send_synack,
	.send_ack	= tcp_v4_reqsk_send_ack,
	.destructor	= tcp_v4_reqsk_destructor,
	.send_reset	= tcp_v4_send_reset,
};

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
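/*
 * These two tables are the hooks through which the af-independent
 * request_sock and timewait code calls back into IPv4 TCP; e.g.
 * tcp_twsk_unique() above runs (via ->twsk_unique) when connect()
 * tries to reuse a four-tuple still held by a TIME_WAIT bucket.
 */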
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie) {
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on web server, which
		 * contains information interesting only for windows'
		 * users) do not send their stamp in SYN. It is easy case.
		 * We simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				dst_release(dst);
				goto drop_and_free;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from %u.%u.%u.%u/%u\n",
				       NIPQUAD(saddr),
				       ntohs(skb->h.th->source));
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(sk, skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		reqsk_free(req);
	} else {
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
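/*
 * At this point a non-cookie request sits in the SYN queue awaiting
 * the final ACK of the handshake.  In the syncookie case no state is
 * kept at all: the request is freed right after the SYN-ACK goes out
 * and must be rebuilt from the cookie when the ACK arrives (see
 * tcp_v4_hnd_req() below).
 */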
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		   = tcp_sk(newsk);
	newinet		   = inet_sk(newsk);
	ireq		   = inet_rsk(req);
	newinet->daddr	   = ireq->rmt_addr;
	newinet->rcv_saddr = ireq->loc_addr;
	newinet->saddr	   = ireq->loc_addr;
	newinet->opt	   = ireq->opt;
	ireq->opt	   = NULL;
	newinet->mc_index  = inet_iif(skb);
	newinet->mc_ttl	   = skb->nh.iph->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	__inet_hash(&tcp_hashinfo, newsk, 0);
	__inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
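/*
 * Note the ownership transfer above: ireq->opt moves to newinet->opt
 * and the request's pointer is cleared, so the saved IP options are
 * freed with the new socket instead of by tcp_v4_reqsk_destructor().
 */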
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct iphdr *iph = skb->nh.iph;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
					th->source, skb->nh.iph->daddr,
					ntohs(th->dest), inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				  skb->nh.iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
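/*
 * The skb->len <= 76 cutoff above mirrors the receive copybreak:
 * short segments are checksummed right away, while for larger ones
 * only the pseudo-header seed is stored so the remainder of the sum
 * can be folded in later, ideally while copying data to user space.
 */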
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
			   skb->nh.iph->daddr, ntohs(th->dest),
			   inet_iif(skb));

	if (!sk)
		goto no_tcp_socket;

#if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
	/* Silently drop if VNET is active and the context is not
	 * entitled to read the packet.
	 */
	/* Transfer ownership of reusable TIME_WAIT buckets to
	 * whomever VNET decided should own the packet.
	 */
	if (sk->sk_state == TCP_TIME_WAIT)
		sk->sk_xid = skb->xid;

	if ((int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
		goto discard_it;
#endif

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
#if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
	} else if (vnet_active && skb->sk) {
		/* VNET: Suppress RST if the port was bound to a (presumably raw) socket */
#endif
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}
	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
							skb->nh.iph->daddr,
							ntohs(th->dest),
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule((struct inet_timewait_sock *)sk,
					     &tcp_death_row);
			inet_twsk_put((struct inet_timewait_sock *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
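/*
 * Receive path summary: segments for a socket locked by a process go
 * to the backlog (processed when the lock is released), segments a
 * reader is already sleeping on may go to the prequeue, and everything
 * else is handled immediately via tcp_v4_do_rcv().
 */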
/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
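/*
 * Together with the tw_recycle checks in tcp_v4_connect() and
 * tcp_v4_conn_request(), the two helpers above implement the
 * per-destination timestamp cache: stamps are written to the inet_peer
 * when a connection or its TIME_WAIT bucket dies, and consulted later
 * for PAWS-style rejection of segments from old incarnations.
 */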
struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
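/*
 * The 536 used for mss_cache (and for mss_clamp in the connect and
 * conn_request paths) is the RFC 1122 default effective send MSS: the
 * 576 byte minimum reassembly buffer minus 40 bytes of IPv4 and TCP
 * headers.  It applies until a real MSS is learned from the peer's
 * SYN or from the route.
 */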
int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	sk_stream_writequeue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(&tcp_hashinfo, sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return tw->tw_node.next ?
		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_node *node;
	struct sock *sk = cur;
	struct tcp_iter_state* st = seq->private;

	if (!sk) {
		st->bucket = 0;
		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
		goto get_sk;
	}

	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				vxdprintk(VXD_CBIT(net, 6),
					  "sk,req: %p [#%d] (from %d)", req->sk,
					  (req->sk)?req->sk->sk_xid:0, vx_current_xid());
				if ((!req->sk ||
				     vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH)) &&
				    req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= TCP_SYNQ_HSIZE)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_for_each_from(sk, node) {
		vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
			  sk, sk->sk_xid, vx_current_xid());
		if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	if (++st->bucket < INET_LHTABLE_SIZE) {
		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state* st = seq->private;
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_node *node;
		struct inet_timewait_sock *tw;

		/* We can reschedule _before_ having picked the target: */
		cond_resched_softirq();

		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			vxdprintk(VXD_CBIT(net, 6),
				  "sk,egf: %p [#%d] (from %d)",
				  sk, sk->sk_xid, vx_current_xid());
			if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
				continue;
			if (sk->sk_family != st->family)
				continue;
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
			vxdprintk(VXD_CBIT(net, 6),
				  "tw: %p [#%d] (from %d)",
				  tw, tw->tw_xid, vx_current_xid());
			if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
				continue;
			if (tw->tw_family != st->family)
				continue;
			rc = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	struct tcp_iter_state* st = seq->private;

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family ||
			!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* We can reschedule between buckets: */
		cond_resched_softirq();

		if (++st->bucket < tcp_hashinfo.ehash_size) {
			read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
		} else {
			cur = NULL;
			goto out;
		}
	} else
		sk = sk_next(sk);

	sk_for_each_from(sk, node) {
		vxdprintk(VXD_CBIT(net, 6),
			  "sk,egn: %p [#%d] (from %d)",
			  sk, sk->sk_xid, vx_current_xid());
		if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
			continue;
		if (sk->sk_family == st->family)
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state* st = seq->private;

	inet_listen_lock(&tcp_hashinfo);
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		inet_listen_unlock(&tcp_hashinfo);
		local_bh_disable();
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state* st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state* st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			inet_listen_unlock(&tcp_hashinfo);
			local_bh_disable();
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			inet_listen_unlock(&tcp_hashinfo);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		local_bh_enable();
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	struct tcp_iter_state *s;
	int rc;

	if (unlikely(afinfo == NULL))
		return -EINVAL;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	s->family		= afinfo->family;
	s->seq_ops.start	= tcp_seq_start;
	s->seq_ops.next		= tcp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= tcp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;
	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= tcp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
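/*
 * Usage: an af-specific descriptor supplies the family, a show routine
 * and an empty file_operations, and tcp_proc_register() fills in the
 * seq_file plumbing and creates the /proc/net entry; tcp4_seq_afinfo
 * below is the IPv4 instance.
 */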
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 char *tmpbuf, int i, int uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0,  /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct inet_sock *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %u %u %u %u %d",
		i, src, srcp, dest, destp, sp->sk_state,
		tp->write_seq - tp->snd_una,
		(sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog
					     : (tp->rcv_nxt - tp->copied_seq),
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sp),
		icsk->icsk_probes_out,
		sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		icsk->icsk_rto,
		icsk->icsk_ack.ato,
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st;
	char tmpbuf[TMPSZ + 1];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, tmpbuf, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, tmpbuf, st->num);
		break;
	}
	seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
out:
	return 0;
}
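/*
 * Illustrative /proc/net/tcp line as produced by get_tcp4_sock() for
 * a socket listening on 0.0.0.0:22 (all values made up):
 *
 *  0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 1234 1 c1d2e3f4 200 40 0 2 -1
 */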
static struct file_operations tcp4_seq_fops;
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp",
	.family		= AF_INET,
	.seq_show	= tcp4_seq_show,
	.seq_fops	= &tcp4_seq_fops,
};

int __init tcp4_proc_init(void)
{
	return tcp_proc_register(&tcp4_seq_afinfo);
}

void tcp4_proc_exit(void)
{
	tcp_proc_unregister(&tcp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= tcp_v4_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v4_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

void __init tcp_v4_init(struct net_proto_family *ops)
{
	if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCP control socket.\n");
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_local_port_range);
EXPORT_SYMBOL(sysctl_tcp_low_latency);