/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *					to bind to a single port at the same
 *					time.
 */
#include <linux/config.h>

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/vserver/debug.h>
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);

struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet_csk_bind_conflict);
}

static void tcp_v4_hash(struct sock *sk)
{
	inet_hash(&tcp_hashinfo, sk);
}

void tcp_unhash(struct sock *sk)
{
	inet_unhash(&tcp_hashinfo, sk);
}

static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	return secure_tcp_sequence_number(skb->nh.iph->daddr,
					  skb->nh.iph->saddr,
					  skb->h.th->dest,
					  skb->h.th->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host, but per port pair, and the TW bucket is used
	   as the state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
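/*
 * Editor's note: an illustrative sketch (not part of the original file)
 * of the wraparound-safe comparison that the TIME-WAIT reuse path above
 * relies on.  Casting the 32-bit difference to a signed type gives
 * serial-number arithmetic, so the new write_seq -- old snd_nxt plus a
 * maximal unscaled 64K window plus 2 (presumably leaving room for the
 * old SYN/FIN sequence slots) -- always compares as "after" everything
 * the old incarnation could still have in flight.
 */
#if 0
static inline int tcp_seq_after(__u32 a, __u32 b)
{
	/* true iff a is "after" b modulo 2^32 */
	return (__s32)(a - b) > 0;
}
#endif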
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
		 * when trying a new connection.
		 */
		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete the initialization afterwards.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/* This unhashes the socket and releases the local port, if necessary. */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go
	 * through unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big messages
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
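/*
 * Editor's note: a hedged sketch (not from the original file) of the
 * basic relationship tcp_sync_mss() maintains between the learned path
 * MTU and the MSS.  For a plain IPv4/TCP packet with no options, the
 * MSS is the MTU minus the two fixed headers; the real helper also
 * accounts for IP options and extension headers.
 */
#if 0
static inline unsigned int mss_from_pmtu(unsigned int pmtu)
{
	/* 20 bytes IPv4 header + 20 bytes TCP header; options not counted */
	return pmtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
}
#endif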
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
			 th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
	 * to be treated as hard errors (well, FRAG_FAILED too, but it
	 * is obsoleted by pmtu discovery).
	 *
	 * Note that on today's Internet, where routing is unreliable and
	 * broken firewalls sit in every dark corner sending random errors
	 * on their masters' orders, even these two messages have lost
	 * their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = skb->nh.iph;
	th = skb->h.th;

	th->check = 0;
	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_HW;
	return 0;
}
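/*
 * Editor's note: an illustrative, self-contained sketch (not part of
 * the original file) of the ones'-complement arithmetic behind
 * tcp_v4_check()/csum_tcpudp_nofold(): sum the pseudo header (source
 * and destination addresses, protocol, TCP length) and the TCP segment
 * as 16-bit words, fold the carries back in, and complement.  Byte
 * order handling is simplified here compared with the real helpers.
 */
#if 0
static u16 tcp_checksum_ref(u32 saddr, u32 daddr, const u8 *seg, int len)
{
	u32 sum = 0;
	int i;

	/* pseudo header */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP + len;

	/* TCP header + payload, with the check field taken as zero */
	for (i = 0; i + 1 < len; i += 2)
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += seg[len - 1] << 8;

	while (sum >> 16)	/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum & 0xffff;
}
#endif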
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks:  why I NEVER use socket parameters (TOS, TTL etc.)
 *		       for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, and if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP.  So we build the reply using only the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcphdr rth;
	struct ip_reply_arg arg;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rth, 0, sizeof(struct tcphdr));
	rth.dest   = th->source;
	rth.source = th->dest;
	rth.doff   = sizeof(struct tcphdr) / 4;
	rth.rst    = 1;

	if (th->ack)
		rth.seq = th->ack_seq;
	else {
		rth.ack = 1;
		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				    skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof arg);
	arg.iov[0].iov_base = (unsigned char *)&rth;
	arg.iov[0].iov_len  = sizeof rth;
	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}
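/*
 * Editor's worked example (not in the original): for an incoming
 * segment without an ACK bit, with seq = 5000, 100 bytes of payload,
 * doff = 5 (20-byte header, no options) and SYN/FIN clear, skb->len is
 * 120, so the RST above carries ack_seq = 5000 + 0 + 0 + 120 - 20 =
 * 5100: it acknowledges everything the offending segment occupied in
 * sequence space, as RFC 793 requires.
 */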
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside of socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th;
	struct {
		struct tcphdr th;
		u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof arg);

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				     (TCPOPT_TIMESTAMP << 8) |
				     TCPOLEN_TIMESTAMP);
		rep.tsopt[1] = htonl(tcp_time_stamp);
		rep.tsopt[2] = htonl(ts);
		arg.iov[0].iov_len = sizeof(rep);
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}
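/*
 * Editor's note: a sketch (not in the original) of the 12-byte option
 * block built above.  Two NOPs pad the 10-byte timestamp option to a
 * 32-bit boundary, which is exactly TCPOLEN_TSTAMP_ALIGNED:
 *
 *	byte 0:     0x01 (TCPOPT_NOP)
 *	byte 1:     0x01 (TCPOPT_NOP)
 *	byte 2:     0x08 (TCPOPT_TIMESTAMP)
 *	byte 3:     0x0a (TCPOLEN_TIMESTAMP, option length 10)
 *	bytes 4-7:  TSval (tcp_time_stamp, network byte order)
 *	bytes 8-11: TSecr (ts, network byte order)
 */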
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto out;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(skb->h.th->dest));
	}
}
#endif
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
struct request_sock_ops tcp_request_sock_ops = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast addresses. */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations: they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop the request. It is better
	 * than clogging the syn queue with openreqs with exponentially
	 * increasing timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie) {
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on a web server,
		 * which contains information interesting only for windows'
		 * users) do not send their stamp in SYN. It is an easy case:
		 * we simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				dst_release(dst);
				goto drop_and_free;
			}
		}
		/* Kill the following clause if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive.  It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from %u.%u.%u.%u/%u\n",
				       NIPQUAD(saddr),
				       ntohs(skb->h.th->source));
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(sk, skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		reqsk_free(req);
	} else {
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0;
}
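/*
 * Editor's note: a hypothetical helper (not in the original file)
 * restating the drop heuristic above: without syncookies, refuse a new
 * openreq once fewer than a quarter of the sysctl_max_syn_backlog
 * slots remain free, unless the peer is "proven alive" by a cached
 * timestamp or a cached RTT.
 */
#if 0
static int syn_queue_last_quarter(struct sock *sk)
{
	return sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
	       (sysctl_max_syn_backlog >> 2);
}
#endif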
/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = skb->nh.iph->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	__inet_hash(&tcp_hashinfo, newsk, 0);
	__inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct iphdr *iph = skb->nh.iph;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
					th->source, skb->nh.iph->daddr,
					ntohs(th->dest), inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				  skb->nh.iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;
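	/* Editor's worked example (not in the original): for a segment
	 * with seq = 1000 carrying 100 bytes of payload, doff = 8
	 * (32-byte header, i.e. 12 bytes of options) and SYN/FIN clear,
	 * skb->len = 132, so end_seq = 1000 + 0 + 0 + 132 - 32 = 1100:
	 * the sequence number just past the last payload byte.  SYN and
	 * FIN each consume one extra sequence number, hence the
	 * th->syn + th->fin terms.
	 */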
	sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
			   skb->nh.iph->daddr, ntohs(th->dest),
			   inet_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}
	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
							skb->nh.iph->daddr,
							ntohs(th->dest),
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule((struct inet_timewait_sock *)sk,
					     &tcp_death_row);
			inet_twsk_put((struct inet_timewait_sock *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
/* VJ's idea. Save the last timestamp seen from this destination and
 * hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter synchronized state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc() and so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	sk_stream_writequeue_purge(sk);

	/* Clean up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Clean up the prequeue; it really should be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(&tcp_hashinfo, sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return tw->tw_node.next ?
		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_node *node;
	struct sock *sk = cur;
	struct tcp_iter_state *st = seq->private;

	if (!sk) {
		st->bucket = 0;
		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
		goto get_sk;
	}

	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				vxdprintk(VXD_CBIT(net, 6),
					  "sk,req: %p [#%d] (from %d)", req->sk,
					  (req->sk)?req->sk->sk_xid:0, vx_current_xid());
				if (req->sk &&
				    !vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH)) {
					req = req->dl_next;
					continue;
				}
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= TCP_SYNQ_HSIZE)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_for_each_from(sk, node) {
		vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
			  sk, sk->sk_xid, vx_current_xid());
		if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	if (++st->bucket < INET_LHTABLE_SIZE) {
		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_node *node;
		struct inet_timewait_sock *tw;

		/* We can reschedule _before_ having picked the target: */
		cond_resched_softirq();

		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			vxdprintk(VXD_CBIT(net, 6),
				  "sk,egf: %p [#%d] (from %d)",
				  sk, sk->sk_xid, vx_current_xid());
			if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
				continue;
			if (sk->sk_family != st->family)
				continue;
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
			vxdprintk(VXD_CBIT(net, 6),
				  "tw: %p [#%d] (from %d)",
				  tw, tw->tw_xid, vx_current_xid());
			if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
				continue;
			if (tw->tw_family != st->family)
				continue;
			rc = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	struct tcp_iter_state *st = seq->private;

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family ||
			      !vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* We can reschedule between buckets: */
		cond_resched_softirq();

		if (++st->bucket < tcp_hashinfo.ehash_size) {
			read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
		} else {
			cur = NULL;
			goto out;
		}
	} else
		sk = sk_next(sk);

	sk_for_each_from(sk, node) {
		vxdprintk(VXD_CBIT(net, 6),
			  "sk,egn: %p [#%d] (from %d)",
			  sk, sk->sk_xid, vx_current_xid());
		if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
			continue;
		if (sk->sk_family == st->family)
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	inet_listen_lock(&tcp_hashinfo);
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		inet_listen_unlock(&tcp_hashinfo);
		local_bh_disable();
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			inet_listen_unlock(&tcp_hashinfo);
			local_bh_disable();
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			inet_listen_unlock(&tcp_hashinfo);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		local_bh_enable();
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	struct tcp_iter_state *s;
	int rc;

	if (unlikely(afinfo == NULL))
		return -EINVAL;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	memset(s, 0, sizeof(*s));
	s->family		= afinfo->family;
	s->seq_ops.start	= tcp_seq_start;
	s->seq_ops.next		= tcp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= tcp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;
	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= tcp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 char *tmpbuf, int i, int uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct inet_sock *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %u %u %u %u %d",
		i, src, srcp, dest, destp, sp->sk_state,
		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sp),
		icsk->icsk_probes_out,
		sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		icsk->icsk_rto,
		icsk->icsk_ack.ato,
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	char tmpbuf[TMPSZ + 1];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, tmpbuf, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, tmpbuf, st->num);
		break;
	}
	seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
out:
	return 0;
}
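/*
 * Editor's note: a small userspace sketch (not part of the kernel file)
 * showing how the fixed-width format emitted above is consumed.
 * Addresses and ports are printed as raw hex ("%08X:%04X"), so a line
 * for 127.0.0.1:8080 on a little-endian machine reads "0100007F:1F90".
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int src, dst, state;
	unsigned int srcp, dstp;
	char line[256];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);	/* skip the header line */
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%*d: %x:%x %x:%x %x",
			   &src, &srcp, &dst, &dstp, &state) == 5)
			printf("%08X:%u -> %08X:%u st %02X\n",
			       src, srcp, dst, dstp, state);
	fclose(f);
	return 0;
}
#endif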
static struct file_operations tcp4_seq_fops;
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp",
	.family		= AF_INET,
	.seq_show	= tcp4_seq_show,
	.seq_fops	= &tcp4_seq_fops,
};

int __init tcp4_proc_init(void)
{
	return tcp_proc_register(&tcp4_seq_afinfo);
}

void tcp4_proc_exit(void)
{
	tcp_proc_unregister(&tcp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= tcp_v4_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v4_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
void __init tcp_v4_init(struct net_proto_family *ops)
{
	if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCP control socket.\n");
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_local_port_range);
EXPORT_SYMBOL(sysctl_tcp_low_latency);