/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/config.h>

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/vserver/debug.h>
int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);

struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet_csk_bind_conflict);
}

static void tcp_v4_hash(struct sock *sk)
{
	inet_hash(&tcp_hashinfo, sk);
}

void tcp_unhash(struct sock *sk)
{
	inet_unhash(&tcp_hashinfo, sk);
}

static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	return secure_tcp_sequence_number(skb->nh.iph->daddr,
					  skb->nh.iph->saddr,
					  skb->h.th->dest,
					  skb->h.th->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's: only the timestamp cache is
	   held per port pair rather than per host, and the TW bucket is used
	   as the state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
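		/* The offset above (old snd_nxt + 65535 + 2) places the new
		 * incarnation's sequence numbers beyond anything the previous
		 * connection could still have in flight, even with a maximal
		 * 64K window; the timestamps restored below catch the rest.
		 */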
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	u32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk);
	if (tmp < 0)
		return tmp;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);

		/* VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table when entering TIME-WAIT
		 * state, and initialize rx_opt.ts_recent from it when trying
		 * a new connection.
		 */
		if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;
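	/* 536 is the RFC 1122 default effective MSS (the 576 byte minimum
	 * reassembly buffer minus 40 bytes of IP + TCP headers); it stands
	 * in until the peer advertises an MSS of its own.
	 */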

	/* Socket identity is still unknown (sport may be zero).
	 * However we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP, inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;
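	/* Seeding the IP identification counter from the initial sequence
	 * number, mixed with jiffies, keeps successive connections from
	 * starting at a predictable IP ID.
	 */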

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/* This unhashes the socket and releases the local port, if necessary. */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go
	 * through unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
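/* The "fast" part above: on an incoming FRAG_NEEDED we shrink the MSS and
 * retransmit immediately via tcp_simple_retransmit(), instead of waiting a
 * full RTO for the retransmit timer to discover the loss.
 */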
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	int type = skb->h.icmph->type;
	int code = skb->h.icmph->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
			 th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * RFC 1122 (4.2.3.9) allows us to treat only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too, but it is
	 * obsoleted by pmtu discovery).
	 *
	 * Note that on the modern Internet, where routing is unreliable and
	 * broken firewalls sit in every dark corner sending random errors
	 * as ordered by their masters, even these two messages have lost
	 * their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = skb->nh.iph;
	th = skb->h.th;

	th->check = 0;
	th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_HW;
	return 0;
}
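/* In both routines above, CHECKSUM_HW on transmit means only the folded
 * pseudo-header sum is placed in th->check, while skb->csum records where
 * the checksum field sits within the TCP header; the device (or the
 * software fallback) folds in the payload sum later.
 */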
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcphdr rth;
	struct ip_reply_arg arg;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rth, 0, sizeof(struct tcphdr));
	rth.dest   = th->source;
	rth.source = th->dest;
	rth.doff   = sizeof(struct tcphdr) / 4;
	rth.rst    = 1;

	if (th->ack) {
		rth.seq = th->ack_seq;
	} else {
		rth.ack = 1;
		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				    skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof arg);
	arg.iov[0].iov_base = (unsigned char *)&rth;
	arg.iov[0].iov_len  = sizeof rth;
	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}
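/* Both this RST path and tcp_v4_send_ack() below transmit through the
 * static tcp_socket control socket via ip_send_reply(), so a reply can be
 * generated from the offending segment alone, with no local socket state.
 */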
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th;
	struct {
		struct tcphdr th;
		u32 tsopt[3];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof arg);

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				     (TCPOPT_TIMESTAMP << 8) |
				     TCPOLEN_TIMESTAMP);
		rep.tsopt[1] = htonl(tcp_time_stamp);
		rep.tsopt[2] = htonl(ts);
		arg.iov[0].iov_len = sizeof(rep);
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
				      skb->nh.iph->saddr, /*XXX*/
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto out;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v4_check(th, skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		if (err == NET_XMIT_CN)
			err = 0;
	}

out:
	dst_release(dst);
	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(skb->h.th->dest));
	}
}
#endif
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
struct request_sock_ops tcp_request_sock_ops = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__u32 saddr = skb->nh.iph->saddr;
	__u32 daddr = skb->nh.iph->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations; they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop the request. It is better
	 * than clogging the syn queue with openreqs with exponentially
	 * increasing timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie) {
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but seen on web servers
		 * carrying information interesting only for Windows
		 * users) do not send their stamp in the SYN. It is an
		 * easy case: we simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, skb->h.th);

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				dst_release(dst);
				goto drop_and_free;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies, the last quarter of the
			 * backlog is filled with destinations proven to
			 * be alive. It means that we continue to
			 * communicate with destinations already remembered
			 * at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from %u.%u.%u.%u/%u\n",
				       NIPQUAD(saddr),
				       ntohs(skb->h.th->source));
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(sk, skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		reqsk_free(req);
	} else {
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
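		/* The request now sits in the listener's SYN table;
		 * TCP_TIMEOUT_INIT arms the SYN-ACK retransmit/expiry
		 * timer that tends it until the handshake's final ACK
		 * arrives or the request expires.
		 */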
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0;
}
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = skb->nh.iph->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	__inet_hash(&tcp_hashinfo, newsk, 0);
	__inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct iphdr *iph = skb->nh.iph;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
					th->source, skb->nh.iph->daddr,
					ntohs(th->dest), inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				  skb->nh.iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
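/* 76 bytes is roughly the break-even point: shorter packets are verified
 * right here, while longer ones keep a deferred checksum so it can be
 * folded into the copy to user space.
 */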
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;
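	/* The control block now carries seq/end_seq computed once for the
	 * whole receive path; note that SYN and FIN each occupy one unit
	 * of sequence space, hence the th->syn + th->fin terms above.
	 */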

	sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
			   skb->nh.iph->daddr, ntohs(th->dest),
			   inet_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
#if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
	/* Silently drop if VNET is active and the context is not
	 * entitled to read the packet.
	 */
	if (vnet_active) {
		/* Transfer ownership of reusable TIME_WAIT buckets to
		 * whomever VNET decided should own the packet.
		 */
		if (sk->sk_state == TCP_TIME_WAIT)
			sk->sk_xid = skb->xid;

		if ((int) sk->sk_xid > 0 && sk->sk_xid != skb->xid)
			goto discard_and_relse;
	}
#endif

	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
#if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
	} else if (vnet_active && skb->sk) {
		/* VNET: Suppress RST if the port was bound to a (presumably raw) socket */
#endif
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *) sk);
		goto discard_it;
	}
	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
							skb->nh.iph->daddr,
							ntohs(th->dest),
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule((struct inet_timewait_sock *)sk,
					     &tcp_death_row);
			inet_twsk_put((struct inet_timewait_sock *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
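	/* tcp_init_congestion_ops is only an initial stub; the configured
	 * congestion control module is attached later, once the connection
	 * reaches established state.
	 */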

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	sk_stream_writequeue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Clean the prequeue; it must really be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(&tcp_hashinfo, sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return tw->tw_node.next ?
		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_node *node;
	struct sock *sk = cur;
	struct tcp_iter_state* st = seq->private;

	if (!sk) {
		st->bucket = 0;
		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
		goto get_sk;
	}

	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				vxdprintk(VXD_CBIT(net, 6),
					"sk,req: %p [#%d] (from %d)", req->sk,
					(req->sk)?req->sk->sk_xid:0, vx_current_xid());
				if (req->sk &&
					!vx_check(req->sk->sk_xid, VX_IDENT|VX_WATCH))
					continue;
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= TCP_SYNQ_HSIZE)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_for_each_from(sk, node) {
		vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
			sk, sk->sk_xid, vx_current_xid());
		if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	if (++st->bucket < INET_LHTABLE_SIZE) {
		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state* st = seq->private;
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_node *node;
		struct inet_timewait_sock *tw;

		/* We can reschedule _before_ having picked the target: */
		cond_resched_softirq();

		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			vxdprintk(VXD_CBIT(net, 6),
				"sk,egf: %p [#%d] (from %d)",
				sk, sk->sk_xid, vx_current_xid());
			if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
				continue;
			if (sk->sk_family != st->family)
				continue;
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
			vxdprintk(VXD_CBIT(net, 6),
				"tw: %p [#%d] (from %d)",
				tw, tw->tw_xid, vx_current_xid());
			if (!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))
				continue;
			if (tw->tw_family != st->family)
				continue;
			rc = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	struct tcp_iter_state* st = seq->private;

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family ||
			!vx_check(tw->tw_xid, VX_IDENT|VX_WATCH))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* We can reschedule between buckets: */
		cond_resched_softirq();

		if (++st->bucket < tcp_hashinfo.ehash_size) {
			read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
		} else {
			cur = NULL;
			goto out;
		}
	} else
		sk = sk_next(sk);

	sk_for_each_from(sk, node) {
		vxdprintk(VXD_CBIT(net, 6),
			"sk,egn: %p [#%d] (from %d)",
			sk, sk->sk_xid, vx_current_xid());
		if (!vx_check(sk->sk_xid, VX_IDENT|VX_WATCH))
			continue;
		if (sk->sk_family == st->family)
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state* st = seq->private;

	inet_listen_lock(&tcp_hashinfo);
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		inet_listen_unlock(&tcp_hashinfo);
		local_bh_disable();
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state* st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state* st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			inet_listen_unlock(&tcp_hashinfo);
			local_bh_disable();
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			inet_listen_unlock(&tcp_hashinfo);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
		local_bh_enable();
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct seq_file *seq;
	struct tcp_iter_state *s;
	int rc;

	if (unlikely(afinfo == NULL))
		return -EINVAL;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	memset(s, 0, sizeof(*s));
	s->family		= afinfo->family;
	s->seq_ops.start	= tcp_seq_start;
	s->seq_ops.next		= tcp_seq_next;
	s->seq_ops.show		= afinfo->seq_show;
	s->seq_ops.stop		= tcp_seq_stop;

	rc = seq_open(file, &s->seq_ops);
	if (rc)
		goto out_kfree;
	seq	     = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}
int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	if (!afinfo)
		return -EINVAL;
	afinfo->seq_fops->owner		= afinfo->owner;
	afinfo->seq_fops->open		= tcp_seq_open;
	afinfo->seq_fops->read		= seq_read;
	afinfo->seq_fops->llseek	= seq_lseek;
	afinfo->seq_fops->release	= seq_release_private;

	p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
	if (p)
		p->data = afinfo;
	else
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
{
	if (!afinfo)
		return;
	proc_net_remove(afinfo->name);
	memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 char *tmpbuf, int i, int uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0,  /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct inet_sock *inet = inet_sk(sp);
	unsigned int dest = inet->daddr;
	unsigned int src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %u %u %u %u %d",
		i, src, srcp, dest, destp, sp->sk_state,
		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sp),
		icsk->icsk_probes_out,
		sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		icsk->icsk_rto,
		icsk->icsk_ack.ato,
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
{
	unsigned int dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st;
	char tmpbuf[TMPSZ + 1];

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, tmpbuf, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, tmpbuf, st->num);
		break;
	}
	seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
out:
	return 0;
}
static struct file_operations tcp4_seq_fops;
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp",
	.family		= AF_INET,
	.seq_show	= tcp4_seq_show,
	.seq_fops	= &tcp4_seq_fops,
};

int __init tcp4_proc_init(void)
{
	return tcp_proc_register(&tcp4_seq_afinfo);
}

void tcp4_proc_exit(void)
{
	tcp_proc_unregister(&tcp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= tcp_v4_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v4_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
void __init tcp_v4_init(struct net_proto_family *ops)
{
	if (inet_csk_ctl_sock_create(&tcp_socket, PF_INET, SOCK_RAW, IPPROTO_TCP) < 0)
		panic("Failed to create the TCP control socket.\n");
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_local_port_range);
EXPORT_SYMBOL(sysctl_tcp_low_latency);