/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *	Alan Cox	:	Numerous verify_area() calls
 *	Alan Cox	:	Set the ACK bit on a reset
 *	Alan Cox	:	Stopped it crashing if it closed while
 *				sk->inuse=1 and was trying to connect
 *	Alan Cox	:	All icmp error handling was broken
 *				pointers passed where wrong and the
 *				socket was looked up backwards. Nobody
 *				tested any icmp error code obviously.
 *	Alan Cox	:	tcp_err() now handled properly. It
 *				wakes people on errors. poll
 *				behaves and the icmp error race
 *				has gone by moving it into sock.c
 *	Alan Cox	:	tcp_send_reset() fixed to work for
 *				everything not just packets for
 *	Alan Cox	:	tcp option processing.
 *	Alan Cox	:	Reset tweaked (still not 100%) [Had
 *	Herp Rosmanith	:	More reset fixes
 *	Alan Cox	:	No longer acks invalid rst frames.
 *				Acking any kind of RST is right out.
 *	Alan Cox	:	Sets an ignore me flag on an rst
 *				receive otherwise odd bits of prattle
 *	Alan Cox	:	Fixed another acking RST frame bug.
 *				Should stop LAN workplace lockups.
 *	Alan Cox	:	Some tidyups using the new skb list
 *	Alan Cox	:	sk->keepopen now seems to work
 *	Alan Cox	:	Pulls options out correctly on accepts
 *	Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *	Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *	Alan Cox	:	Tidied tcp_data to avoid a potential
 *	Alan Cox	:	Added some better commenting, as the
 *				tcp is hard to follow
 *	Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly :	ack < copied bug fix.
 *	Johannes Stille	:	Misc tcp fixes (not all in yet).
 *	Alan Cox	:	FIN with no memory -> CRASH
 *	Alan Cox	:	Added socket option proto entries.
 *				Also added awareness of them to accept.
 *	Alan Cox	:	Added TCP options (SOL_TCP)
 *	Alan Cox	:	Switched wakeup calls to callbacks,
 *				so the kernel can layer network
 *	Alan Cox	:	Use ip_tos/ip_ttl settings.
 *	Alan Cox	:	Handle FIN (more) properly (we hope).
 *	Alan Cox	:	RST frames sent on unsynchronised
 *	Alan Cox	:	Put in missing check for SYN bit.
 *	Alan Cox	:	Added tcp_select_window() aka NET2E
 *				window non shrink trick.
 *	Alan Cox	:	Added a couple of small NET2E timer
 *	Charles Hedrick	:	TCP fixes
 *	Toomas Tamm	:	TCP window fixes
 *	Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *	Charles Hedrick	:	Rewrote most of it to actually work
 *	Linus		:	Rewrote tcp_read() and URG handling
 *	Gerhard Koerting :	Fixed some missing timer handling
 *	Matthew Dillon	:	Reworked TCP machine states as per RFC
 *	Gerhard Koerting :	PC/TCP workarounds
 *	Adam Caldwell	:	Assorted timer/timing errors
 *	Matthew Dillon	:	Fixed another RST bug
 *	Alan Cox	:	Move to kernel side addressing changes.
 *	Alan Cox	:	Beginning work on TCP fastpathing
 *	Arnt Gulbrandsen :	Turbocharged tcp_check() routine.
 *	Alan Cox	:	TCP fast path debugging
 *	Alan Cox	:	Window clamping
 *	Michael Riepe	:	Bug in tcp_check()
 *	Matt Dillon	:	More TCP improvements and RST bug fixes
 *	Matt Dillon	:	Yet more small nasties removed from the
 *				TCP code (Be very nice to this man if
 *				tcp finally works 100%) 8)
 *	Alan Cox	:	BSD accept semantics.
 *	Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver :	ENOTCONN check missing in tcp_sendto().
 *	Michael Pall	:	Handle poll() after URG properly in
 *	Michael Pall	:	Undo the last fix in tcp_read_urg()
 *				(multi URG PUSH broke rlogin).
 *	Michael Pall	:	Fix the multi URG PUSH problem in
 *				tcp_readable(), poll() after URG
 *	Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *	Alan Cox	:	Changed the semantics of sk->socket to
 *				fix a race and a signal problem with
 *				accept() and async I/O.
 *	Alan Cox	:	Relaxed the rules on tcp_sendto().
 *	Yury Shevchuk	:	Really fixed accept() blocking problem.
 *	Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *				clients/servers which listen in on
 *	Alan Cox	:	Cleaned the above up and shrank it to
 *				a sensible code size.
 *	Alan Cox	:	Self connect lockup fix.
 *	Alan Cox	:	No connect to multicast.
 *	Ross Biro	:	Close unaccepted children on master
 *	Alan Cox	:	Reset tracing code.
 *	Alan Cox	:	Spurious resets on shutdown.
 *	Alan Cox	:	Giant 15 minute/60 second timer error
 *	Alan Cox	:	Small whoops in polling before an
 *	Alan Cox	:	Kept the state trace facility since
 *				it's handy for debugging.
 *	Alan Cox	:	More reset handler fixes.
 *	Alan Cox	:	Started rewriting the code based on
 *				the RFC's for other useful protocol
 *				references see: Comer, KA9Q NOS, and
 *				for a reference on the difference
 *				between specifications and how BSD
 *				works see the 4.4lite source.
 *	A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *	Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *	Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *	Alan Cox	:	Reimplemented timers as per the RFC
 *				and using multiple timers for sanity.
 *	Alan Cox	:	Small bug fixes, and a lot of new
 *	Alan Cox	:	Fixed dual reader crash by locking
 *				the buffers (much like datagram.c)
 *	Alan Cox	:	Fixed stuck sockets in probe. A probe
 *				now gets fed up of retrying without
 *				(even a no space) answer.
 *	Alan Cox	:	Extracted closing code better
 *	Alan Cox	:	Fixed the closing state machine to
 *	Alan Cox	:	More 'per spec' fixes.
 *	Jorge Cwik	:	Even faster checksumming.
 *	Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *				only frames. At least one pc tcp stack
 *	Alan Cox	:	Cache last socket.
 *	Alan Cox	:	Per route irtt.
 *	Matt Day	:	poll()->select() match BSD precisely on error
 *	Alan Cox	:	New buffers
 *	Marc Tamsky	:	Various sk->prot->retransmits and
 *				sk->retransmits misupdating fixed.
 *				Fixed tcp_write_timeout: stuck close,
 *				and TCP syn retries gets used now.
 *	Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *				ack if state is TCP_CLOSED.
 *	Alan Cox	:	Look up device on a retransmit - routes may
 *				change. Doesn't yet cope with MSS shrink right
 *	Marc Tamsky	:	Closing in closing fixes.
 *	Mike Shaver	:	RFC1122 verifications.
 *	Alan Cox	:	rcv_saddr errors.
 *	Alan Cox	:	Block double connect().
 *	Alan Cox	:	Small hooks for enSKIP.
 *	Alexey Kuznetsov :	Path MTU discovery.
 *	Alan Cox	:	Support soft errors.
 *	Alan Cox	:	Fix MTU discovery pathological case
 *				when the remote claims no mtu!
 *	Marc Tamsky	:	TCP_CLOSE fix.
 *	Colin (G3TNE)	:	Send a reset on syn ack replies in
 *				window but wrong (fixes NT lpd problems)
 *	Pedro Roque	:	Better TCP window handling, delayed ack.
 *	Joerg Reuter	:	No modification of locked buffers in
 *				tcp_do_retransmit()
 *	Eric Schenk	:	Changed receiver side silly window
 *				avoidance algorithm to BSD style
 *				algorithm. This doubles throughput
 *				against machines running Solaris,
 *				and seems to result in general
 *	Stefan Magdalinski :	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg :	Transparent proxying support.
 *	Mike McLagan	:	Routing by source
 *	Keith Owens	:	Do proper merging with partial SKB's in
 *				tcp_do_sendmsg to avoid burstiness.
 *	Eric Schenk	:	Fix fast close down bug with
 *				shutdown() followed by close().
 *	Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo :	Support SO_LINGER with linger == 1 and
 *				lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi :	Use copy_from_user() instead of
 *				csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
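/*
 * Example (user space, not part of this file): the states above can be
 * observed on a live socket through getsockopt(TCP_INFO); the tcpi_state
 * field carries the TCP_* value.  A minimal sketch:
 *
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_info ti;
 *	socklen_t tilen = sizeof(ti);
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &tilen);
 *				ti.tcpi_state is TCP_ESTABLISHED here
 *	shutdown(fd, SHUT_WR);
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &tilen);
 *				now typically TCP_FIN_WAIT1 or TCP_FIN_WAIT2
 */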
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/in.h>

#include <net/icmp.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All the sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask = 0;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
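/*
 * Example (user space, not part of this file): how the mask computed by
 * tcp_poll() is typically consumed.  POLLRDHUP mirrors the RCV_SHUTDOWN
 * case above and needs _GNU_SOURCE; everything else is plain poll(2).
 * A minimal sketch:
 *
 *	#define _GNU_SOURCE
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLRDHUP | POLLHUP))
 *			...		peer shut down or hung up
 *		else if (pfd.revents & POLLIN)
 *			...		data is ready to read
 *	}
 */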
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	};

	return put_user(answ, (int __user *)arg);
}
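/*
 * Example (user space, not part of this file): the three ioctls handled
 * above as seen from an application.  SIOCINQ (aka FIONREAD) returns the
 * byte count derived from rcv_nxt - copied_seq, SIOCOUTQ the unacked
 * bytes write_seq - snd_una, and SIOCATMARK whether the read pointer is
 * at the urgent mark:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int inq, outq, atmark;
 *
 *	ioctl(fd, SIOCINQ, &inq);	readable bytes queued
 *	ioctl(fd, SIOCOUTQ, &outq);	bytes sent but not yet acked
 *	ioctl(fd, SIOCATMARK, &atmark);	1 if at the urgent mark
 */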
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
			      struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);
	if (!sk->sk_send_head)
		sk->sk_send_head = skb;
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
			    int mss_now, int nonagle)
{
	if (sk->sk_send_head) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, tp, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, tp, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_HW;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == sk->sk_send_head)
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	release_sock(sk);
	return res;
}
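/*
 * Example (user space, not part of this file): tcp_sendpage() is what
 * backs sendfile(2) on a TCP socket.  A minimal sketch, assuming in_fd
 * is a regular file and out_fd a connected TCP socket:
 *
 *	#include <sys/sendfile.h>
 *
 *	off_t off = 0;
 *	while (count > 0) {
 *		ssize_t n = sendfile(out_fd, in_fd, &off, count);
 *		if (n <= 0)
 *			break;		error or end of file
 *		count -= n;
 *	}
 */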
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk, struct tcp_sock *tp)
{
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk->sk_route_caps & NETIF_F_TSO)
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = sk->sk_write_queue.prev;

			if (!sk->sk_send_head ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_HW;

				skb_entail(sk, tp, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == sk->sk_send_head)
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		if (sk->sk_send_head == skb)
			sk->sk_send_head = NULL;
		__skb_unlink(skb, &sk->sk_write_queue);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	release_sock(sk);
	return err;
}
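/*
 * Example (user space, not part of this file): the MSG_MORE handling
 * above lets an application batch small writes into full segments
 * without toggling TCP_CORK.  A minimal sketch:
 *
 *	send(fd, hdr, hdr_len, MSG_MORE);	header queued, not pushed
 *	send(fd, body, body_len, 0);		push everything
 */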
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
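/*
 * Example (user space, not part of this file): reading the single urgent
 * byte handled above.  With SO_OOBINLINE off (the default) the byte is
 * fetched out of band, and recv() never blocks here, as the comment
 * above explains:
 *
 *	char c;
 *	int atmark = 0;
 *
 *	ioctl(fd, SIOCATMARK, &atmark);
 *	if (recv(fd, &c, 1, MSG_OOB) == 1)
 *		...			handle the urgent byte in c
 *	else if (errno == EAGAIN)
 *		...			no urgent data pending
 */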
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		     !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is the new window.
			 * We can advertise it now, if it is not less than the current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (offset < skb->len || skb->h.th->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}
/*
 *	This routine provides an alternative to tcp_recvmsg() for routines
 *	that would like to handle copying from skbuffs directly in 'sendfile'
 *	fashion.
 *	Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (skb->h.th->fin) {
			sk_eat_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied)
		cleanup_rbuf(sk, copied);
	return copied;
}
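/*
 * Example (illustrative sketch, not part of this file): a minimal
 * sk_read_actor_t callback as consumed by tcp_read_sock() above.  The
 * name example_recv_actor is hypothetical; it copies up to "len" bytes
 * starting at "offset" in the skb into a kernel buffer carried in
 * desc->arg.data and reports how much it consumed:
 *
 *	static int example_recv_actor(read_descriptor_t *desc,
 *				      struct sk_buff *skb,
 *				      unsigned int offset, size_t len)
 *	{
 *		size_t want = min_t(size_t, len, desc->count);
 *
 *		skb_copy_bits(skb, offset, desc->arg.data, want);
 *		desc->arg.data += want;
 *		desc->count -= want;
 *		return want;
 *	}
 */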
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		struct sk_buff *skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (offset < skb->len)
				goto found_ok_skb;
			if (skb->h.th->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk, tp);
		}
		if (used + offset < skb->len)
			continue;

		if (skb->h.th->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}
/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 */

static const unsigned char new_state[16] = {
  /* current state:	   new state:	   action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/* We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets. FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
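/*
 * Example (user space, not part of this file): half-closing a connection
 * with shutdown(2) lands in tcp_shutdown() above and emits the FIN while
 * the receive side stays open.  consume() is a stand-in:
 *
 *	shutdown(fd, SHUT_WR);			send FIN, keep reading
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		drain until the peer's FIN
 *	close(fd);
 */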
void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/* We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  skb->h.th->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_stream_mem_reclaim(sk);

	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
	 * 3.10, we send a RST here because data was lost. To
	 * witness the awful effects of the old behavior of always
	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
	 * a bulk GET in an FTP client, suspend the process, wait
	 * for the client to advertise a zero window, then kill -9
	 * the FTP client, wheee... Note: timeout is always zero
	 * in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 *						--ANK
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);

	/* Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/* This is a (useful) BSD violation of the RFC. There is a
	 * problem with TCP as specified in that the other end could
	 * keep a socket open forever with no application left at this end.
	 * We use a 3 minute timeout (about the same as BSD) then kill
	 * our end. If they send after that then tough - BUT: long enough
	 * that we won't make the old 4*rto = almost no time - whoops
	 * reset mistake.
	 *
	 * Nope, it was not mistake. It is really desired behaviour
	 * f.e. on http servers, when such sockets are useless, but
	 * consume significant resources. Let's do it with special
	 * linger2 option.					--ANK
	 */
	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
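/*
 * Example (user space, not part of this file): the zero-linger branch
 * above (SOCK_LINGER with sk_lingertime == 0) is what an application
 * triggers to abort with a RST instead of a FIN:
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);			sends RST: the RFC 793 ABORT call
 */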
/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	sk_stream_writequeue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	sk->sk_send_head = NULL;
	tp->rx_opt.saw_tstamp = 0;
	tcp_sack_reset(&tp->rx_opt);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
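/*
 * Example (user space, not part of this file): tcp_disconnect() is what
 * runs when an application dissolves a connection by connecting to
 * AF_UNSPEC, after which the socket can be reused for a fresh connect():
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	abort/reset the current association
 *	connect(fd, (struct sockaddr *)&new_dst, sizeof(new_dst));
 */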
/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* This is a string value all the others are int's */
	if (optname == TCP_CONGESTION) {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min(TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue. This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		icsk->icsk_accept_queue.rskq_defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of
			 * retransmits */
			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				      icsk->icsk_accept_queue.rskq_defer_accept))
				icsk->icsk_accept_queue.rskq_defer_accept++;
			icsk->icsk_accept_queue.rskq_defer_accept++;
		}
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
					   SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	};
	release_sock(sk);
	return err;
}
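/*
 * Example (user space, not part of this file): exercising a few of the
 * options handled above; values are illustrative:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *
 * And the TCP_CORK pattern described in the comment above:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);
 *	sendfile(fd, file_fd, NULL, file_len);
 *	on = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 */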
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->rx_opt.sack_ok)
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
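/*
 * Example (user space, not part of this file): consuming the fields
 * filled in above:
 *
 *	struct tcp_info ti;
 *	socklen_t tilen = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &tilen) == 0)
 *		printf("rtt=%uus rttvar=%uus cwnd=%u retrans=%u\n",
 *		       ti.tcpi_rtt, ti.tcpi_rttvar,
 *		       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */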
static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	unsigned int delta;
	unsigned int oldlen;
	unsigned int len;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = skb->h.th;
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int mss = skb_shinfo(skb)->gso_size;

		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = skb->h.th;
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold(th->check + delta);
		if (skb->ip_summed != CHECKSUM_HW)
			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
							   skb->csum));

		seq += len;
		skb = skb->next;
		th = skb->h.th;

		th->seq = htonl(seq);
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
	th->check = ~csum_fold(th->check + delta);
	if (skb->ip_summed != CHECKSUM_HW)
		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
						   skb->csum));

out:
	return segs;
}
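/*
 * Example (illustrative, not part of this file): the
 * "~csum_fold(th->check + delta)" refolds above are incremental
 * one's-complement checksum updates in the style of RFC 1624: when a
 * 16-bit word of checksummed data changes from m to m', the stored
 * checksum HC can be fixed up without re-summing the packet:
 *
 *	static unsigned short refold(unsigned int sum)
 *	{
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return sum;
 *	}
 *
 *	HC' = ~refold((unsigned short)~HC + (unsigned short)~m + m');
 *
 * Here only the length covered by the TCP pseudo-header changes per
 * segment, so "delta" carries the precomputed difference.
 */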
extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!tcp_hashinfo.bind_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					HASH_HIGHMEM,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
	}

	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					HASH_HIGHMEM,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
	     (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
	     order++)
		;
	if (order >= 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	sysctl_tcp_mem[0] =  768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;

	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL_GPL(cleanup_rbuf);