/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					get displayed.
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					correctly
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC state machine
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
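
/*
 * Illustrative sketch (not part of this file): a minimal userspace
 * close sequence and the states our side walks through, assuming a
 * connected stream socket `fd`.
 *
 *	shutdown(fd, SHUT_WR);	// ESTABLISHED -> FIN_WAIT1, then
 *				// FIN_WAIT2 once our FIN is ACKed
 *	// drain remaining data with read() until it returns 0
 *	close(fd);		// on the peer's FIN we move to TIME_WAIT
 */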
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);

kmem_cache_t *tcp_openreq_cachep;
kmem_cache_t *tcp_bucket_cachep;
kmem_cache_t *tcp_timewait_cachep;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

int sysctl_tcp_default_win_scale = 7;

int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All of sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);
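
/*
 * Illustrative sketch (not part of this file): the accounting paths
 * the comment above refers to consult the flag roughly like
 *
 *	if (tcp_memory_pressure &&
 *	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[0])
 *		// back off: refuse to grow queues, try to collapse
 *
 * The counters themselves are updated exactly, but readers of the
 * flag are unlocked, so the reaction is advisory and may lag a bit.
 */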

/*
 * LISTEN is a special case for poll..
 */
static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
					       poll_table *wait)
{
	return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
}

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_opt *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return tcp_listen_poll(sk, wait);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
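
/*
 * Illustrative sketch (not part of this file): how a userspace caller
 * typically consumes the mask computed above, assuming a connected
 * stream socket `fd`.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLPRI };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLHUP)
 *			;	// both directions shut down (see note above)
 *		if (pfd.revents & (POLLIN | POLLRDNORM))
 *			;	// read() will not block
 *		if (pfd.revents & POLLPRI)
 *			;	// urgent data pending: recv(..., MSG_OOB)
 *	}
 */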

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	};

	return put_user(answ, (int __user *)arg);
}

int tcp_listen_start(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	struct tcp_listen_opt *lopt;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	tp->accept_queue = tp->accept_queue_tail = NULL;
	tp->syn_wait_lock = RW_LOCK_UNLOCKED;
	tcp_delack_init(tp);

	lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
	if (!lopt)
		return -ENOMEM;

	memset(lopt, 0, sizeof(struct tcp_listen_opt));
	for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
		if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
			break;
	get_random_bytes(&lopt->hash_rnd, 4);

	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = lopt;
	write_unlock_bh(&tp->syn_wait_lock);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = NULL;
	write_unlock_bh(&tp->syn_wait_lock);
	kfree(lopt);
	return -EADDRINUSE;
}

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
static void tcp_listen_stop(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct tcp_listen_opt *lopt = tp->listen_opt;
	struct open_request *acc_req = tp->accept_queue;
	struct open_request *req;
	int i;

	tcp_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = NULL;
	write_unlock_bh(&tp->syn_wait_lock);
	tp->accept_queue = tp->accept_queue_tail = NULL;

	if (lopt->qlen) {
		for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				lopt->qlen--;
				tcp_openreq_free(req);

		/* Following specs, it would be better either to send FIN
		 * (and enter FIN-WAIT-1, it is normal close)
		 * or to send active reset (abort).
		 * Certainly, it is pretty dangerous while synflood, but it is
		 * bad justification for our negligence 8)
		 * To be honest, we are not able to make either
		 * of the variants now.			--ANK
		 */
			}
		}
	}
	BUG_TRAP(!lopt->qlen);

	kfree(lopt);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(!sock_owned_by_user(child));
		sock_hold(child);

		tcp_disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(&tcp_orphan_count);

		tcp_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		tcp_openreq_fastfree(req);
	}
	BUG_TRAP(!sk->sk_ack_backlog);
}

static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_opt *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
			      struct sk_buff *skb)
{
	skb->csum = 0;
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);
	if (!sk->sk_send_head)
		sk->sk_send_head = skb;
	else if (tp->nonagle&TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
			    int mss_now, int nonagle)
{
	if (sk->sk_send_head) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, tp, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int mss_now;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!sk->sk_send_head || (copy = mss_now - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, tp->mss_cache,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, tp, skb);
			copy = mss_now;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		} else {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->ip_summed = CHECKSUM_HW;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len != mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == sk->sk_send_head)
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
		return sock_no_sendpage(sock, page, offset, size, flags);

#undef TCP_ZC_CSUM_FLAGS

	lock_sock(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	release_sock(sk);
	return res;
}
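
/*
 * Illustrative sketch (not part of this file): the userspace path that
 * ends up here, assuming an open input file `in_fd`, a connected TCP
 * socket `out_fd`, and a byte count `count`.
 *
 *	off_t off = 0;
 *	ssize_t n = sendfile(out_fd, in_fd, &off, count);
 *
 * When the route supports scatter-gather and checksum offload this is
 * served by do_tcp_sendpages() without a copy through user space;
 * otherwise it falls back to sock_no_sendpage().
 */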

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk, struct tcp_opt *tp)
{
	int tmp = tp->mss_cache_std;

	if (sk->sk_route_caps & NETIF_F_SG) {
		int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

		if (tmp >= pgbreak &&
		    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
			tmp = pgbreak;
	}
	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_opt *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = sk->sk_write_queue.prev;

			if (!sk->sk_send_head ||
			    (copy = mss_now - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps &
				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
				     NETIF_F_HW_CSUM))
					skb->ip_summed = CHECKSUM_HW;

				skb_entail(sk, tp, skb);
				copy = mss_now;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					    !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					/* If page is cached, align
					 * offset to L1 cache boundary
					 */
					off = (off + L1_CACHE_BYTES - 1) &
					      ~(L1_CACHE_BYTES - 1);
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
					}
				}

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
					off = 0;
				}

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len != mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == sk->sk_send_head)
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		}
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		if (sk->sk_send_head == skb)
			sk->sk_send_head = NULL;
		__skb_unlink(skb, skb->list);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */
static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_opt *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (tcp_ack_scheduled(tp)) {
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (tp->ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
		     !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
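
/*
 * Worked example (illustrative, not from the original source): with
 * window_clamp = 64K and a current receive window of 8K, the first
 * test passes (2*8K <= 64K); an immediate window-update ACK is then
 * sent only if __tcp_select_window() can now offer at least 16K.
 * Smaller gains are left to the normal delayed-ACK machinery.
 */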

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = tcp_sk(sk);

	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (skb->h.th->syn)
			offset--;
		if (offset < skb->len || skb->h.th->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (skb->h.th->fin) {
			sk_eat_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied)
		cleanup_rbuf(sk, copied);
	return copied;
}
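
/*
 * Illustrative sketch (not part of this file): a minimal
 * sk_read_actor_t callback of the kind tcp_read_sock() expects, for a
 * hypothetical consumer that merely counts bytes.
 *
 *	static int count_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			       unsigned int offset, size_t len)
 *	{
 *		size_t want = min(len, (size_t)desc->count);
 *
 *		// payload to consume starts `offset` bytes into skb
 *		desc->count -= want;
 *		desc->written += want;
 *		return want;	// tell tcp_read_sock() how much was used
 *	}
 */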

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		struct sk_buff *skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (skb->h.th->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		cleanup_rbuf(sk, copied);

		if (tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (skb_queue_len(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    skb_queue_len(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk, tp);
		}
		if (used + offset < skb->len)
			continue;

		if (skb->h.th->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (len > 0);

	if (user_recv) {
		if (skb_queue_len(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	in CLOSING.
 */
static unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
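
/*
 * Illustrative note (not part of this file): each new_state[] entry
 * packs the successor state in TCP_STATE_MASK plus an optional "send
 * a FIN" bit above it, so a caller typically does:
 *
 *	next = new_state[TCP_ESTABLISHED];	// TCP_FIN_WAIT1 | TCP_ACTION_FIN
 *	tcp_set_state(sk, next & TCP_STATE_MASK);	// -> FIN_WAIT1
 *	if (next & TCP_ACTION_FIN)
 *		tcp_send_fin(sk);	// the caller queues the FIN
 */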

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */
void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *	Tim MacKenzie (tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void tcp_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->sk_state == TCP_CLOSE);
	BUG_TRAP(sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	BUG_TRAP(sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->num, it must be bound */
	BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);

#ifdef TCP_DEBUG
	if (sk->sk_zapped) {
		printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk);
		sock_hold(sk);
	}
	sk->sk_zapped = 1;
#endif

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&sk->sk_refcnt) != 1) {
		printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
		       sk, atomic_read(&sk->sk_refcnt));
	}
#endif

	atomic_dec(&tcp_orphan_count);
	sock_put(sk);
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		tcp_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  skb->h.th->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_stream_mem_reclaim(sk);

	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
	 * 3.10, we send a RST here because data was lost.  To
	 * witness the awful effects of the old behavior of always
	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
	 * a bulk GET in an FTP client, suspend the process, wait
	 * for the client to advertise a zero window, then kill -9
	 * the FTP client, wheee...  Note: timeout is always zero
	 * in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);

	/* Now socket is owned by kernel and we acquire BH lock
	   to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	/*	This is a (useful) BSD violating of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 *
	 *	Nope, it was not mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_opt *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			int tmo = tcp_fin_time(tp);

			if (tmo > TCP_TIMEWAIT_LEN) {
				tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
			} else {
				atomic_inc(&tcp_orphan_count);
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many of orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	atomic_inc(&tcp_orphan_count);

	if (sk->sk_state == TCP_CLOSE)
		tcp_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		tcp_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	sk_stream_writequeue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	tp->backoff = 0;
	tp->snd_cwnd = 2;
	tp->probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tcp_set_ca_state(tp, TCP_CA_Open);
	tcp_clear_retrans(tp);
	tcp_delack_init(tp);
	sk->sk_send_head = NULL;
	tp->saw_tstamp = 0;
	tcp_sack_reset(tp);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || tp->bind_hash);

	sk->sk_error_report(sk);
	return err;
}

/*
 *	Wait for an incoming connection, avoid race
 *	conditions. This must be called with the socket locked.
 */
static int wait_for_connect(struct sock *sk, long timeo)
{
	struct tcp_opt *tp = tcp_sk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (!tp->accept_queue)
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (tp->accept_queue)
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}

/*
 *	This will accept the next outstanding connection.
 */
struct sock *tcp_accept(struct sock *sk, int flags, int *err)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct open_request *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* Find already established connection */
	if (!tp->accept_queue) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out;

		error = wait_for_connect(sk, timeo);
		if (error)
			goto out;
	}

	req = tp->accept_queue;
	if ((tp->accept_queue = req->dl_next) == NULL)
		tp->accept_queue_tail = NULL;

	newsk = req->sk;
	sk_acceptq_removed(sk);
	tcp_openreq_fastfree(req);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
	release_sock(sk);
	return newsk;

out:
	release_sock(sk);
	*err = error;
	return NULL;
}

/*
 *	Socket option code for TCP.
 */
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int val;
	int err = 0;

	if (level != SOL_TCP)
		return tp->af_specific->setsockopt(sk, level, optname,
						   optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;
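
	/*
	 * Illustrative usage (not part of this file): the cork/uncork
	 * pattern the comment above describes, seen from userspace,
	 * assuming `fd` is a connected TCP socket and `file_fd`,
	 * `header`, `header_len`, and `body_len` are set up by the
	 * caller.
	 *
	 *	int on = 1, off = 0;
	 *
	 *	setsockopt(fd, SOL_TCP, TCP_CORK, &on, sizeof(on));
	 *	write(fd, header, header_len);		// held until full
	 *	sendfile(fd, file_fd, NULL, body_len);	// fills same frames
	 *	setsockopt(fd, SOL_TCP, TCP_CORK, &off, sizeof(off));	// flush
	 */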

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				tcp_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			tp->syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		tp->defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of
			 * retransmits */
			while (tp->defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				      tp->defer_accept))
				tp->defer_accept++;
			tp->defer_accept++;
		}
		break;
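
	/*
	 * Worked example (illustrative, not from the original source):
	 * with TCP_TIMEOUT_INIT/HZ == 3, a request of val = 10 seconds
	 * walks the backed-off timeouts 3, 6, 12: it increments past 3
	 * and 6, stops at 12, and the trailing increment leaves
	 * tp->defer_accept == 3. tcp_getsockopt() reports this back as
	 * 3 << (3 - 1) = 12 seconds, the covering timeout.
	 */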

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
					   SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			tp->ack.pingpong = 1;
		} else {
			tp->ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    tcp_ack_scheduled(tp)) {
				tp->ack.pending |= TCP_ACK_PUSHED;
				cleanup_rbuf(sk, 1);
				if (!(val & 1))
					tp->ack.pingpong = 1;
			}
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	};
	release_sock(sk);
	return err;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int val, len;

	if (level != SOL_TCP)
		return tp->af_specific->getsockopt(sk, level, optname,
						   optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache_std;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = tp->syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
					       (tp->defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !tp->ack.pingpong;
		break;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

extern void __skb_cb_too_small_for_tcp(int, int);
extern void tcpdiag_init(void);

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
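
/*
 * Illustrative usage (not part of this file): the established-hash
 * size can be pinned from the kernel command line, e.g.
 *
 *	thash_entries=16384
 *
 * which overrides the memory-based sizing heuristic in tcp_init().
 */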

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long goal;
	int order, i;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
					       sizeof(struct open_request),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL, NULL);
	if (!tcp_openreq_cachep)
		panic("tcp_init: Cannot alloc open_request cache.");

	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
					      sizeof(struct tcp_bind_bucket),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!tcp_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
						sizeof(struct tcp_tw_bucket),
						0, SLAB_HWCACHE_ALIGN,
						NULL, NULL);
	if (!tcp_timewait_cachep)
		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries * sizeof(struct tcp_ehash_bucket)) >> PAGE_SHIFT;
	for (order = 0; (1UL << order) < goal; order++)
		;
	do {
		tcp_ehash_size = (1UL << order) * PAGE_SIZE /
				 sizeof(struct tcp_ehash_bucket);
		tcp_ehash_size >>= 1;
		while (tcp_ehash_size & (tcp_ehash_size - 1))
			tcp_ehash_size--;
		tcp_ehash = (struct tcp_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (!tcp_ehash && --order > 0);

	if (!tcp_ehash)
		panic("Failed to allocate TCP established hash table\n");
	for (i = 0; i < (tcp_ehash_size << 1); i++) {
		tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
	}

	do {
		tcp_bhash_size = (1UL << order) * PAGE_SIZE /
				 sizeof(struct tcp_bind_hashbucket);
		if ((tcp_bhash_size > (64 * 1024)) && order > 0)
			continue;
		tcp_bhash = (struct tcp_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (!tcp_bhash && --order >= 0);

	if (!tcp_bhash)
		panic("Failed to allocate TCP bind hash table\n");
	for (i = 0; i < tcp_bhash_size; i++) {
		tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	if (order > 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		sysctl_tcp_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		sysctl_tcp_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
	tcp_port_rover = sysctl_local_port_range[0] - 1;

	sysctl_tcp_mem[0] = 768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;

	if (order < 3) {
		sysctl_tcp_wmem[2] = 64 * 1024;
		sysctl_tcp_rmem[0] = PAGE_SIZE;
		sysctl_tcp_rmem[1] = 43689;
		sysctl_tcp_rmem[2] = 2 * 43689;
	}

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_ehash_size << 1, tcp_bhash_size);

	tcpdiag_init();
}

EXPORT_SYMBOL(tcp_accept);
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_close_state);
EXPORT_SYMBOL(tcp_destroy_sock);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_openreq_cachep);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);