2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed were wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
57 * Alan Cox : Tidied tcp_data to avoid a potential
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFCs. For other useful protocol
138 * references see: Comer, KA9Q NOS; and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works, see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request, waiting for ack
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
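 *
 *	As an illustrative sketch derived from the descriptions above
 *	(not an exhaustive list of transitions):
 *
 *	  active close:       ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 ->
 *	                      TIME_WAIT -> CLOSE
 *	  passive close:      ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *	  simultaneous close: FIN_WAIT1 -> CLOSING -> TIME_WAIT -> CLOSE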
250 #include <linux/config.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/smp_lock.h>
257 #include <linux/fs.h>
258 #include <linux/random.h>
259 #include <linux/ckrm.h>
261 #include <net/icmp.h>
263 #include <net/xfrm.h>
267 #include <asm/uaccess.h>
268 #include <asm/ioctls.h>
270 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
272 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
274 kmem_cache_t *tcp_openreq_cachep;
275 kmem_cache_t *tcp_bucket_cachep;
276 kmem_cache_t *tcp_timewait_cachep;
278 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
280 int sysctl_tcp_mem[3];
281 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
282 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
284 atomic_t tcp_memory_allocated; /* Current allocated memory. */
285 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
287 /* Pressure flag: try to collapse.
288 * Technical note: it is used by multiple contexts non-atomically.
289 * All of tcp_mem_schedule() is of this nature: accounting
290 * is strict, actions are advisory and have some latency. */
291 int tcp_memory_pressure;
293 #define TCP_PAGES(amt) (((amt) + TCP_MEM_QUANTUM - 1) / TCP_MEM_QUANTUM)
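/* Illustrative arithmetic only (assuming TCP_MEM_QUANTUM is PAGE_SIZE,
 * e.g. 4096 bytes): TCP_PAGES() rounds a byte count up to whole quantum
 * units, so TCP_PAGES(1) == 1, TCP_PAGES(4096) == 1, TCP_PAGES(4097) == 2.
 */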
295 int tcp_mem_schedule(struct sock *sk, int size, int kind)
297 int amt = TCP_PAGES(size);
299 sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM;
300 atomic_add(amt, &tcp_memory_allocated);
303 if (atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
304 if (tcp_memory_pressure)
305 tcp_memory_pressure = 0;
309 /* Over hard limit. */
310 if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) {
311 tcp_enter_memory_pressure();
312 goto suppress_allocation;
315 /* Under pressure. */
316 if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1])
317 tcp_enter_memory_pressure();
320 if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0])
322 } else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0])
325 if (!tcp_memory_pressure ||
326 sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) *
327 TCP_PAGES(sk->sk_wmem_queued +
328 atomic_read(&sk->sk_rmem_alloc) +
329 sk->sk_forward_alloc))
335 tcp_moderate_sndbuf(sk);
337 /* Fail only if socket is _under_ its sndbuf.
338 * In this case we cannot block, so that we have to fail.
340 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
344 /* Alas. Undo changes. */
345 sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM;
346 atomic_sub(amt, &tcp_memory_allocated);
350 void __tcp_mem_reclaim(struct sock *sk)
352 if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) {
353 atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM,
354 &tcp_memory_allocated);
355 sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1;
356 if (tcp_memory_pressure &&
357 atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
358 tcp_memory_pressure = 0;
362 void tcp_rfree(struct sk_buff *skb)
364 struct sock *sk = skb->sk;
366 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
367 sk->sk_forward_alloc += skb->truesize;
371 * LISTEN is a special case for poll..
373 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
376 return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
380 * Wait for a TCP event.
382 * Note that we don't need to lock the socket, as the upper poll layers
383 * take care of normal races (between the test and the event) and we don't
384 * go look at any of the socket buffers directly.
386 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
389 struct sock *sk = sock->sk;
390 struct tcp_opt *tp = tcp_sk(sk);
392 poll_wait(file, sk->sk_sleep, wait);
393 if (sk->sk_state == TCP_LISTEN)
394 return tcp_listen_poll(sk, wait);
396 /* Socket is not locked. We are protected from async events
397 by poll logic and correct handling of state changes
398 made by other threads is impossible in any case.
406 * POLLHUP is certainly not done right. But poll() doesn't
407 * have a notion of HUP in just one direction, and for a
408 * socket the read side is more interesting.
410 * Some poll() documentation says that POLLHUP is incompatible
411 * with the POLLOUT/POLLWR flags, so somebody should check this
412 * all. But careful, it tends to be safer to return too many
413 * bits than too few, and you can easily break real applications
414 * if you don't tell them that something has hung up!
418 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
419 * our fs/select.c). It means that after we received EOF,
420 * poll always returns immediately, making poll() on write() impossible
421 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
422 * if and only if shutdown has been made in both directions.
423 * Actually, it is interesting to look at how Solaris and DUX
424 * solve this dilemma. I would prefer, if POLLHUP were maskable,
425 * then we could set it on SND_SHUTDOWN. BTW examples given
426 * in Stevens' books assume exactly this behaviour; it explains
427 * why POLLHUP is incompatible with POLLOUT. --ANK
429 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
430 * blocking on fresh not-connected or disconnected socket. --ANK
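 *
 * Purely as a user-space illustration of the semantics discussed
 * above (a sketch, not kernel code):
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLHUP)
 *			;	// both directions shut down (or TCP_CLOSE)
 *		else if (pfd.revents & POLLIN)
 *			;	// data, or EOF after the remote FIN
 *		if (pfd.revents & POLLOUT)
 *			;	// writing will not block
 *	}
 *
 * POLLHUP here does not by itself indicate an error; POLLERR is
 * reported separately.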
432 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
434 if (sk->sk_shutdown & RCV_SHUTDOWN)
435 mask |= POLLIN | POLLRDNORM;
438 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
439 /* Potential race condition. If the read of tp below
440 * escapes above sk->sk_state, we can be illegally awakened
441 * in SYN_* states. */
442 if ((tp->rcv_nxt != tp->copied_seq) &&
443 (tp->urg_seq != tp->copied_seq ||
444 tp->rcv_nxt != tp->copied_seq + 1 ||
445 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
446 mask |= POLLIN | POLLRDNORM;
448 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
449 if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
450 mask |= POLLOUT | POLLWRNORM;
451 } else { /* send SIGIO later */
452 set_bit(SOCK_ASYNC_NOSPACE,
453 &sk->sk_socket->flags);
454 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
456 /* Race breaker. If space is freed after
457 * wspace test but before the flags are set,
458 * IO signal will be lost.
460 if (tcp_wspace(sk) >= tcp_min_write_space(sk))
461 mask |= POLLOUT | POLLWRNORM;
465 if (tp->urg_data & TCP_URG_VALID)
472 * TCP socket write_space callback.
474 void tcp_write_space(struct sock *sk)
476 struct socket *sock = sk->sk_socket;
478 if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
479 clear_bit(SOCK_NOSPACE, &sock->flags);
481 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
482 wake_up_interruptible(sk->sk_sleep);
484 if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
485 sock_wake_async(sock, 2, POLL_OUT);
489 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
491 struct tcp_opt *tp = tcp_sk(sk);
496 if (sk->sk_state == TCP_LISTEN)
500 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
502 else if (sock_flag(sk, SOCK_URGINLINE) ||
504 before(tp->urg_seq, tp->copied_seq) ||
505 !before(tp->urg_seq, tp->rcv_nxt)) {
506 answ = tp->rcv_nxt - tp->copied_seq;
508 /* Subtract 1, if FIN is in queue. */
509 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
511 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
513 answ = tp->urg_seq - tp->copied_seq;
517 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
520 if (sk->sk_state == TCP_LISTEN)
523 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
526 answ = tp->write_seq - tp->snd_una;
532 return put_user(answ, (int *)arg);
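/* User-space view of the ioctls handled above (an illustrative sketch,
 * not kernel code):
 *
 *	int queued, at_mark, unacked;
 *
 *	ioctl(sock, SIOCINQ, &queued);	   // a.k.a. FIONREAD: readable bytes
 *	ioctl(sock, SIOCATMARK, &at_mark); // 1 if next byte is the urgent mark
 *	ioctl(sock, SIOCOUTQ, &unacked);   // bytes written but not yet acked
 */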
536 int tcp_listen_start(struct sock *sk)
538 #ifdef CONFIG_ACCEPT_QUEUES
541 struct inet_opt *inet = inet_sk(sk);
542 struct tcp_opt *tp = tcp_sk(sk);
543 struct tcp_listen_opt *lopt;
545 sk->sk_max_ack_backlog = 0;
546 sk->sk_ack_backlog = 0;
547 tp->accept_queue = NULL;
548 #ifdef CONFIG_ACCEPT_QUEUES
550 for (i=0; i < NUM_ACCEPT_QUEUES; i++) {
551 tp->acceptq[i].aq_tail = NULL;
552 tp->acceptq[i].aq_head = NULL;
553 tp->acceptq[i].aq_wait_time = 0;
554 tp->acceptq[i].aq_qcount = 0;
555 tp->acceptq[i].aq_count = 0;
557 tp->acceptq[i].aq_valid = 1;
558 tp->acceptq[i].aq_ratio = 1;
561 tp->acceptq[i].aq_valid = 0;
562 tp->acceptq[i].aq_ratio = 0;
566 tp->syn_wait_lock = RW_LOCK_UNLOCKED;
569 lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
573 memset(lopt, 0, sizeof(struct tcp_listen_opt));
574 for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
575 if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
577 get_random_bytes(&lopt->hash_rnd, 4);
579 write_lock_bh(&tp->syn_wait_lock);
580 tp->listen_opt = lopt;
581 write_unlock_bh(&tp->syn_wait_lock);
583 /* There is a race window here: we announce ourselves listening,
584 * but this transition is still not validated by get_port().
585 * It is OK, because this socket enters the hash table only
586 * after validation is complete.
588 sk->sk_state = TCP_LISTEN;
589 if (!sk->sk_prot->get_port(sk, inet->num)) {
590 inet->sport = htons(inet->num);
593 sk->sk_prot->hash(sk);
596 ckrm_cb_listen_start(sk);
602 sk->sk_state = TCP_CLOSE;
603 write_lock_bh(&tp->syn_wait_lock);
604 tp->listen_opt = NULL;
605 write_unlock_bh(&tp->syn_wait_lock);
611 * This routine closes sockets which have been at least partially
612 * opened, but not yet accepted.
615 static void tcp_listen_stop (struct sock *sk)
617 struct tcp_opt *tp = tcp_sk(sk);
618 struct tcp_listen_opt *lopt = tp->listen_opt;
619 struct open_request *acc_req = tp->accept_queue;
620 struct open_request *req;
623 tcp_delete_keepalive_timer(sk);
625 /* make all the listen_opt local to us */
626 write_lock_bh(&tp->syn_wait_lock);
627 tp->listen_opt = NULL;
628 write_unlock_bh(&tp->syn_wait_lock);
631 ckrm_cb_listen_stop(sk);
634 #ifdef CONFIG_ACCEPT_QUEUES
635 for (i = 0; i < NUM_ACCEPT_QUEUES; i++)
636 tp->acceptq[i].aq_head = tp->acceptq[i].aq_tail = NULL;
638 tp->accept_queue_tail = NULL;
640 tp->accept_queue = NULL;
643 for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
644 while ((req = lopt->syn_table[i]) != NULL) {
645 lopt->syn_table[i] = req->dl_next;
647 tcp_openreq_free(req);
649 /* Following specs, it would be better either to send FIN
650 * (and enter FIN-WAIT-1, it is normal close)
651 * or to send active reset (abort).
652 * Certainly, it is pretty dangerous while synflood, but it is
653 * bad justification for our negligence 8)
654 * To be honest, we are not able to make either
655 * of the variants now. --ANK
660 BUG_TRAP(!lopt->qlen);
664 while ((req = acc_req) != NULL) {
665 struct sock *child = req->sk;
667 acc_req = req->dl_next;
671 BUG_TRAP(!sock_owned_by_user(child));
674 tcp_disconnect(child, O_NONBLOCK);
678 atomic_inc(&tcp_orphan_count);
680 tcp_destroy_sock(child);
682 bh_unlock_sock(child);
686 #ifdef CONFIG_ACCEPT_QUEUES
687 tcp_acceptq_removed(sk, req->acceptq_class);
689 tcp_acceptq_removed(sk);
691 tcp_openreq_fastfree(req);
693 BUG_TRAP(!sk->sk_ack_backlog);
697 * Wait for a socket to get into the connected state
699 * Note: Must be called with the socket locked.
701 static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
703 struct tcp_opt *tp = tcp_sk(sk);
704 struct task_struct *tsk = current;
707 while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
709 return sock_error(sk);
710 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
714 if (signal_pending(tsk))
715 return sock_intr_errno(*timeo_p);
717 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
721 *timeo_p = schedule_timeout(*timeo_p);
724 finish_wait(sk->sk_sleep, &wait);
730 static inline int tcp_memory_free(struct sock *sk)
732 return sk->sk_wmem_queued < sk->sk_sndbuf;
736 * Wait for more memory for a socket
738 static int wait_for_tcp_memory(struct sock *sk, long *timeo)
740 struct tcp_opt *tp = tcp_sk(sk);
743 long current_timeo = *timeo;
746 if (tcp_memory_free(sk))
747 current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;
750 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
752 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
754 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
758 if (signal_pending(current))
760 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
761 if (tcp_memory_free(sk) && !vm_wait)
764 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
767 if (!tcp_memory_free(sk) || vm_wait)
768 current_timeo = schedule_timeout(current_timeo);
773 vm_wait -= current_timeo;
774 current_timeo = *timeo;
775 if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
776 (current_timeo -= vm_wait) < 0)
780 *timeo = current_timeo;
783 finish_wait(sk->sk_sleep, &wait);
793 err = sock_intr_errno(*timeo);
797 static inline int can_coalesce(struct sk_buff *skb, int i, struct page *page,
801 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
802 return page == frag->page &&
803 off == frag->page_offset + frag->size;
808 static inline void fill_page_desc(struct sk_buff *skb, int i,
809 struct page *page, int off, int size)
811 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
813 frag->page_offset = off;
815 skb_shinfo(skb)->nr_frags = i + 1;
818 static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
820 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
821 tp->pushed_seq = tp->write_seq;
824 static inline int forced_push(struct tcp_opt *tp)
826 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
829 static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
833 TCP_SKB_CB(skb)->seq = tp->write_seq;
834 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
835 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
836 TCP_SKB_CB(skb)->sacked = 0;
837 __skb_queue_tail(&sk->sk_write_queue, skb);
838 tcp_charge_skb(sk, skb);
841 else if (tp->nonagle&TCP_NAGLE_PUSH)
842 tp->nonagle &= ~TCP_NAGLE_PUSH;
845 static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
848 if (flags & MSG_OOB) {
850 tp->snd_up = tp->write_seq;
851 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
855 static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
856 int mss_now, int nonagle)
859 struct sk_buff *skb = sk->sk_write_queue.prev;
860 if (!(flags & MSG_MORE) || forced_push(tp))
861 tcp_mark_push(tp, skb);
862 tcp_mark_urg(tp, flags, skb);
863 __tcp_push_pending_frames(sk, tp, mss_now,
864 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
868 static int tcp_error(struct sock *sk, int flags, int err)
871 err = sock_error(sk) ? : -EPIPE;
872 if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
873 send_sig(SIGPIPE, current, 0);
877 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
878 size_t psize, int flags)
880 struct tcp_opt *tp = tcp_sk(sk);
884 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
886 /* Wait for a connection to finish. */
887 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
888 if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
891 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
893 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
897 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
901 struct sk_buff *skb = sk->sk_write_queue.prev;
902 struct page *page = pages[poffset / PAGE_SIZE];
904 int offset = poffset % PAGE_SIZE;
905 int size = min_t(size_t, psize, PAGE_SIZE - offset);
907 if (!tp->send_head || (copy = mss_now - skb->len) <= 0) {
909 if (!tcp_memory_free(sk))
910 goto wait_for_sndbuf;
912 skb = tcp_alloc_pskb(sk, 0, tp->mss_cache,
915 goto wait_for_memory;
917 skb_entail(sk, tp, skb);
924 i = skb_shinfo(skb)->nr_frags;
925 if (can_coalesce(skb, i, page, offset)) {
926 skb_shinfo(skb)->frags[i - 1].size += copy;
927 } else if (i < MAX_SKB_FRAGS) {
929 fill_page_desc(skb, i, page, offset, copy);
931 tcp_mark_push(tp, skb);
936 skb->data_len += copy;
937 skb->ip_summed = CHECKSUM_HW;
938 tp->write_seq += copy;
939 TCP_SKB_CB(skb)->end_seq += copy;
942 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
946 if (!(psize -= copy))
949 if (skb->len != mss_now || (flags & MSG_OOB))
952 if (forced_push(tp)) {
953 tcp_mark_push(tp, skb);
954 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
955 } else if (skb == tp->send_head)
956 tcp_push_one(sk, mss_now);
960 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
963 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
965 if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
968 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
973 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
980 return tcp_error(sk, flags, err);
983 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
984 size_t size, int flags)
987 struct sock *sk = sock->sk;
989 #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
991 if (!(sk->sk_route_caps & NETIF_F_SG) ||
992 !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
993 return sock_no_sendpage(sock, page, offset, size, flags);
995 #undef TCP_ZC_CSUM_FLAGS
999 res = do_tcp_sendpages(sk, &page, offset, size, flags);
1000 TCP_CHECK_TIMER(sk);
1005 #define TCP_PAGE(sk) (inet_sk(sk)->sndmsg_page)
1006 #define TCP_OFF(sk) (inet_sk(sk)->sndmsg_off)
1008 static inline int tcp_copy_to_page(struct sock *sk, char *from,
1009 struct sk_buff *skb, struct page *page,
1015 if (skb->ip_summed == CHECKSUM_NONE) {
1016 csum = csum_and_copy_from_user(from, page_address(page) + off,
1018 if (err) return err;
1019 skb->csum = csum_block_add(skb->csum, csum, skb->len);
1021 if (copy_from_user(page_address(page) + off, from, copy))
1026 skb->data_len += copy;
1027 skb->truesize += copy;
1028 sk->sk_wmem_queued += copy;
1029 sk->sk_forward_alloc -= copy;
1033 static inline int skb_add_data(struct sk_buff *skb, char *from, int copy)
1039 if (skb->ip_summed == CHECKSUM_NONE) {
1040 csum = csum_and_copy_from_user(from, skb_put(skb, copy),
1043 skb->csum = csum_block_add(skb->csum, csum, off);
1047 if (!copy_from_user(skb_put(skb, copy), from, copy))
1051 __skb_trim(skb, off);
1055 static inline int select_size(struct sock *sk, struct tcp_opt *tp)
1057 int tmp = tp->mss_cache_std;
1059 if (sk->sk_route_caps & NETIF_F_SG) {
1060 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
1062 if (tmp >= pgbreak &&
1063 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
1069 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1073 struct tcp_opt *tp = tcp_sk(sk);
1074 struct sk_buff *skb;
1081 TCP_CHECK_TIMER(sk);
1083 flags = msg->msg_flags;
1084 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1086 /* Wait for a connection to finish. */
1087 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1088 if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
1091 /* This should be in poll */
1092 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1094 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
1096 /* Ok commence sending. */
1097 iovlen = msg->msg_iovlen;
1102 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1105 while (--iovlen >= 0) {
1106 int seglen = iov->iov_len;
1107 unsigned char *from = iov->iov_base;
1111 while (seglen > 0) {
1114 skb = sk->sk_write_queue.prev;
1116 if (!tp->send_head ||
1117 (copy = mss_now - skb->len) <= 0) {
1120 /* Allocate new segment. If the interface is SG,
1121 * allocate skb fitting to single page.
1123 if (!tcp_memory_free(sk))
1124 goto wait_for_sndbuf;
1126 skb = tcp_alloc_pskb(sk, select_size(sk, tp),
1127 0, sk->sk_allocation);
1129 goto wait_for_memory;
1132 * Check whether we can use HW checksum.
1134 if (sk->sk_route_caps &
1135 (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
1137 skb->ip_summed = CHECKSUM_HW;
1139 skb_entail(sk, tp, skb);
1143 /* Try to append data to the end of skb. */
1147 /* Where to copy to? */
1148 if (skb_tailroom(skb) > 0) {
1149 /* We have some space in skb head. Superb! */
1150 if (copy > skb_tailroom(skb))
1151 copy = skb_tailroom(skb);
1152 if ((err = skb_add_data(skb, from, copy)) != 0)
1156 int i = skb_shinfo(skb)->nr_frags;
1157 struct page *page = TCP_PAGE(sk);
1158 int off = TCP_OFF(sk);
1160 if (can_coalesce(skb, i, page, off) &&
1162 /* We can extend the last page
1165 } else if (i == MAX_SKB_FRAGS ||
1167 !(sk->sk_route_caps & NETIF_F_SG))) {
1168 /* Need to add new fragment and cannot
1169 * do this because interface is non-SG,
1170 * or because all the page slots are
1172 tcp_mark_push(tp, skb);
1175 /* If page is cached, align
1176 * offset to L1 cache boundary
1178 off = (off + L1_CACHE_BYTES - 1) &
1179 ~(L1_CACHE_BYTES - 1);
1180 if (off == PAGE_SIZE) {
1182 TCP_PAGE(sk) = page = NULL;
1187 /* Allocate new cache page. */
1188 if (!(page = tcp_alloc_page(sk)))
1189 goto wait_for_memory;
1193 if (copy > PAGE_SIZE - off)
1194 copy = PAGE_SIZE - off;
1196 /* Time to copy data. We are close to
1198 err = tcp_copy_to_page(sk, from, skb, page,
1201 /* If this page was new, give it to the
1202 * socket so it does not get leaked.
1204 if (!TCP_PAGE(sk)) {
1205 TCP_PAGE(sk) = page;
1211 /* Update the skb. */
1213 skb_shinfo(skb)->frags[i - 1].size +=
1216 fill_page_desc(skb, i, page, off, copy);
1219 } else if (off + copy < PAGE_SIZE) {
1221 TCP_PAGE(sk) = page;
1225 TCP_OFF(sk) = off + copy;
1229 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
1231 tp->write_seq += copy;
1232 TCP_SKB_CB(skb)->end_seq += copy;
1236 if ((seglen -= copy) == 0 && iovlen == 0)
1239 if (skb->len != mss_now || (flags & MSG_OOB))
1242 if (forced_push(tp)) {
1243 tcp_mark_push(tp, skb);
1244 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
1245 } else if (skb == tp->send_head)
1246 tcp_push_one(sk, mss_now);
1250 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1253 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1255 if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
1258 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
1264 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
1265 TCP_CHECK_TIMER(sk);
1271 if (tp->send_head == skb)
1272 tp->send_head = NULL;
1273 __skb_unlink(skb, skb->list);
1274 tcp_free_skb(sk, skb);
1281 err = tcp_error(sk, flags, err);
1282 TCP_CHECK_TIMER(sk);
1288 * Handle reading urgent data. BSD has very simple semantics for
1289 * this, no blocking and very strange errors 8)
1292 static int tcp_recv_urg(struct sock *sk, long timeo,
1293 struct msghdr *msg, int len, int flags,
1296 struct tcp_opt *tp = tcp_sk(sk);
1298 /* No URG data to read. */
1299 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1300 tp->urg_data == TCP_URG_READ)
1301 return -EINVAL; /* Yes this is right ! */
1303 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1306 if (tp->urg_data & TCP_URG_VALID) {
1308 char c = tp->urg_data;
1310 if (!(flags & MSG_PEEK))
1311 tp->urg_data = TCP_URG_READ;
1313 /* Read urgent data. */
1314 msg->msg_flags |= MSG_OOB;
1317 if (!(flags & MSG_TRUNC))
1318 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1321 msg->msg_flags |= MSG_TRUNC;
1323 return err ? -EFAULT : len;
1326 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1329 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1330 * the available implementations agree in this case:
1331 * this call should never block, independent of the
1332 * blocking state of the socket.
1333 * Mike <pall@rz.uni-karlsruhe.de>
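 *
 * As a user-space sketch of the behaviour described above (an
 * illustration, not kernel code), with SO_OOBINLINE left off:
 *
 *	char oobc;
 *	ssize_t n = recv(sock, &oobc, 1, MSG_OOB);
 *	if (n == 1)
 *		;	// got the urgent byte, call did not block
 *	else if (n < 0 && errno == EINVAL)
 *		;	// no urgent data pending (or it was already read)
 *
 * The EINVAL case corresponds to the check at the top of
 * tcp_recv_urg() above.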
1339 * Release a skb if it is no longer needed. This routine
1340 * must be called with interrupts disabled or with the
1341 * socket locked so that the sk_buff queue operation is ok.
1344 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
1346 __skb_unlink(skb, &sk->sk_receive_queue);
1350 /* Clean up the receive buffer for full frames taken by the user,
1351 * then send an ACK if necessary. COPIED is the number of bytes
1352 * tcp_recvmsg has given to the user so far, it speeds up the
1353 * calculation of whether or not we must ACK for the sake of
1356 static void cleanup_rbuf(struct sock *sk, int copied)
1358 struct tcp_opt *tp = tcp_sk(sk);
1359 int time_to_ack = 0;
1362 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1364 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1367 if (tcp_ack_scheduled(tp)) {
1368 /* Delayed ACKs frequently hit locked sockets during bulk
1370 if (tp->ack.blocked ||
1371 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1372 tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
1374 * If this read emptied read buffer, we send ACK, if
1375 * connection is not bidirectional, user drained
1376 * receive buffer and there was a small segment
1379 (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
1380 !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
1384 /* We send an ACK if we can now advertise a non-zero window
1385 * which has been raised "significantly".
1387 * Even if window raised up to infinity, do not send window open ACK
1388 * in states, where we will not receive more. It is useless.
1390 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1391 __u32 rcv_window_now = tcp_receive_window(tp);
1393 /* Optimize, __tcp_select_window() is not cheap. */
1394 if (2*rcv_window_now <= tp->window_clamp) {
1395 __u32 new_window = __tcp_select_window(sk);
1397 /* Send ACK now, if this read freed lots of space
1398 * in our buffer. Certainly, new_window is new window.
1399 * We can advertise it now, if it is not less than current one.
1400 * "Lots" means "at least twice" here.
1402 if (new_window && new_window >= 2 * rcv_window_now)
1410 /* Now socket state including sk->sk_err is changed only under lock,
1411 * hence we may omit checks after joining wait queue.
1412 * We check receive queue before schedule() only as optimization;
1413 * it is very likely that release_sock() added new data.
1416 static long tcp_data_wait(struct sock *sk, long timeo)
1420 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1422 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1425 if (skb_queue_empty(&sk->sk_receive_queue))
1426 timeo = schedule_timeout(timeo);
1429 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1431 finish_wait(sk->sk_sleep, &wait);
1435 static void tcp_prequeue_process(struct sock *sk)
1437 struct sk_buff *skb;
1438 struct tcp_opt *tp = tcp_sk(sk);
1440 NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
1442 /* RX process wants to run with disabled BHs, though it is not
1445 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1446 sk->sk_backlog_rcv(sk, skb);
1449 /* Clear memory counter. */
1450 tp->ucopy.memory = 0;
1453 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1455 struct sk_buff *skb;
1458 skb_queue_walk(&sk->sk_receive_queue, skb) {
1459 offset = seq - TCP_SKB_CB(skb)->seq;
1462 if (offset < skb->len || skb->h.th->fin) {
1471 * This routine provides an alternative to tcp_recvmsg() for routines
1472 * that would like to handle copying from skbuffs directly in 'sendfile'
1475 * - It is assumed that the socket was locked by the caller.
1476 * - The routine does not block.
1477 * - At present, there is no support for reading OOB data
1478 * or for 'peeking' the socket using this routine
1479 * (although both would be easy to implement).
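 *
 * A minimal sketch of a recv_actor callback (illustrative only; the
 * exact layout of read_descriptor_t differs between kernel versions,
 * so treat the use of desc->count below as an assumption):
 *
 *	static int example_recv_actor(read_descriptor_t *desc,
 *				      struct sk_buff *skb,
 *				      unsigned int offset, size_t len)
 *	{
 *		size_t want = len < desc->count ? len : desc->count;
 *
 *		... copy 'want' bytes of skb data starting at 'offset'
 *		    to wherever desc points ...
 *		desc->count -= want;
 *		return want;
 *	}
 *
 * Returning less than 'len' (or a value <= 0) stops the walk, as does
 * desc->count dropping to zero, matching the loop below.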
1481 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1482 sk_read_actor_t recv_actor)
1484 struct sk_buff *skb;
1485 struct tcp_opt *tp = tcp_sk(sk);
1486 u32 seq = tp->copied_seq;
1490 if (sk->sk_state == TCP_LISTEN)
1492 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1493 if (offset < skb->len) {
1496 len = skb->len - offset;
1497 /* Stop reading if we hit a patch of urgent data */
1499 u32 urg_offset = tp->urg_seq - seq;
1500 if (urg_offset < len)
1505 used = recv_actor(desc, skb, offset, len);
1511 if (offset != skb->len)
1514 if (skb->h.th->fin) {
1515 tcp_eat_skb(sk, skb);
1519 tcp_eat_skb(sk, skb);
1523 tp->copied_seq = seq;
1524 /* Clean up data we have read: This will do ACK frames. */
1526 cleanup_rbuf(sk, copied);
1531 * This routine copies from a sock struct into the user buffer.
1533 * Technical note: in 2.3 we work on _locked_ socket, so that
1534 * tricks with *seq access order and skb->users are not required.
1535 * Probably, code can be easily improved even more.
1538 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1539 size_t len, int nonblock, int flags, int *addr_len)
1541 struct tcp_opt *tp = tcp_sk(sk);
1547 int target; /* Read at least this many bytes */
1549 struct task_struct *user_recv = NULL;
1553 TCP_CHECK_TIMER(sk);
1556 if (sk->sk_state == TCP_LISTEN)
1559 timeo = sock_rcvtimeo(sk, nonblock);
1561 /* Urgent data needs to be handled specially. */
1562 if (flags & MSG_OOB)
1565 seq = &tp->copied_seq;
1566 if (flags & MSG_PEEK) {
1567 peek_seq = tp->copied_seq;
1571 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1574 struct sk_buff *skb;
1577 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1578 if (tp->urg_data && tp->urg_seq == *seq) {
1581 if (signal_pending(current)) {
1582 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1587 /* Next get a buffer. */
1589 skb = skb_peek(&sk->sk_receive_queue);
1594 /* Now that we have two receive queues this
1597 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1598 printk(KERN_INFO "recvmsg bug: copied %X "
1599 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1602 offset = *seq - TCP_SKB_CB(skb)->seq;
1605 if (offset < skb->len)
1609 BUG_TRAP(flags & MSG_PEEK);
1611 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1613 /* Well, if we have backlog, try to process it now.
1615 if (copied >= target && !sk->sk_backlog.tail)
1620 sk->sk_state == TCP_CLOSE ||
1621 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1623 signal_pending(current) ||
1627 if (sock_flag(sk, SOCK_DONE))
1631 copied = sock_error(sk);
1635 if (sk->sk_shutdown & RCV_SHUTDOWN)
1638 if (sk->sk_state == TCP_CLOSE) {
1639 if (!sock_flag(sk, SOCK_DONE)) {
1640 /* This occurs when user tries to read
1641 * from never connected socket.
1654 if (signal_pending(current)) {
1655 copied = sock_intr_errno(timeo);
1660 cleanup_rbuf(sk, copied);
1662 if (tp->ucopy.task == user_recv) {
1663 /* Install new reader */
1664 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1665 user_recv = current;
1666 tp->ucopy.task = user_recv;
1667 tp->ucopy.iov = msg->msg_iov;
1670 tp->ucopy.len = len;
1672 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1673 (flags & (MSG_PEEK | MSG_TRUNC)));
1675 /* Ugly... If prequeue is not empty, we have to
1676 * process it before releasing socket, otherwise
1677 * order will be broken at second iteration.
1678 * More elegant solution is required!!!
1680 * Look: we have the following (pseudo)queues:
1682 * 1. packets in flight
1687 * Each queue can be processed only if the next ones
1688 * are empty. At this point we have empty receive_queue.
1689 * But prequeue _can_ be not empty after 2nd iteration,
1690 * when we jumped to start of loop because backlog
1691 * processing added something to receive_queue.
1692 * We cannot release_sock(), because backlog contains
1693 * packets arrived _after_ prequeued ones.
1695 * In short, the algorithm is clear --- process all
1696 * the queues in order. We could do it more directly,
1697 * requeueing packets from backlog to prequeue, if it
1698 * is not empty. It is more elegant, but eats cycles,
1701 if (skb_queue_len(&tp->ucopy.prequeue))
1704 /* __ Set realtime policy in scheduler __ */
1707 if (copied >= target) {
1708 /* Do not sleep, just process backlog. */
1712 timeo = tcp_data_wait(sk, timeo);
1718 /* __ Restore normal policy in scheduler __ */
1720 if ((chunk = len - tp->ucopy.len) != 0) {
1721 NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
1726 if (tp->rcv_nxt == tp->copied_seq &&
1727 skb_queue_len(&tp->ucopy.prequeue)) {
1729 tcp_prequeue_process(sk);
1731 if ((chunk = len - tp->ucopy.len) != 0) {
1732 NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
1738 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1739 if (net_ratelimit())
1740 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1741 current->comm, current->pid);
1742 peek_seq = tp->copied_seq;
1747 /* Ok so how much can we use? */
1748 used = skb->len - offset;
1752 /* Do we have urgent data here? */
1754 u32 urg_offset = tp->urg_seq - *seq;
1755 if (urg_offset < used) {
1757 if (!sock_flag(sk, SOCK_URGINLINE)) {
1769 if (!(flags & MSG_TRUNC)) {
1770 err = skb_copy_datagram_iovec(skb, offset,
1771 msg->msg_iov, used);
1773 /* Exception. Bailout! */
1785 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1787 tcp_fast_path_check(sk, tp);
1789 if (used + offset < skb->len)
1794 if (!(flags & MSG_PEEK))
1795 tcp_eat_skb(sk, skb);
1799 /* Process the FIN. */
1801 if (!(flags & MSG_PEEK))
1802 tcp_eat_skb(sk, skb);
1807 if (skb_queue_len(&tp->ucopy.prequeue)) {
1810 tp->ucopy.len = copied > 0 ? len : 0;
1812 tcp_prequeue_process(sk);
1814 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1815 NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
1821 tp->ucopy.task = NULL;
1825 /* According to UNIX98, msg_name/msg_namelen are ignored
1826 * on connected socket. I was just happy when found this 8) --ANK
1829 /* Clean up data we have read: This will do ACK frames. */
1830 cleanup_rbuf(sk, copied);
1832 TCP_CHECK_TIMER(sk);
1837 TCP_CHECK_TIMER(sk);
1842 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1847 * State processing on a close. This implements the state shift for
1848 * sending our FIN frame. Note that we only send a FIN for some
1849 * states. A shutdown() may have already sent the FIN, or we may be
1853 static unsigned char new_state[16] = {
1854 /* current state: new state: action: */
1855 /* (Invalid) */ TCP_CLOSE,
1856 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1857 /* TCP_SYN_SENT */ TCP_CLOSE,
1858 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1859 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1860 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1861 /* TCP_TIME_WAIT */ TCP_CLOSE,
1862 /* TCP_CLOSE */ TCP_CLOSE,
1863 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1864 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1865 /* TCP_LISTEN */ TCP_CLOSE,
1866 /* TCP_CLOSING */ TCP_CLOSING,
1869 static int tcp_close_state(struct sock *sk)
1871 int next = (int)new_state[sk->sk_state];
1872 int ns = next & TCP_STATE_MASK;
1874 tcp_set_state(sk, ns);
1876 return next & TCP_ACTION_FIN;
1880 * Shutdown the sending side of a connection. Much like close except
1881 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1884 void tcp_shutdown(struct sock *sk, int how)
1886 /* We need to grab some memory, and put together a FIN,
1887 * and then put it into the queue to be sent.
1888 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1890 if (!(how & SEND_SHUTDOWN))
1893 /* If we've already sent a FIN, or it's a closed state, skip this. */
1894 if ((1 << sk->sk_state) &
1895 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1896 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1897 /* Clear out any half completed packets. FIN if needed. */
1898 if (tcp_close_state(sk))
1905 * Return 1 if we still have things to send in our buffers.
1908 static inline int closing(struct sock *sk)
1910 return (1 << sk->sk_state) &
1911 (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
1914 static __inline__ void tcp_kill_sk_queues(struct sock *sk)
1916 /* First the read buffer. */
1917 __skb_queue_purge(&sk->sk_receive_queue);
1919 /* Next, the error queue. */
1920 __skb_queue_purge(&sk->sk_error_queue);
1922 /* Next, the write queue. */
1923 BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
1925 /* Account for returned memory. */
1926 tcp_mem_reclaim(sk);
1928 BUG_TRAP(!sk->sk_wmem_queued);
1929 BUG_TRAP(!sk->sk_forward_alloc);
1931 /* It is _impossible_ for the backlog to contain anything
1932 * when we get here. All user references to this socket
1933 * have gone away; only the net layer can still touch it.
1938 * At this point, there should be no process reference to this
1939 * socket, and thus no user references at all. Therefore we
1940 * can assume the socket waitqueue is inactive and nobody will
1941 * try to jump onto it.
1943 void tcp_destroy_sock(struct sock *sk)
1945 BUG_TRAP(sk->sk_state == TCP_CLOSE);
1946 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
1948 /* It cannot be in hash table! */
1949 BUG_TRAP(sk_unhashed(sk));
1951 /* If inet_sk(sk)->num is non-zero, it must be bound */
1952 BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);
1955 if (sk->sk_zapped) {
1956 printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk);
1962 sk->sk_prot->destroy(sk);
1964 tcp_kill_sk_queues(sk);
1966 xfrm_sk_free_policy(sk);
1968 #ifdef INET_REFCNT_DEBUG
1969 if (atomic_read(&sk->sk_refcnt) != 1) {
1970 printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
1971 sk, atomic_read(&sk->sk_refcnt));
1975 atomic_dec(&tcp_orphan_count);
1979 void tcp_close(struct sock *sk, long timeout)
1981 struct sk_buff *skb;
1982 int data_was_unread = 0;
1985 sk->sk_shutdown = SHUTDOWN_MASK;
1987 if (sk->sk_state == TCP_LISTEN) {
1988 tcp_set_state(sk, TCP_CLOSE);
1991 tcp_listen_stop(sk);
1993 goto adjudge_to_death;
1996 /* We need to flush the recv. buffs. We do this only on the
1997 * descriptor close, not protocol-sourced closes, because the
1998 * reader process may not have drained the data yet!
2000 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2001 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
2003 data_was_unread += len;
2007 tcp_mem_reclaim(sk);
2009 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
2010 * 3.10, we send a RST here because data was lost. To
2011 * witness the awful effects of the old behavior of always
2012 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
2013 * a bulk GET in an FTP client, suspend the process, wait
2014 * for the client to advertise a zero window, then kill -9
2015 * the FTP client, wheee... Note: timeout is always zero
2018 if (data_was_unread) {
2019 /* Unread data was tossed, zap the connection. */
2020 NET_INC_STATS_USER(TCPAbortOnClose);
2021 tcp_set_state(sk, TCP_CLOSE);
2022 tcp_send_active_reset(sk, GFP_KERNEL);
2023 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2024 /* Check zero linger _after_ checking for unread data. */
2025 sk->sk_prot->disconnect(sk, 0);
2026 NET_INC_STATS_USER(TCPAbortOnData);
2027 } else if (tcp_close_state(sk)) {
2028 /* We FIN if the application ate all the data before
2029 * zapping the connection.
2032 /* RED-PEN. Formally speaking, we have broken TCP state
2033 * machine. State transitions:
2035 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2036 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2037 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2039 * are legal only when FIN has been sent (i.e. in window),
2040 * rather than queued out of window. Purists blame.
2042 * F.e. "RFC state" is ESTABLISHED,
2043 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2045 * The visible declinations are that sometimes
2046 * we enter time-wait state, when it is not required really
2047 * (harmless), do not send active resets, when they are
2048 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2049 * they look as CLOSING or LAST_ACK for Linux)
2050 * Probably, I missed some more holelets.
2057 struct task_struct *tsk = current;
2061 prepare_to_wait(sk->sk_sleep, &wait,
2062 TASK_INTERRUPTIBLE);
2066 timeout = schedule_timeout(timeout);
2068 } while (!signal_pending(tsk) && timeout);
2070 finish_wait(sk->sk_sleep, &wait);
2074 /* It is the last release_sock in its life. It will remove backlog. */
2078 /* Now socket is owned by kernel and we acquire BH lock
2079 to finish close. No need to check for user refs.
2083 BUG_TRAP(!sock_owned_by_user(sk));
2088 /* This is a (useful) BSD violation of the RFC. There is a
2089 * problem with TCP as specified in that the other end could
2090 * keep a socket open forever with no application left at this end.
2091 * We use a 3 minute timeout (about the same as BSD) then kill
2092 * our end. If they send after that then tough - BUT: long enough
2093 * that we won't make the old 4*rto = almost no time - whoops
2096 * Nope, it was not mistake. It is really desired behaviour
2097 * f.e. on http servers, when such sockets are useless, but
2098 * consume significant resources. Let's do it with special
2099 * linger2 option. --ANK
2102 if (sk->sk_state == TCP_FIN_WAIT2) {
2103 struct tcp_opt *tp = tcp_sk(sk);
2104 if (tp->linger2 < 0) {
2105 tcp_set_state(sk, TCP_CLOSE);
2106 tcp_send_active_reset(sk, GFP_ATOMIC);
2107 NET_INC_STATS_BH(TCPAbortOnLinger);
2109 int tmo = tcp_fin_time(tp);
2111 if (tmo > TCP_TIMEWAIT_LEN) {
2112 tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
2114 atomic_inc(&tcp_orphan_count);
2115 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2120 if (sk->sk_state != TCP_CLOSE) {
2121 tcp_mem_reclaim(sk);
2122 if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
2123 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
2124 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
2125 if (net_ratelimit())
2126 printk(KERN_INFO "TCP: too many of orphaned "
2128 tcp_set_state(sk, TCP_CLOSE);
2129 tcp_send_active_reset(sk, GFP_ATOMIC);
2130 NET_INC_STATS_BH(TCPAbortOnMemory);
2133 atomic_inc(&tcp_orphan_count);
2135 if (sk->sk_state == TCP_CLOSE)
2136 tcp_destroy_sock(sk);
2137 /* Otherwise, socket is reprieved until protocol close. */
2145 /* These states need RST on ABORT according to RFC793 */
2147 static inline int tcp_need_reset(int state)
2149 return (1 << state) &
2150 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2151 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2154 int tcp_disconnect(struct sock *sk, int flags)
2156 struct inet_opt *inet = inet_sk(sk);
2157 struct tcp_opt *tp = tcp_sk(sk);
2159 int old_state = sk->sk_state;
2161 if (old_state != TCP_CLOSE)
2162 tcp_set_state(sk, TCP_CLOSE);
2164 /* ABORT function of RFC793 */
2165 if (old_state == TCP_LISTEN) {
2166 tcp_listen_stop(sk);
2167 } else if (tcp_need_reset(old_state) ||
2168 (tp->snd_nxt != tp->write_seq &&
2169 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2170 /* The last check adjusts for the discrepancy of Linux wrt. RFC
2173 tcp_send_active_reset(sk, gfp_any());
2174 sk->sk_err = ECONNRESET;
2175 } else if (old_state == TCP_SYN_SENT)
2176 sk->sk_err = ECONNRESET;
2178 tcp_clear_xmit_timers(sk);
2179 __skb_queue_purge(&sk->sk_receive_queue);
2180 tcp_writequeue_purge(sk);
2181 __skb_queue_purge(&tp->out_of_order_queue);
2185 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2186 inet_reset_saddr(sk);
2188 sk->sk_shutdown = 0;
2189 sock_reset_flag(sk, SOCK_DONE);
2191 if ((tp->write_seq += tp->max_window + 2) == 0)
2196 tp->packets_out = 0;
2197 tp->snd_ssthresh = 0x7fffffff;
2198 tp->snd_cwnd_cnt = 0;
2199 tcp_set_ca_state(tp, TCP_CA_Open);
2200 tcp_clear_retrans(tp);
2201 tcp_delack_init(tp);
2202 tp->send_head = NULL;
2207 BUG_TRAP(!inet->num || tp->bind_hash);
2209 sk->sk_error_report(sk);
2214 * Wait for an incoming connection, avoid race
2215 * conditions. This must be called with the socket locked.
2217 static int wait_for_connect(struct sock *sk, long timeo)
2219 struct tcp_opt *tp = tcp_sk(sk);
2224 * True wake-one mechanism for incoming connections: only
2225 * one process gets woken up, not the 'whole herd'.
2226 * Since we do not 'race & poll' for established sockets
2227 * anymore, the common case will execute the loop only once.
2229 * Subtle issue: "add_wait_queue_exclusive()" will be added
2230 * after any current non-exclusive waiters, and we know that
2231 * it will always _stay_ after any new non-exclusive waiters
2232 * because all non-exclusive waiters are added at the
2233 * beginning of the wait-queue. As such, it's ok to "drop"
2234 * our exclusiveness temporarily when we get woken up without
2235 * having to remove and re-insert us on the wait queue.
2238 prepare_to_wait_exclusive(sk->sk_sleep, &wait,
2239 TASK_INTERRUPTIBLE);
2241 if (!tp->accept_queue)
2242 timeo = schedule_timeout(timeo);
2245 if (tp->accept_queue)
2248 if (sk->sk_state != TCP_LISTEN)
2250 err = sock_intr_errno(timeo);
2251 if (signal_pending(current))
2257 finish_wait(sk->sk_sleep, &wait);
2262 * This will accept the next outstanding connection.
2265 struct sock *tcp_accept(struct sock *sk, int flags, int *err)
2267 struct tcp_opt *tp = tcp_sk(sk);
2268 struct open_request *req;
2271 #ifdef CONFIG_ACCEPT_QUEUES
2278 /* We need to make sure that this socket is listening,
2279 * and that it has something pending.
2282 if (sk->sk_state != TCP_LISTEN)
2285 /* Find already established connection */
2286 if (!tp->accept_queue) {
2287 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2288 /* If this is a non blocking socket don't sleep */
2293 error = wait_for_connect(sk, timeo);
2298 #ifndef CONFIG_ACCEPT_QUEUES
2299 req = tp->accept_queue;
2300 if ((tp->accept_queue = req->dl_next) == NULL)
2301 tp->accept_queue_tail = NULL;
2303 tcp_acceptq_removed(sk);
2305 first = tp->class_index;
2306 /* We should always have a request queued here. The accept_queue
2307 * is already checked for NULL above.
2309 while(!tp->acceptq[first].aq_head) {
2310 tp->acceptq[first].aq_cnt = 0;
2311 first = (first+1) & ~NUM_ACCEPT_QUEUES;
2313 req = tp->acceptq[first].aq_head;
2314 tp->acceptq[first].aq_qcount--;
2315 tp->acceptq[first].aq_count++;
2316 tp->acceptq[first].aq_wait_time+=(jiffies - req->acceptq_time_stamp);
2318 for (prev_class= first-1 ; prev_class >=0; prev_class--)
2319 if (tp->acceptq[prev_class].aq_tail)
2322 tp->acceptq[prev_class].aq_tail->dl_next = req->dl_next;
2324 tp->accept_queue = req->dl_next;
2326 if (req == tp->acceptq[first].aq_tail)
2327 tp->acceptq[first].aq_head = tp->acceptq[first].aq_tail = NULL;
2329 tp->acceptq[first].aq_head = req->dl_next;
2331 if((++(tp->acceptq[first].aq_cnt)) >= tp->acceptq[first].aq_ratio){
2332 tp->acceptq[first].aq_cnt = 0;
2333 tp->class_index = ++first & ~NUM_ACCEPT_QUEUES;
2335 tcp_acceptq_removed(sk, req->acceptq_class);
2338 tcp_openreq_fastfree(req);
2339 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
2350 * Socket option code for TCP.
2352 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
2355 struct tcp_opt *tp = tcp_sk(sk);
2359 if (level != SOL_TCP)
2360 return tp->af_specific->setsockopt(sk, level, optname,
2363 if (optlen < sizeof(int))
2366 if (get_user(val, (int *)optval))
2373 /* Values greater than interface MTU won't take effect. However
2374 * at the point when this call is done we typically don't yet
2375 * know which interface is going to be used */
2376 if (val < 8 || val > MAX_TCP_WINDOW) {
2385 /* TCP_NODELAY is weaker than TCP_CORK, so that
2386 * this option on corked socket is remembered, but
2387 * it is not activated until cork is cleared.
2389 * However, when TCP_NODELAY is set we make
2390 * an explicit push, which overrides even TCP_CORK
2391 * for currently queued segments.
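 *
 * From user space this is simply (illustrative sketch):
 *
 *	int one = 1;
 *	setsockopt(sock, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
 *
 * after which small writes are pushed out immediately instead of
 * being held back by the Nagle algorithm.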
2393 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2394 tcp_push_pending_frames(sk, tp);
2396 tp->nonagle &= ~TCP_NAGLE_OFF;
	case TCP_CORK:
		/* When set, indicates that we should always queue non-full
		 * frames.  Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is meant to
		 * be used alongside sendfile() to get properly filled frames
		 * when the user (for example) must write out headers with a
		 * write() call first and then use sendfile to send out the
		 * data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;
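	/*
	 * The write()-headers-then-sendfile() pattern the comment above
	 * describes, as a hedged user-space sketch ('sock' is a connected
	 * TCP socket, 'filefd'/'filesize' an open data file; error handling
	 * omitted):
	 *
	 *	int on = 1, off = 0;
	 *	setsockopt(sock, SOL_TCP, TCP_CORK, &on, sizeof(on));
	 *	write(sock, headers, header_len);	// queued, not sent alone
	 *	sendfile(sock, filefd, NULL, filesize);	// fills out the frames
	 *	setsockopt(sock, SOL_TCP, TCP_CORK, &off, sizeof(off));
	 *						// uncork: push the tail
	 */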
	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				tcp_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
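	/*
	 * Hedged user-space sketch of driving the three keepalive knobs
	 * above ('fd' is a connected TCP socket; idle/intvl are seconds,
	 * cnt is a probe count, and SO_KEEPALIVE must be on for them to
	 * matter):
	 *
	 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
	 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	 *	setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	 *	setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	 *	setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
	 */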
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			tp->syn_retries = val;
		break;
	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;
	case TCP_DEFER_ACCEPT:
		tp->defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of
			 * retransmits */
			while (tp->defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				      tp->defer_accept))
				tp->defer_accept++;
			tp->defer_accept++;
		}
		break;
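	/*
	 * Worked example of the translation above (hedged, assuming the
	 * usual TCP_TIMEOUT_INIT of 3*HZ): retransmission n covers 3<<n
	 * seconds, so val = 10 loops while 10 > 3 and 10 > 6, stops at
	 * 10 <= 12, and the final increment stores defer_accept = 3, i.e.
	 * roughly "hold the request across the first two SYN-ACK
	 * retransmits".  getsockopt() reverses this below with
	 * (TCP_TIMEOUT_INIT / HZ) << (defer_accept - 1), reporting 12s.
	 */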
	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
					   SOCK_MIN_RCVBUF / 2 : val;
		break;
	case TCP_QUICKACK:
		if (!val) {
			tp->ack.pingpong = 1;
		} else {
			tp->ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    tcp_ack_scheduled(tp)) {
				tp->ack.pending |= TCP_ACK_PUSHED;
				cleanup_rbuf(sk, 1);
				if (!(val & 1))
					tp->ack.pingpong = 1;
			}
		}
		break;
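	/*
	 * Hedged user-space sketch: TCP_QUICKACK is a one-shot hint rather
	 * than a sticky mode, so it is typically re-armed around reads that
	 * must be acknowledged promptly ('fd' is a connected TCP socket):
	 *
	 *	int one = 1;
	 *	setsockopt(fd, SOL_TCP, TCP_QUICKACK, &one, sizeof(one));
	 *	read(fd, buf, sizeof(buf));	// ACK is not delayed
	 */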
#ifdef CONFIG_ACCEPT_QUEUES
	case TCP_ACCEPTQ_SHARE:
	{
		char share_wt[NUM_ACCEPT_QUEUES];
		int i, j = 0;

		if (sk->sk_state != TCP_LISTEN) {
			err = -EOPNOTSUPP;
			break;
		}
		if (copy_from_user(share_wt, optval,
				   min_t(unsigned int, optlen,
					 sizeof(share_wt)))) {
			err = -EFAULT;
			break;
		}

		/* The smallest non-zero weight becomes the round-robin
		 * unit; classes with weight 0 are disabled.
		 */
		for (i = 0; i < NUM_ACCEPT_QUEUES; i++) {
			if (share_wt[i]) {
				if (!j || share_wt[i] < j)
					j = share_wt[i];
				tp->acceptq[i].aq_valid = 1;
			} else
				tp->acceptq[i].aq_valid = 0;
		}
		if (!j) {
			/* Class 0 is always valid.  If nothing is
			 * specified set class 0 as 1.
			 */
			share_wt[0] = 1;
			tp->acceptq[0].aq_valid = 1;
			j = 1;
		}
		for (i = 0; i < NUM_ACCEPT_QUEUES; i++) {
			tp->acceptq[i].aq_ratio = share_wt[i] / j;
			tp->acceptq[i].aq_cnt = 0;
		}
	}
		break;
#endif

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
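
/*
 * Hedged sketch of how a listener might use the CONFIG_ACCEPT_QUEUES
 * specific option above (illustrative only): one weight per accept class;
 * classes with weight 0 are disabled and the remaining weights become the
 * round-robin ratios used by tcp_accept().  'lfd' is a listening socket.
 *
 *	char wt[NUM_ACCEPT_QUEUES] = { 4, 2, 1 };	// rest default to 0
 *	setsockopt(lfd, SOL_TCP, TCP_ACCEPTQ_SHARE, wt, sizeof(wt));
 */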

int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval,
		   int *optlen)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int val, len;

	if (level != SOL_TCP)
		return tp->af_specific->getsockopt(sk, level, optname,
						   optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache_std;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = tp->syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
					       (tp->defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;
		u32 now = tcp_time_stamp;

		if (get_user(len, optlen))
			return -EFAULT;

		info.tcpi_state = sk->sk_state;
		info.tcpi_ca_state = tp->ca_state;
		info.tcpi_retransmits = tp->retransmits;
		info.tcpi_probes = tp->probes_out;
		info.tcpi_backoff = tp->backoff;
		info.tcpi_options = 0;
		if (tp->tstamp_ok)
			info.tcpi_options |= TCPI_OPT_TIMESTAMPS;
		if (tp->sack_ok)
			info.tcpi_options |= TCPI_OPT_SACK;
		if (tp->wscale_ok) {
			info.tcpi_options |= TCPI_OPT_WSCALE;
			info.tcpi_snd_wscale = tp->snd_wscale;
			info.tcpi_rcv_wscale = tp->rcv_wscale;
		} else {
			info.tcpi_snd_wscale = 0;
			info.tcpi_rcv_wscale = 0;
		}
		if (tp->ecn_flags & TCP_ECN_OK)
			info.tcpi_options |= TCPI_OPT_ECN;

		info.tcpi_rto = (1000000 * tp->rto) / HZ;
		info.tcpi_ato = (1000000 * tp->ack.ato) / HZ;
		info.tcpi_snd_mss = tp->mss_cache_std;
		info.tcpi_rcv_mss = tp->ack.rcv_mss;

		info.tcpi_unacked = tp->packets_out;
		info.tcpi_sacked = tp->sacked_out;
		info.tcpi_lost = tp->lost_out;
		info.tcpi_retrans = tp->retrans_out;
		info.tcpi_fackets = tp->fackets_out;

		info.tcpi_last_data_sent = ((now - tp->lsndtime) * 1000) / HZ;
		info.tcpi_last_ack_sent = 0;
		info.tcpi_last_data_recv = ((now -
					     tp->ack.lrcvtime) * 1000) / HZ;
		info.tcpi_last_ack_recv = ((now - tp->rcv_tstamp) * 1000) / HZ;

		info.tcpi_pmtu = tp->pmtu_cookie;
		info.tcpi_rcv_ssthresh = tp->rcv_ssthresh;
		info.tcpi_rtt = ((1000000 * tp->srtt) / HZ) >> 3;
		info.tcpi_rttvar = ((1000000 * tp->mdev) / HZ) >> 2;
		info.tcpi_snd_ssthresh = tp->snd_ssthresh;
		info.tcpi_snd_cwnd = tp->snd_cwnd;
		info.tcpi_advmss = tp->advmss;
		info.tcpi_reordering = tp->reordering;

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
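	/*
	 * Hedged user-space sketch of reading the structure filled in above
	 * ('fd' is a connected TCP socket; only a few fields are shown, and
	 * tcpi_rtt is in microseconds per the scaling above):
	 *
	 *	struct tcp_info ti;
	 *	socklen_t tlen = sizeof(ti);
	 *	if (getsockopt(fd, SOL_TCP, TCP_INFO, &ti, &tlen) == 0)
	 *		printf("rtt %uus cwnd %u retrans %u\n",
	 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_retrans);
	 */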
	case TCP_QUICKACK:
		val = !tp->ack.pingpong;
		break;
#ifdef CONFIG_ACCEPT_QUEUES
	case TCP_ACCEPTQ_SHARE: {
		struct tcp_acceptq_info tinfo[NUM_ACCEPT_QUEUES];
		int i;

		if (sk->sk_state != TCP_LISTEN)
			return -EOPNOTSUPP;

		if (get_user(len, optlen))
			return -EFAULT;

		memset(tinfo, 0, sizeof(tinfo));

		for (i = 0; i < NUM_ACCEPT_QUEUES; i++) {
			tinfo[i].acceptq_wait_time =
				tp->acceptq[i].aq_wait_time/(HZ/USER_HZ);
			tinfo[i].acceptq_qcount = tp->acceptq[i].aq_qcount;
			tinfo[i].acceptq_count = tp->acceptq[i].aq_count;
			if (tp->acceptq[i].aq_valid)
				tinfo[i].acceptq_shares = tp->acceptq[i].aq_ratio;
			else
				tinfo[i].acceptq_shares = 0;
		}

		len = min_t(unsigned int, len, sizeof(tinfo));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, (char *)tinfo, len))
			return -EFAULT;
		return 0;
	}
#endif
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

extern void __skb_cb_too_small_for_tcp(int, int);
extern void tcpdiag_init(void);

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
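
/*
 * The established-hash size can thus be forced from the kernel command
 * line, e.g. "thash_entries=16384" (a hedged example value); when it is
 * left at 0, tcp_init() below sizes the table from available memory.
 */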

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long goal;
	int order, i;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
					       sizeof(struct open_request),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL, NULL);
	if (!tcp_openreq_cachep)
		panic("tcp_init: Cannot alloc open_request cache.");

	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
					      sizeof(struct tcp_bind_bucket),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!tcp_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
						sizeof(struct tcp_tw_bucket),
						0, SLAB_HWCACHE_ALIGN,
						NULL, NULL);
	if (!tcp_timewait_cachep)
		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries * sizeof(struct tcp_ehash_bucket)) >> PAGE_SHIFT;
	for (order = 0; (1UL << order) < goal; order++)
		;
	do {
		tcp_ehash_size = (1UL << order) * PAGE_SIZE /
			sizeof(struct tcp_ehash_bucket);
		tcp_ehash_size >>= 1;
		while (tcp_ehash_size & (tcp_ehash_size - 1))
			tcp_ehash_size--;
		tcp_ehash = (struct tcp_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (!tcp_ehash && --order > 0);

	if (!tcp_ehash)
		panic("Failed to allocate TCP established hash table\n");

	for (i = 0; i < (tcp_ehash_size << 1); i++) {
		tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
	}
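	/*
	 * Worked example of the sizing above (hedged, assuming 4 KB pages):
	 * a 512 MB machine has num_physpages == 128 * 1024, so
	 * goal = num_physpages >> 9 = 256 pages and order ends up as 8.
	 * The table carved out of those pages is halved and rounded down
	 * to a power of two, and (tcp_ehash_size << 1) buckets are
	 * initialised because the second half of the table holds the
	 * TIME_WAIT chains.
	 */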
	do {
		tcp_bhash_size = (1UL << order) * PAGE_SIZE /
			sizeof(struct tcp_bind_hashbucket);
		if ((tcp_bhash_size > (64 * 1024)) && order > 0)
			continue;
		tcp_bhash = (struct tcp_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (!tcp_bhash && --order >= 0);

	if (!tcp_bhash)
		panic("Failed to allocate TCP bind hash table\n");

	for (i = 0; i < tcp_bhash_size; i++) {
		tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
	}
	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	if (order >= 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		sysctl_tcp_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		sysctl_tcp_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
	tcp_port_rover = sysctl_local_port_range[0] - 1;

	sysctl_tcp_mem[0] =  768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;
	if (sysctl_tcp_mem[2] - sysctl_tcp_mem[1] > 512)
		sysctl_tcp_mem[1] = sysctl_tcp_mem[2] - 512;
	if (sysctl_tcp_mem[1] - sysctl_tcp_mem[0] > 512)
		sysctl_tcp_mem[0] = sysctl_tcp_mem[1] - 512;
	if (order < 3) {
		sysctl_tcp_wmem[2] = 64 * 1024;
		sysctl_tcp_rmem[0] = PAGE_SIZE;
		sysctl_tcp_rmem[1] = 43689;
		sysctl_tcp_rmem[2] = 2 * 43689;
	}
	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_ehash_size << 1, tcp_bhash_size);
}

EXPORT_SYMBOL(__tcp_mem_reclaim);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
EXPORT_SYMBOL(tcp_accept);
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_close_state);
EXPORT_SYMBOL(tcp_destroy_sock);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_openreq_cachep);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_sockets_allocated);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);
EXPORT_SYMBOL(tcp_write_space);