/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					get read.
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties remove from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
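/*
 * For orientation, the usual walks through the states above: an active
 * close goes ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT ->
 * CLOSE, the passive side goes ESTABLISHED -> CLOSE_WAIT -> LAST_ACK ->
 * CLOSE, and a simultaneous close passes through CLOSING instead of
 * FIN_WAIT2.
 */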
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);

kmem_cache_t *tcp_openreq_cachep;
kmem_cache_t *tcp_bucket_cachep;
kmem_cache_t *tcp_timewait_cachep;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */
/* Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All of tcp_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure;

#define TCP_PAGES(amt) (((amt) + TCP_MEM_QUANTUM - 1) / TCP_MEM_QUANTUM)
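/* A worked example of the accounting quantum, assuming TCP_MEM_QUANTUM
 * is the usual 4096-byte page: TCP_PAGES(1) == 1, TCP_PAGES(4096) == 1,
 * TCP_PAGES(4097) == 2, so a 6000-byte allocation is charged two quanta
 * (8192 bytes) against tcp_memory_allocated.
 */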
int tcp_mem_schedule(struct sock *sk, int size, int kind)
{
	int amt = TCP_PAGES(size);

	sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM;
	atomic_add(amt, &tcp_memory_allocated);

	/* Under limit. */
	if (atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
		if (tcp_memory_pressure)
			tcp_memory_pressure = 0;
		return 1;
	}

	/* Over hard limit. */
	if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) {
		tcp_enter_memory_pressure();
		goto suppress_allocation;
	}

	/* Under pressure. */
	if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1])
		tcp_enter_memory_pressure();

	if (kind) {
		if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0])
			return 1;
	} else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0])
		return 1;

	if (!tcp_memory_pressure ||
	    sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) *
				TCP_PAGES(sk->sk_wmem_queued +
					  atomic_read(&sk->sk_rmem_alloc) +
					  sk->sk_forward_alloc))
		return 1;

suppress_allocation:
	if (!kind) {
		tcp_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM;
	atomic_sub(amt, &tcp_memory_allocated);
	return 0;
}
void __tcp_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) {
		atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM,
			   &tcp_memory_allocated);
		sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1;
		if (tcp_memory_pressure &&
		    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
			tcp_memory_pressure = 0;
	}
}
void tcp_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc += skb->truesize;
}
/*
 * LISTEN is a special case for poll..
 */
static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
					       poll_table *wait)
{
	return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
}
/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_opt *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return tcp_listen_poll(sk, wait);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (tcp_wspace(sk) >= tcp_min_write_space(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
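/* A minimal userspace sketch of the contract implemented above (names
 * are illustrative): a peer that only shut down its sending side shows
 * up as POLLIN followed by a 0-byte read(), not as POLLHUP; POLLHUP is
 * reported only once both directions are shut down.
 *
 *	struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLOUT };
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLHUP)	;  // both directions gone
 *		if (pfd.revents & POLLIN)	;  // data or EOF readable
 *		if (pfd.revents & POLLPRI)	;  // urgent data pending
 *	}
 */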
/*
 * TCP socket write_space callback.
 */
void tcp_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);

		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);

		if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(sock, 2, POLL_OUT);
	}
}
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	};

	return put_user(answ, (int *)arg);
}
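/* A hypothetical userspace sketch of the two queue queries served above:
 *
 *	int unread, unsent;
 *	ioctl(tcp_fd, SIOCINQ, &unread);	// readable bytes, FIN excluded
 *	ioctl(tcp_fd, SIOCOUTQ, &unsent);	// bytes not yet acknowledged
 *
 * SIOCINQ (FIONREAD) deliberately subtracts a queued FIN so a closing
 * peer does not inflate the byte count by one.
 */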
int tcp_listen_start(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	struct tcp_listen_opt *lopt;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	tp->accept_queue = tp->accept_queue_tail = NULL;
	tp->syn_wait_lock = RW_LOCK_UNLOCKED;
	tcp_delack_init(tp);

	lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
	if (!lopt)
		return -ENOMEM;

	memset(lopt, 0, sizeof(struct tcp_listen_opt));
	for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
		if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
			break;
	get_random_bytes(&lopt->hash_rnd, 4);

	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = lopt;
	write_unlock_bh(&tp->syn_wait_lock);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = NULL;
	write_unlock_bh(&tp->syn_wait_lock);
	kfree(lopt);
	return -EADDRINUSE;
}
/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
static void tcp_listen_stop (struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct tcp_listen_opt *lopt = tp->listen_opt;
	struct open_request *acc_req = tp->accept_queue;
	struct open_request *req;
	int i;

	tcp_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = NULL;
	write_unlock_bh(&tp->syn_wait_lock);
	tp->accept_queue = tp->accept_queue_tail = NULL;

	if (lopt->qlen) {
		for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				lopt->qlen--;
				tcp_openreq_free(req);

		/* Following specs, it would be better either to send FIN
		 * (and enter FIN-WAIT-1, it is normal close)
		 * or to send active reset (abort).
		 * Certainly, it is pretty dangerous while synflood, but it is
		 * bad justification for our negligence 8)
		 * To be honest, we are not able to make either
		 * of the variants now.			--ANK
		 */
			}
		}
	}
	BUG_TRAP(!lopt->qlen);

	kfree(lopt);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(!sock_owned_by_user(child));
		sock_hold(child);

		tcp_disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(&tcp_orphan_count);

		tcp_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		tcp_acceptq_removed(sk);
		tcp_openreq_fastfree(req);
	}
	BUG_TRAP(!sk->sk_ack_backlog);
}
/*
 *	Wait for a socket to get into the connected state
 *
 *	Note: Must be called with the socket locked.
 */
static int wait_for_tcp_connect(struct sock *sk, int flags, long *timeo_p)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);

	while ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		if (sk->sk_err)
			return sock_error(sk);
		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(tsk))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		tp->write_pending++;

		release_sock(sk);
		*timeo_p = schedule_timeout(*timeo_p);
		lock_sock(sk);

		finish_wait(sk->sk_sleep, &wait);
		tp->write_pending--;
	}
	return 0;
}
static inline int tcp_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}
/*
 *	Wait for more memory for a socket
 */
static int wait_for_tcp_memory(struct sock *sk, long *timeo)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int err = 0;
	long vm_wait = 0;
	long current_timeo = *timeo;
	DEFINE_WAIT(wait);

	if (tcp_memory_free(sk))
		current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2;

	for (;;) {
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;
		clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		if (tcp_memory_free(sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		tp->write_pending++;
		release_sock(sk);
		if (!tcp_memory_free(sk) || vm_wait)
			current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);
		tp->write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
			    (current_timeo -= vm_wait) < 0)
				current_timeo = 0;
			vm_wait = 0;
		}
		*timeo = current_timeo;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return err;

do_error:
	err = -EPIPE;
	goto out;
do_nonblock:
	err = -EAGAIN;
	goto out;
do_interrupted:
	err = sock_intr_errno(*timeo);
	goto out;
}
static inline int can_coalesce(struct sk_buff *skb, int i, struct page *page,
			       int off)
{
	if (i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline void fill_page_desc(struct sk_buff *skb, int i,
				  struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	frag->page = page;
	frag->page_offset = off;
	frag->size = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}
static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_opt *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
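/* Worked example of the forced-push rule above (numbers illustrative):
 * with max_window == 65535, forced_push() fires once write_seq has
 * advanced more than 32767 bytes past pushed_seq, so a PSH is forced at
 * least every half of the largest observed window even without MSG_OOB
 * or an explicit push from the caller.
 */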
static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
			      struct sk_buff *skb)
{
	skb->csum = 0;
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	__skb_queue_tail(&sk->sk_write_queue, skb);
	tcp_charge_skb(sk, skb);
	if (!tp->send_head)
		tp->send_head = skb;
	else if (tp->nonagle&TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}
static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}
static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
			    int mss_now, int nonagle)
{
	if (tp->send_head) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, tp, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}
static int tcp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int mss_now;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = sk->sk_write_queue.prev;
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tp->send_head || (copy = mss_now - skb->len) <= 0) {
new_segment:
			if (!tcp_memory_free(sk))
				goto wait_for_sndbuf;

			skb = tcp_alloc_pskb(sk, 0, tp->mss_cache,
					     sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, tp, skb);
			copy = mss_now;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		if (can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			fill_page_desc(skb, i, page, offset, copy);
		} else {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->ip_summed = CHECKSUM_HW;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len != mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tp->send_head)
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return tcp_error(sk, flags, err);
}
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
		return sock_no_sendpage(sock, page, offset, size, flags);

#undef TCP_ZC_CSUM_FLAGS

	lock_sock(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	release_sock(sk);
	return res;
}
#define TCP_PAGE(sk)	(inet_sk(sk)->sndmsg_page)
#define TCP_OFF(sk)	(inet_sk(sk)->sndmsg_off)

static inline int tcp_copy_to_page(struct sock *sk, char *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	int err = 0;
	unsigned int csum;

	if (skb->ip_summed == CHECKSUM_NONE) {
		csum = csum_and_copy_from_user(from, page_address(page) + off,
					       copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else {
		if (copy_from_user(page_address(page) + off, from, copy))
			return -EFAULT;
	}

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}
static inline int skb_add_data(struct sk_buff *skb, char *from, int copy)
{
	int err = 0;
	unsigned int csum;
	int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		csum = csum_and_copy_from_user(from, skb_put(skb, copy),
					       copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else {
		if (!copy_from_user(skb_put(skb, copy), from, copy))
			return 0;
	}

	__skb_trim(skb, off);
	return -EFAULT;
}
static inline int select_size(struct sock *sk, struct tcp_opt *tp)
{
	int tmp = tp->mss_cache_std;

	if (sk->sk_route_caps & NETIF_F_SG) {
		int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

		if (tmp >= pgbreak &&
		    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
			tmp = pgbreak;
	}
	return tmp;
}
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_opt *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = sk->sk_write_queue.prev;

			if (!tp->send_head ||
			    (copy = mss_now - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!tcp_memory_free(sk))
					goto wait_for_sndbuf;

				skb = tcp_alloc_pskb(sk, select_size(sk, tp),
						     0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps &
				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
				     NETIF_F_HW_CSUM))
					skb->ip_summed = CHECKSUM_HW;

				skb_entail(sk, tp, skb);
				copy = mss_now;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					    !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					/* If page is cached, align
					 * offset to L1 cache boundary
					 */
					off = (off + L1_CACHE_BYTES - 1) &
					      ~(L1_CACHE_BYTES - 1);
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
					}
				}

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = tcp_alloc_page(sk)))
						goto wait_for_memory;
					off = 0;
				}

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				/* Time to copy data. We are close to
				 * the end! */
				err = tcp_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len != mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tp->send_head)
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		}
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		if (tp->send_head == skb)
			tp->send_head = NULL;
		__skb_unlink(skb, skb->list);
		tcp_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = tcp_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */
static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_opt *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
/*
 *	Release a skb if it is no longer needed. This routine
 *	must be called with interrupts disabled or with the
 *	socket locked so that the sk_buff queue operation is ok.
 */
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (tcp_ack_scheduled(tp)) {
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (tp->ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
		     !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
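/* Worked example of the "at least twice" rule above (numbers
 * illustrative): with window_clamp at 64K and only 8K currently
 * advertised, a read that lets __tcp_select_window() offer 16K or more
 * triggers an immediate window-update ACK; a smaller improvement is
 * left to the delayed-ACK machinery.
 */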
/* Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
static long tcp_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	release_sock(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		timeo = schedule_timeout(timeo);

	lock_sock(sk);
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = tcp_sk(sk);

	NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (skb->h.th->syn)
			offset--;
		if (offset < skb->len || skb->h.th->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}
/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= 0)
				break;
			seq += used;
			copied += used;
			offset += used;
			if (offset != skb->len)
				break;
		}
		if (skb->h.th->fin) {
			tcp_eat_skb(sk, skb);
			++seq;
			break;
		}
		tcp_eat_skb(sk, skb);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	/* Clean up data we have read: This will do ACK frames. */
	if (copied)
		cleanup_rbuf(sk, copied);
	return copied;
}
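/* A minimal sketch of a recv_actor under the assumptions stated above
 * (socket locked, no blocking, no OOB); the name and the consuming step
 * are hypothetical:
 *
 *	static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			    unsigned int offset, size_t len)
 *	{
 *		size_t used = min_t(size_t, len, desc->count);
 *		// consume 'used' bytes starting at 'offset' here
 *		desc->count -= used;
 *		return used;	// returning <= 0 stops tcp_read_sock()
 *	}
 */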
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		struct sk_buff *skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (skb->h.th->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		cleanup_rbuf(sk, copied);

		if (tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (skb_queue_len(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			timeo = tcp_data_wait(sk, timeo);

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    skb_queue_len(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_iovec(skb, offset,
						      msg->msg_iov, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk, tp);
		}
		if (used + offset < skb->len)
			continue;

		if (skb->h.th->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			tcp_eat_skb(sk, skb);
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK))
			tcp_eat_skb(sk, skb);
		break;
	} while (len > 0);

	if (user_recv) {
		if (skb_queue_len(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}
/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	in CLOSING (both sides have sent a FIN and are waiting for an ACK).
 */
static unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};
static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
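/* Example of the table lookup above: close() in TCP_ESTABLISHED yields
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, so the caller moves to FIN_WAIT1 and
 * learns a FIN still has to be sent, while close() in TCP_FIN_WAIT2
 * stays in FIN_WAIT2 with no action bit set.
 */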
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */
void tcp_shutdown(struct sock *sk, int how)
{
	/*	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets.  FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
/*
 *	Return 1 if we still have things to send in our buffers.
 */
static inline int closing(struct sock *sk)
{
	return (1 << sk->sk_state) &
	       (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
}
static __inline__ void tcp_kill_sk_queues(struct sock *sk)
{
	/* First the read buffer. */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Next, the error queue. */
	__skb_queue_purge(&sk->sk_error_queue);

	/* Next, the write queue. */
	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));

	/* Account for returned memory. */
	tcp_mem_reclaim(sk);

	BUG_TRAP(!sk->sk_wmem_queued);
	BUG_TRAP(!sk->sk_forward_alloc);

	/* It is _impossible_ for the backlog to contain anything
	 * when we get here.  All user references to this socket
	 * have gone away, only the net layer can touch it.
	 */
}
/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void tcp_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->sk_state == TCP_CLOSE);
	BUG_TRAP(sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	BUG_TRAP(sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->num, it must be bound */
	BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);

	if (sk->sk_zapped) {
		printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk);
		sock_hold(sk);
	}
	sk->sk_zapped = 1;

	sk->sk_prot->destroy(sk);

	tcp_kill_sk_queues(sk);

	xfrm_sk_free_policy(sk);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&sk->sk_refcnt) != 1) {
		printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
		       sk, atomic_read(&sk->sk_refcnt));
	}
#endif

	atomic_dec(&tcp_orphan_count);
	sock_put(sk);
}
void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		tcp_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  skb->h.th->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	tcp_mem_reclaim(sk);

	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
	 * 3.10, we send a RST here because data was lost.  To
	 * witness the awful effects of the old behavior of always
	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
	 * a bulk GET in an FTP client, suspend the process, wait
	 * for the client to advertise a zero window, then kill -9
	 * the FTP client, wheee...  Note: timeout is always zero
	 * in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(TCPAbortOnClose);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(TCPAbortOnData);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 */
		tcp_send_fin(sk);
	}

	if (timeout) {
		struct task_struct *tsk = current;
		DEFINE_WAIT(wait);

		do {
			prepare_to_wait(sk->sk_sleep, &wait,
					TASK_INTERRUPTIBLE);
			if (!closing(sk))
				break;
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} while (!signal_pending(tsk) && timeout);

		finish_wait(sk->sk_sleep, &wait);
	}
adjudge_to_death:
	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);

	/* Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 *
	 *	Nope, it was not mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_opt *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(TCPAbortOnLinger);
		} else {
			int tmo = tcp_fin_time(tp);

			if (tmo > TCP_TIMEWAIT_LEN) {
				tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
			} else {
				atomic_inc(&tcp_orphan_count);
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		tcp_mem_reclaim(sk);
		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(TCPAbortOnMemory);
		}
	}
	atomic_inc(&tcp_orphan_count);

	if (sk->sk_state == TCP_CLOSE)
		tcp_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}
int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		tcp_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_writequeue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	tp->backoff = 0;
	tp->snd_cwnd = 2;
	tp->probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tcp_set_ca_state(tp, TCP_CA_Open);
	tcp_clear_retrans(tp);
	tcp_delack_init(tp);
	tp->send_head = NULL;
	tcp_sack_reset(tp);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || tp->bind_hash);

	sk->sk_error_report(sk);
	return err;
}
/*
 *	Wait for an incoming connection, avoid race
 *	conditions. This must be called with the socket locked.
 */
static int wait_for_connect(struct sock *sk, long timeo)
{
	struct tcp_opt *tp = tcp_sk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (!tp->accept_queue)
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (tp->accept_queue)
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}
/*
 *	This will accept the next outstanding connection.
 */
struct sock *tcp_accept(struct sock *sk, int flags, int *err)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct open_request *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* Find already established connection */
	if (!tp->accept_queue) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out;

		error = wait_for_connect(sk, timeo);
		if (error)
			goto out;
	}

	req = tp->accept_queue;
	if ((tp->accept_queue = req->dl_next) == NULL)
		tp->accept_queue_tail = NULL;

	newsk = req->sk;
	tcp_acceptq_removed(sk);
	tcp_openreq_fastfree(req);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
	release_sock(sk);
	return newsk;

out:
	release_sock(sk);
	*err = error;
	return NULL;
}
/*
 *	Socket option code for TCP.
 */
int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
		   int optlen)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int val;
	int err = 0;

	if (level != SOL_TCP)
		return tp->af_specific->setsockopt(sk, level, optname,
						   optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
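		/* A hypothetical userspace sketch of that corking pattern
		 * (descriptors and lengths illustrative only):
		 *
		 *	int on = 1, off = 0;
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof on);
		 *	write(fd, hdr, hdrlen);		// queued, not sent
		 *	sendfile(fd, filefd, NULL, bodylen);
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof off);
		 *	// clearing TCP_CORK flushes the pending partial frame
		 */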
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				tcp_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			tp->syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		tp->defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of
			 * retransmits */
			while (tp->defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				       tp->defer_accept))
				tp->defer_accept++;
			tp->defer_accept++;
		}
		break;
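		/* Worked example of the translation loop above, assuming
		 * TCP_TIMEOUT_INIT is the usual 3*HZ: for val = 10 seconds
		 * the loop stops at defer_accept == 2 (since 10 is not
		 * greater than 3 << 2 == 12) and the trailing increment
		 * stores 3, so zero can keep meaning "disabled";
		 * getsockopt() maps it back to (3 << (3 - 1)) == 12 seconds.
		 */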
	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
					   SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			tp->ack.pingpong = 1;
		} else {
			tp->ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    tcp_ack_scheduled(tp)) {
				tp->ack.pending |= TCP_ACK_PUSHED;
				cleanup_rbuf(sk, 1);
				if (!(val & 1))
					tp->ack.pingpong = 1;
			}
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	};
	release_sock(sk);
	return err;
}
int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval,
		   int *optlen)
{
	struct tcp_opt *tp = tcp_sk(sk);
	int val, len;

	if (level != SOL_TCP)
		return tp->af_specific->getsockopt(sk, level, optname,
						   optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache_std;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = tp->syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
					       (tp->defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;
		u32 now = tcp_time_stamp;

		if (get_user(len, optlen))
			return -EFAULT;
		info.tcpi_state = sk->sk_state;
		info.tcpi_ca_state = tp->ca_state;
		info.tcpi_retransmits = tp->retransmits;
		info.tcpi_probes = tp->probes_out;
		info.tcpi_backoff = tp->backoff;
		info.tcpi_options = 0;
		if (tp->tstamp_ok)
			info.tcpi_options |= TCPI_OPT_TIMESTAMPS;
		if (tp->sack_ok)
			info.tcpi_options |= TCPI_OPT_SACK;
		if (tp->wscale_ok) {
			info.tcpi_options |= TCPI_OPT_WSCALE;
			info.tcpi_snd_wscale = tp->snd_wscale;
			info.tcpi_rcv_wscale = tp->rcv_wscale;
		} else {
			info.tcpi_snd_wscale = 0;
			info.tcpi_rcv_wscale = 0;
		}
		if (tp->ecn_flags & TCP_ECN_OK)
			info.tcpi_options |= TCPI_OPT_ECN;

		info.tcpi_rto = (1000000 * tp->rto) / HZ;
		info.tcpi_ato = (1000000 * tp->ack.ato) / HZ;
		info.tcpi_snd_mss = tp->mss_cache_std;
		info.tcpi_rcv_mss = tp->ack.rcv_mss;

		info.tcpi_unacked = tp->packets_out;
		info.tcpi_sacked = tp->sacked_out;
		info.tcpi_lost = tp->lost_out;
		info.tcpi_retrans = tp->retrans_out;
		info.tcpi_fackets = tp->fackets_out;

		info.tcpi_last_data_sent = ((now - tp->lsndtime) * 1000) / HZ;
		info.tcpi_last_ack_sent = 0;
		info.tcpi_last_data_recv = ((now -
					     tp->ack.lrcvtime) * 1000) / HZ;
		info.tcpi_last_ack_recv = ((now - tp->rcv_tstamp) * 1000) / HZ;

		info.tcpi_pmtu = tp->pmtu_cookie;
		info.tcpi_rcv_ssthresh = tp->rcv_ssthresh;
		info.tcpi_rtt = ((1000000 * tp->srtt) / HZ) >> 3;
		info.tcpi_rttvar = ((1000000 * tp->mdev) / HZ) >> 2;
		info.tcpi_snd_ssthresh = tp->snd_ssthresh;
		info.tcpi_snd_cwnd = tp->snd_cwnd;
		info.tcpi_advmss = tp->advmss;
		info.tcpi_reordering = tp->reordering;

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !tp->ack.pingpong;
		break;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
extern void __skb_cb_too_small_for_tcp(int, int);
extern void tcpdiag_init(void);

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long goal;
	int order, i;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
					       sizeof(struct open_request),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL, NULL);
	if (!tcp_openreq_cachep)
		panic("tcp_init: Cannot alloc open_request cache.");

	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
					      sizeof(struct tcp_bind_bucket),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!tcp_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
						sizeof(struct tcp_tw_bucket),
						0, SLAB_HWCACHE_ALIGN,
						NULL, NULL);
	if (!tcp_timewait_cachep)
		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries * sizeof(struct tcp_ehash_bucket)) >> PAGE_SHIFT;
	for (order = 0; (1UL << order) < goal; order++)
		;
	do {
		tcp_ehash_size = (1UL << order) * PAGE_SIZE /
			sizeof(struct tcp_ehash_bucket);
		tcp_ehash_size >>= 1;
		while (tcp_ehash_size & (tcp_ehash_size - 1))
			tcp_ehash_size--;
		tcp_ehash = (struct tcp_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (!tcp_ehash && --order > 0);

	if (!tcp_ehash)
		panic("Failed to allocate TCP established hash table\n");
	for (i = 0; i < (tcp_ehash_size << 1); i++) {
		tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
	}

	do {
		tcp_bhash_size = (1UL << order) * PAGE_SIZE /
			sizeof(struct tcp_bind_hashbucket);
		if ((tcp_bhash_size > (64 * 1024)) && order > 0)
			continue;
		tcp_bhash = (struct tcp_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (!tcp_bhash && --order >= 0);

	if (!tcp_bhash)
		panic("Failed to allocate TCP bind hash table\n");
	for (i = 0; i < tcp_bhash_size; i++) {
		tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	if (order > 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		sysctl_tcp_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		sysctl_tcp_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
	tcp_port_rover = sysctl_local_port_range[0] - 1;

	sysctl_tcp_mem[0] =  768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;
	if (sysctl_tcp_mem[2] - sysctl_tcp_mem[1] > 512)
		sysctl_tcp_mem[1] = sysctl_tcp_mem[2] - 512;
	if (sysctl_tcp_mem[1] - sysctl_tcp_mem[0] > 512)
		sysctl_tcp_mem[0] = sysctl_tcp_mem[1] - 512;

	if (order < 3) {
		sysctl_tcp_wmem[2] = 64 * 1024;
		sysctl_tcp_rmem[0] = PAGE_SIZE;
		sysctl_tcp_rmem[1] = 43689;
		sysctl_tcp_rmem[2] = 2 * 43689;
	}

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_ehash_size << 1, tcp_bhash_size);
}
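/* Worked example of the sizing above, assuming 4096-byte pages
 * (PAGE_SHIFT == 12) and 256MB of memory (65536 physical pages):
 * num_physpages is below 128*1024, so goal = 65536 >> (23 - 12) = 32
 * pages and order becomes 5; tcp_ehash then gets 32 pages of buckets,
 * halved and rounded down to a power-of-two entry count.
 */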
EXPORT_SYMBOL(__tcp_mem_reclaim);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);
EXPORT_SYMBOL(tcp_accept);
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_close_state);
EXPORT_SYMBOL(tcp_destroy_sock);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_openreq_cachep);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_sockets_allocated);
EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);
EXPORT_SYMBOL(tcp_write_space);