2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed where wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
57 * Alan Cox : Tidied tcp_data to avoid a potential
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general performance improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request, waiting for ack
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
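 *
 * Editor's note (illustrative walk-through, not from the original source):
 * a typical traversal of the states above. The side that calls close()
 * first goes ESTABLISHED -> FIN_WAIT1 (FIN sent) -> FIN_WAIT2 (our FIN
 * acked) -> TIME_WAIT (peer's FIN received and acked) -> CLOSE after the
 * 2*MSL timeout. The passive side mirrors it with ESTABLISHED ->
 * CLOSE_WAIT (peer's FIN seen) -> LAST_ACK (our FIN sent after close())
 * -> CLOSE once that FIN is acked. A simultaneous close passes through
 * CLOSING instead of FIN_WAIT2.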
250 #include <linux/module.h>
251 #include <linux/types.h>
252 #include <linux/fcntl.h>
253 #include <linux/poll.h>
254 #include <linux/init.h>
255 #include <linux/smp_lock.h>
256 #include <linux/fs.h>
257 #include <linux/random.h>
258 #include <linux/bootmem.h>
259 #include <linux/cache.h>
260 #include <linux/err.h>
261 #include <linux/crypto.h>
262 #include <linux/in.h>
264 #include <net/icmp.h>
266 #include <net/xfrm.h>
268 #include <net/netdma.h>
270 #include <asm/uaccess.h>
271 #include <asm/ioctls.h>
273 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
275 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
277 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
279 EXPORT_SYMBOL_GPL(tcp_orphan_count);
281 int sysctl_tcp_mem[3] __read_mostly;
282 int sysctl_tcp_wmem[3] __read_mostly;
283 int sysctl_tcp_rmem[3] __read_mostly;
285 EXPORT_SYMBOL(sysctl_tcp_mem);
286 EXPORT_SYMBOL(sysctl_tcp_rmem);
287 EXPORT_SYMBOL(sysctl_tcp_wmem);
289 atomic_t tcp_memory_allocated; /* Current allocated memory. */
290 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
292 EXPORT_SYMBOL(tcp_memory_allocated);
293 EXPORT_SYMBOL(tcp_sockets_allocated);
296 * Pressure flag: try to collapse.
297 * Technical note: it is used by multiple contexts non atomically.
298 * All the sk_stream_mem_schedule() is of this nature: accounting
299 * is strict, actions are advisory and have some latency.
301 int tcp_memory_pressure;
303 EXPORT_SYMBOL(tcp_memory_pressure);
305 void tcp_enter_memory_pressure(void)
307 if (!tcp_memory_pressure) {
308 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
309 tcp_memory_pressure = 1;
313 EXPORT_SYMBOL(tcp_enter_memory_pressure);
316 * Wait for a TCP event.
318 * Note that we don't need to lock the socket, as the upper poll layers
319 * take care of normal races (between the test and the event) and we don't
320 * go look at any of the socket buffers directly.
322 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
325 struct sock *sk = sock->sk;
326 struct tcp_sock *tp = tcp_sk(sk);
328 poll_wait(file, sk->sk_sleep, wait);
329 if (sk->sk_state == TCP_LISTEN)
330 return inet_csk_listen_poll(sk);
332 /* Socket is not locked. We are protected from async events
333 by poll logic and correct handling of state changes
334 * made by other threads is impossible in any case.
342 * POLLHUP is certainly not done right. But poll() doesn't
343 * have a notion of HUP in just one direction, and for a
344 * socket the read side is more interesting.
346 * Some poll() documentation says that POLLHUP is incompatible
347 * with the POLLOUT/POLLWR flags, so somebody should check this
348 * all. But careful, it tends to be safer to return too many
349 * bits than too few, and you can easily break real applications
350 * if you don't tell them that something has hung up!
354 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
355 * our fs/select.c). It means that after we received EOF,
356 * poll always returns immediately, making impossible poll() on write()
357 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
358 * if and only if shutdown has been made in both directions.
359 * Actually, it is interesting to look how Solaris and DUX
360 * solve this dilemma. I would prefer, if POLLHUP were maskable,
361 * then we could set it on SND_SHUTDOWN. BTW examples given
362 * in Stevens' books assume exactly this behaviour, it explains
363 * why POLLHUP is incompatible with POLLOUT. --ANK
365 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
366 * blocking on fresh not-connected or disconnected socket. --ANK
368 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
370 if (sk->sk_shutdown & RCV_SHUTDOWN)
371 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
374 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
375 /* Potential race condition. If the read of tp below
376 * escapes above sk->sk_state, we can be illegally awakened
377 * in SYN_* states. */
378 if ((tp->rcv_nxt != tp->copied_seq) &&
379 (tp->urg_seq != tp->copied_seq ||
380 tp->rcv_nxt != tp->copied_seq + 1 ||
381 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
382 mask |= POLLIN | POLLRDNORM;
384 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
385 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
386 mask |= POLLOUT | POLLWRNORM;
387 } else { /* send SIGIO later */
388 set_bit(SOCK_ASYNC_NOSPACE,
389 &sk->sk_socket->flags);
390 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
392 /* Race breaker. If space is freed after
393 * wspace test but before the flags are set,
394 * IO signal will be lost.
396 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
397 mask |= POLLOUT | POLLWRNORM;
401 if (tp->urg_data & TCP_URG_VALID)
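/*
 * Editor's illustrative sketch (user space, not part of this file): how the
 * mask computed above is typically consumed. POLLIN/POLLRDNORM mean data (or
 * a FIN) can be read, POLLPRI signals urgent data, and POLLRDHUP (if
 * requested, needs _GNU_SOURCE) reports a half-closed peer. Assumes an
 * already-connected socket sock_fd and hypothetical helper functions.
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN | POLLRDHUP };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0) {
 *		if (pfd.revents & POLLPRI)
 *			handle_urgent(sock_fd);		// hypothetical helper
 *		if (pfd.revents & (POLLIN | POLLRDNORM))
 *			read_some(sock_fd);		// hypothetical helper
 *		if (pfd.revents & (POLLRDHUP | POLLHUP))
 *			peer_closed(sock_fd);		// hypothetical helper
 *	}
 */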
407 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
409 struct tcp_sock *tp = tcp_sk(sk);
414 if (sk->sk_state == TCP_LISTEN)
418 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
420 else if (sock_flag(sk, SOCK_URGINLINE) ||
422 before(tp->urg_seq, tp->copied_seq) ||
423 !before(tp->urg_seq, tp->rcv_nxt)) {
424 answ = tp->rcv_nxt - tp->copied_seq;
426 /* Subtract 1, if FIN is in queue. */
427 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
429 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
431 answ = tp->urg_seq - tp->copied_seq;
435 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
438 if (sk->sk_state == TCP_LISTEN)
441 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
444 answ = tp->write_seq - tp->snd_una;
450 return put_user(answ, (int __user *)arg);
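/*
 * Editor's illustrative sketch (user space, not part of this file): the two
 * queue-size ioctls handled above. SIOCINQ (FIONREAD) reports the bytes
 * queued for reading, minus one if a FIN is in the queue; SIOCOUTQ reports
 * the bytes written but not yet acknowledged. Assumes a connected sock_fd.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int unread = 0, unacked = 0;
 *	if (ioctl(sock_fd, SIOCINQ, &unread) == 0 &&
 *	    ioctl(sock_fd, SIOCOUTQ, &unacked) == 0)
 *		printf("%d bytes to read, %d bytes not yet acked\n",
 *		       unread, unacked);
 */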
453 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
455 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
456 tp->pushed_seq = tp->write_seq;
459 static inline int forced_push(struct tcp_sock *tp)
461 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
464 static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
467 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
470 tcb->seq = tcb->end_seq = tp->write_seq;
471 tcb->flags = TCPCB_FLAG_ACK;
473 skb_header_release(skb);
474 __skb_queue_tail(&sk->sk_write_queue, skb);
475 sk_charge_skb(sk, skb);
476 if (!sk->sk_send_head)
477 sk->sk_send_head = skb;
478 if (tp->nonagle & TCP_NAGLE_PUSH)
479 tp->nonagle &= ~TCP_NAGLE_PUSH;
482 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
485 if (flags & MSG_OOB) {
487 tp->snd_up = tp->write_seq;
488 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
492 static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
493 int mss_now, int nonagle)
495 if (sk->sk_send_head) {
496 struct sk_buff *skb = sk->sk_write_queue.prev;
497 if (!(flags & MSG_MORE) || forced_push(tp))
498 tcp_mark_push(tp, skb);
499 tcp_mark_urg(tp, flags, skb);
500 __tcp_push_pending_frames(sk, tp, mss_now,
501 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
505 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
506 size_t psize, int flags)
508 struct tcp_sock *tp = tcp_sk(sk);
509 int mss_now, size_goal;
512 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
514 /* Wait for a connection to finish. */
515 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
516 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
519 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
521 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
522 size_goal = tp->xmit_size_goal;
526 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
530 struct sk_buff *skb = sk->sk_write_queue.prev;
531 struct page *page = pages[poffset / PAGE_SIZE];
532 int copy, i, can_coalesce;
533 int offset = poffset % PAGE_SIZE;
534 int size = min_t(size_t, psize, PAGE_SIZE - offset);
536 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
538 if (!sk_stream_memory_free(sk))
539 goto wait_for_sndbuf;
541 skb = sk_stream_alloc_pskb(sk, 0, 0,
544 goto wait_for_memory;
546 skb_entail(sk, tp, skb);
553 i = skb_shinfo(skb)->nr_frags;
554 can_coalesce = skb_can_coalesce(skb, i, page, offset);
555 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
556 tcp_mark_push(tp, skb);
559 if (!sk_stream_wmem_schedule(sk, copy))
560 goto wait_for_memory;
563 skb_shinfo(skb)->frags[i - 1].size += copy;
566 skb_fill_page_desc(skb, i, page, offset, copy);
570 skb->data_len += copy;
571 skb->truesize += copy;
572 sk->sk_wmem_queued += copy;
573 sk->sk_forward_alloc -= copy;
574 skb->ip_summed = CHECKSUM_PARTIAL;
575 tp->write_seq += copy;
576 TCP_SKB_CB(skb)->end_seq += copy;
577 skb_shinfo(skb)->gso_segs = 0;
580 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
584 if (!(psize -= copy))
587 if (skb->len < mss_now || (flags & MSG_OOB))
590 if (forced_push(tp)) {
591 tcp_mark_push(tp, skb);
592 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
593 } else if (skb == sk->sk_send_head)
594 tcp_push_one(sk, mss_now);
598 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
601 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
603 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
606 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
607 size_goal = tp->xmit_size_goal;
612 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
619 return sk_stream_error(sk, flags, err);
622 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
623 size_t size, int flags)
626 struct sock *sk = sock->sk;
628 if (!(sk->sk_route_caps & NETIF_F_SG) ||
629 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
630 return sock_no_sendpage(sock, page, offset, size, flags);
634 res = do_tcp_sendpages(sk, &page, offset, size, flags);
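/*
 * Editor's illustrative sketch (user space, not part of this file): the
 * zero-copy path above is what sendfile(2) on a TCP socket ends up using
 * when the route's device advertises scatter-gather and checksum offload;
 * otherwise sock_no_sendpage() falls back to ordinary copying. Assumes an
 * open file descriptor file_fd and a connected socket sock_fd.
 *
 *	#include <sys/sendfile.h>
 *
 *	off_t off = 0;
 *	ssize_t sent = sendfile(sock_fd, file_fd, &off, count);
 *	if (sent < 0)
 *		perror("sendfile");
 */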
640 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
641 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
643 static inline int select_size(struct sock *sk, struct tcp_sock *tp)
645 int tmp = tp->mss_cache;
647 if (sk->sk_route_caps & NETIF_F_SG) {
651 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
653 if (tmp >= pgbreak &&
654 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
662 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
666 struct tcp_sock *tp = tcp_sk(sk);
669 int mss_now, size_goal;
676 flags = msg->msg_flags;
677 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
679 /* Wait for a connection to finish. */
680 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
681 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
684 /* This should be in poll */
685 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
687 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
688 size_goal = tp->xmit_size_goal;
690 /* Ok commence sending. */
691 iovlen = msg->msg_iovlen;
696 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
699 while (--iovlen >= 0) {
700 int seglen = iov->iov_len;
701 unsigned char __user *from = iov->iov_base;
708 skb = sk->sk_write_queue.prev;
710 if (!sk->sk_send_head ||
711 (copy = size_goal - skb->len) <= 0) {
714 /* Allocate new segment. If the interface is SG,
715 * allocate skb fitting to single page.
717 if (!sk_stream_memory_free(sk))
718 goto wait_for_sndbuf;
720 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
721 0, sk->sk_allocation);
723 goto wait_for_memory;
726 * Check whether we can use HW checksum.
728 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
729 skb->ip_summed = CHECKSUM_PARTIAL;
731 skb_entail(sk, tp, skb);
735 /* Try to append data to the end of skb. */
739 /* Where to copy to? */
740 if (skb_tailroom(skb) > 0) {
741 /* We have some space in skb head. Superb! */
742 if (copy > skb_tailroom(skb))
743 copy = skb_tailroom(skb);
744 if ((err = skb_add_data(skb, from, copy)) != 0)
748 int i = skb_shinfo(skb)->nr_frags;
749 struct page *page = TCP_PAGE(sk);
750 int off = TCP_OFF(sk);
752 if (skb_can_coalesce(skb, i, page, off) &&
754 /* We can extend the last page
757 } else if (i == MAX_SKB_FRAGS ||
759 !(sk->sk_route_caps & NETIF_F_SG))) {
760 /* Need to add new fragment and cannot
761 * do this because interface is non-SG,
762 * or because all the page slots are
764 tcp_mark_push(tp, skb);
767 if (off == PAGE_SIZE) {
769 TCP_PAGE(sk) = page = NULL;
775 if (copy > PAGE_SIZE - off)
776 copy = PAGE_SIZE - off;
778 if (!sk_stream_wmem_schedule(sk, copy))
779 goto wait_for_memory;
782 /* Allocate new cache page. */
783 if (!(page = sk_stream_alloc_page(sk)))
784 goto wait_for_memory;
787 /* Time to copy data. We are close to
789 err = skb_copy_to_page(sk, from, skb, page,
792 /* If this page was new, give it to the
793 * socket so it does not get leaked.
802 /* Update the skb. */
804 skb_shinfo(skb)->frags[i - 1].size +=
807 skb_fill_page_desc(skb, i, page, off, copy);
810 } else if (off + copy < PAGE_SIZE) {
816 TCP_OFF(sk) = off + copy;
820 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
822 tp->write_seq += copy;
823 TCP_SKB_CB(skb)->end_seq += copy;
824 skb_shinfo(skb)->gso_segs = 0;
828 if ((seglen -= copy) == 0 && iovlen == 0)
831 if (skb->len < mss_now || (flags & MSG_OOB))
834 if (forced_push(tp)) {
835 tcp_mark_push(tp, skb);
836 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
837 } else if (skb == sk->sk_send_head)
838 tcp_push_one(sk, mss_now);
842 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
845 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
847 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
850 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
851 size_goal = tp->xmit_size_goal;
857 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
864 if (sk->sk_send_head == skb)
865 sk->sk_send_head = NULL;
866 __skb_unlink(skb, &sk->sk_write_queue);
867 sk_stream_free_skb(sk, skb);
874 err = sk_stream_error(sk, flags, err);
881 * Handle reading urgent data. BSD has very simple semantics for
882 * this, no blocking and very strange errors 8)
885 static int tcp_recv_urg(struct sock *sk, long timeo,
886 struct msghdr *msg, int len, int flags,
889 struct tcp_sock *tp = tcp_sk(sk);
891 /* No URG data to read. */
892 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
893 tp->urg_data == TCP_URG_READ)
894 return -EINVAL; /* Yes this is right ! */
896 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
899 if (tp->urg_data & TCP_URG_VALID) {
901 char c = tp->urg_data;
903 if (!(flags & MSG_PEEK))
904 tp->urg_data = TCP_URG_READ;
906 /* Read urgent data. */
907 msg->msg_flags |= MSG_OOB;
910 if (!(flags & MSG_TRUNC))
911 err = memcpy_toiovec(msg->msg_iov, &c, 1);
914 msg->msg_flags |= MSG_TRUNC;
916 return err ? -EFAULT : len;
919 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
922 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
923 * the available implementations agree in this case:
924 * this call should never block, independent of the
925 * blocking state of the socket.
926 * Mike <pall@rz.uni-karlsruhe.de>
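 *
 * Editor's illustrative sketch (user space, not part of this file): reading
 * the single byte of urgent data handled above. As noted, the call never
 * blocks; if there is no urgent data, or SO_OOBINLINE is set so the byte
 * stays in the normal stream, it fails with EINVAL. Assumes a connected
 * sock_fd and a hypothetical handler.
 *
 *	char oob;
 *	ssize_t n = recv(sock_fd, &oob, 1, MSG_OOB);
 *	if (n == 1)
 *		handle_urgent_byte(oob);	// hypothetical helper
 *	else if (n < 0 && errno == EINVAL)
 *		;	// no urgent data, or SO_OOBINLINE is set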
931 /* Clean up the receive buffer for full frames taken by the user,
932 * then send an ACK if necessary. COPIED is the number of bytes
933 * tcp_recvmsg has given to the user so far; it speeds up the
934 * calculation of whether or not we must ACK for the sake of
937 void tcp_cleanup_rbuf(struct sock *sk, int copied)
939 struct tcp_sock *tp = tcp_sk(sk);
943 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
945 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
948 if (inet_csk_ack_scheduled(sk)) {
949 const struct inet_connection_sock *icsk = inet_csk(sk);
950 /* Delayed ACKs frequently hit locked sockets during bulk
952 if (icsk->icsk_ack.blocked ||
953 /* Once-per-two-segments ACK was not sent by tcp_input.c */
954 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
956 * If this read emptied read buffer, we send ACK, if
957 * connection is not bidirectional, user drained
958 * receive buffer and there was a small segment
962 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
963 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
964 !icsk->icsk_ack.pingpong)) &&
965 !atomic_read(&sk->sk_rmem_alloc)))
969 /* We send an ACK if we can now advertise a non-zero window
970 * which has been raised "significantly".
972 * Even if window raised up to infinity, do not send window open ACK
973 * in states, where we will not receive more. It is useless.
975 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
976 __u32 rcv_window_now = tcp_receive_window(tp);
978 /* Optimize, __tcp_select_window() is not cheap. */
979 if (2*rcv_window_now <= tp->window_clamp) {
980 __u32 new_window = __tcp_select_window(sk);
982 /* Send ACK now, if this read freed lots of space
983 * in our buffer. new_window is the new window; we can
984 * advertise it now, if it is not less than the current one.
985 * "Lots" means "at least twice" here.
987 if (new_window && new_window >= 2 * rcv_window_now)
995 static void tcp_prequeue_process(struct sock *sk)
998 struct tcp_sock *tp = tcp_sk(sk);
1000 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1002 /* RX process wants to run with disabled BHs, though it is not
1005 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1006 sk->sk_backlog_rcv(sk, skb);
1009 /* Clear memory counter. */
1010 tp->ucopy.memory = 0;
1013 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1015 struct sk_buff *skb;
1018 skb_queue_walk(&sk->sk_receive_queue, skb) {
1019 offset = seq - TCP_SKB_CB(skb)->seq;
1022 if (offset < skb->len || skb->h.th->fin) {
1031 * This routine provides an alternative to tcp_recvmsg() for routines
1032 * that would like to handle copying from skbuffs directly in 'sendfile'
1035 * - It is assumed that the socket was locked by the caller.
1036 * - The routine does not block.
1037 * - At present, there is no support for reading OOB data
1038 * or for 'peeking' the socket using this routine
1039 * (although both would be easy to implement).
1041 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1042 sk_read_actor_t recv_actor)
1044 struct sk_buff *skb;
1045 struct tcp_sock *tp = tcp_sk(sk);
1046 u32 seq = tp->copied_seq;
1050 if (sk->sk_state == TCP_LISTEN)
1052 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1053 if (offset < skb->len) {
1056 len = skb->len - offset;
1057 /* Stop reading if we hit a patch of urgent data */
1059 u32 urg_offset = tp->urg_seq - seq;
1060 if (urg_offset < len)
1065 used = recv_actor(desc, skb, offset, len);
1071 if (offset != skb->len)
1074 if (skb->h.th->fin) {
1075 sk_eat_skb(sk, skb, 0);
1079 sk_eat_skb(sk, skb, 0);
1083 tp->copied_seq = seq;
1085 tcp_rcv_space_adjust(sk);
1087 /* Clean up data we have read: This will do ACK frames. */
1089 tcp_cleanup_rbuf(sk, copied);
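/*
 * Editor's illustrative sketch (in-kernel caller, not part of this file):
 * the shape of a recv_actor used with tcp_read_sock(). The descriptor's
 * count limits how much the actor will consume, and consuming less than was
 * offered ends the walk. my_recv_actor and my_state are hypothetical names.
 *
 *	static int my_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *				 unsigned int offset, size_t len)
 *	{
 *		size_t want = min(len, (size_t)desc->count);
 *
 *		// consume 'want' bytes of skb starting at 'offset' ...
 *		desc->count -= want;
 *		return want;
 *	}
 *
 *	read_descriptor_t desc = { .arg.data = my_state, .count = budget };
 *	copied = tcp_read_sock(sk, &desc, my_recv_actor);
 */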
1094 * This routine copies from a sock struct into the user buffer.
1096 * Technical note: in 2.3 we work on _locked_ socket, so that
1097 * tricks with *seq access order and skb->users are not required.
1098 * Probably, code can be easily improved even more.
1101 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1102 size_t len, int nonblock, int flags, int *addr_len)
1104 struct tcp_sock *tp = tcp_sk(sk);
1110 int target; /* Read at least this many bytes */
1112 struct task_struct *user_recv = NULL;
1113 int copied_early = 0;
1117 TCP_CHECK_TIMER(sk);
1120 if (sk->sk_state == TCP_LISTEN)
1123 timeo = sock_rcvtimeo(sk, nonblock);
1125 /* Urgent data needs to be handled specially. */
1126 if (flags & MSG_OOB)
1129 seq = &tp->copied_seq;
1130 if (flags & MSG_PEEK) {
1131 peek_seq = tp->copied_seq;
1135 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1137 #ifdef CONFIG_NET_DMA
1138 tp->ucopy.dma_chan = NULL;
1140 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1141 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
1142 preempt_enable_no_resched();
1143 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
1145 preempt_enable_no_resched();
1149 struct sk_buff *skb;
1152 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1153 if (tp->urg_data && tp->urg_seq == *seq) {
1156 if (signal_pending(current)) {
1157 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1162 /* Next get a buffer. */
1164 skb = skb_peek(&sk->sk_receive_queue);
1169 /* Now that we have two receive queues this
1172 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1173 printk(KERN_INFO "recvmsg bug: copied %X "
1174 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1177 offset = *seq - TCP_SKB_CB(skb)->seq;
1180 if (offset < skb->len)
1184 BUG_TRAP(flags & MSG_PEEK);
1186 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1188 /* Well, if we have backlog, try to process it now.
1190 if (copied >= target && !sk->sk_backlog.tail)
1195 sk->sk_state == TCP_CLOSE ||
1196 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1198 signal_pending(current) ||
1202 if (sock_flag(sk, SOCK_DONE))
1206 copied = sock_error(sk);
1210 if (sk->sk_shutdown & RCV_SHUTDOWN)
1213 if (sk->sk_state == TCP_CLOSE) {
1214 if (!sock_flag(sk, SOCK_DONE)) {
1215 /* This occurs when user tries to read
1216 * from a never connected socket.
1229 if (signal_pending(current)) {
1230 copied = sock_intr_errno(timeo);
1235 tcp_cleanup_rbuf(sk, copied);
1237 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1238 /* Install new reader */
1239 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1240 user_recv = current;
1241 tp->ucopy.task = user_recv;
1242 tp->ucopy.iov = msg->msg_iov;
1245 tp->ucopy.len = len;
1247 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1248 (flags & (MSG_PEEK | MSG_TRUNC)));
1250 /* Ugly... If prequeue is not empty, we have to
1251 * process it before releasing socket, otherwise
1252 * order will be broken at second iteration.
1253 * More elegant solution is required!!!
1255 * Look: we have the following (pseudo)queues:
1257 * 1. packets in flight
1262 * Each queue can be processed only if the next ones
1263 * are empty. At this point we have empty receive_queue.
1264 * But prequeue _can_ be not empty after 2nd iteration,
1265 * when we jumped to start of loop because backlog
1266 * processing added something to receive_queue.
1267 * We cannot release_sock(), because backlog contains
1268 * packets arrived _after_ prequeued ones.
1270 * Shortly, algorithm is clear --- to process all
1271 * the queues in order. We could make it more directly,
1272 * requeueing packets from backlog to prequeue, if
1273 * it is not empty. It is more elegant, but eats cycles,
1276 if (!skb_queue_empty(&tp->ucopy.prequeue))
1279 /* __ Set realtime policy in scheduler __ */
1282 if (copied >= target) {
1283 /* Do not sleep, just process backlog. */
1287 sk_wait_data(sk, &timeo);
1289 #ifdef CONFIG_NET_DMA
1290 tp->ucopy.wakeup = 0;
1296 /* __ Restore normal policy in scheduler __ */
1298 if ((chunk = len - tp->ucopy.len) != 0) {
1299 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1304 if (tp->rcv_nxt == tp->copied_seq &&
1305 !skb_queue_empty(&tp->ucopy.prequeue)) {
1307 tcp_prequeue_process(sk);
1309 if ((chunk = len - tp->ucopy.len) != 0) {
1310 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1316 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1317 if (net_ratelimit())
1318 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1319 current->comm, current->pid);
1320 peek_seq = tp->copied_seq;
1325 /* Ok so how much can we use? */
1326 used = skb->len - offset;
1330 /* Do we have urgent data here? */
1332 u32 urg_offset = tp->urg_seq - *seq;
1333 if (urg_offset < used) {
1335 if (!sock_flag(sk, SOCK_URGINLINE)) {
1347 if (!(flags & MSG_TRUNC)) {
1348 #ifdef CONFIG_NET_DMA
1349 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1350 tp->ucopy.dma_chan = get_softnet_dma();
1352 if (tp->ucopy.dma_chan) {
1353 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1354 tp->ucopy.dma_chan, skb, offset,
1356 tp->ucopy.pinned_list);
1358 if (tp->ucopy.dma_cookie < 0) {
1360 printk(KERN_ALERT "dma_cookie < 0\n");
1362 /* Exception. Bailout! */
1367 if ((offset + used) == skb->len)
1373 err = skb_copy_datagram_iovec(skb, offset,
1374 msg->msg_iov, used);
1376 /* Exception. Bailout! */
1388 tcp_rcv_space_adjust(sk);
1391 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1393 tcp_fast_path_check(sk, tp);
1395 if (used + offset < skb->len)
1400 if (!(flags & MSG_PEEK)) {
1401 sk_eat_skb(sk, skb, copied_early);
1407 /* Process the FIN. */
1409 if (!(flags & MSG_PEEK)) {
1410 sk_eat_skb(sk, skb, copied_early);
1417 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1420 tp->ucopy.len = copied > 0 ? len : 0;
1422 tcp_prequeue_process(sk);
1424 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1425 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1431 tp->ucopy.task = NULL;
1435 #ifdef CONFIG_NET_DMA
1436 if (tp->ucopy.dma_chan) {
1437 struct sk_buff *skb;
1438 dma_cookie_t done, used;
1440 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1442 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1443 tp->ucopy.dma_cookie, &done,
1444 &used) == DMA_IN_PROGRESS) {
1445 /* do partial cleanup of sk_async_wait_queue */
1446 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1447 (dma_async_is_complete(skb->dma_cookie, done,
1448 used) == DMA_SUCCESS)) {
1449 __skb_dequeue(&sk->sk_async_wait_queue);
1454 /* Safe to free early-copied skbs now */
1455 __skb_queue_purge(&sk->sk_async_wait_queue);
1456 dma_chan_put(tp->ucopy.dma_chan);
1457 tp->ucopy.dma_chan = NULL;
1459 if (tp->ucopy.pinned_list) {
1460 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1461 tp->ucopy.pinned_list = NULL;
1465 /* According to UNIX98, msg_name/msg_namelen are ignored
1466 * on connected socket. I was just happy when found this 8) --ANK
1469 /* Clean up data we have read: This will do ACK frames. */
1470 tcp_cleanup_rbuf(sk, copied);
1472 TCP_CHECK_TIMER(sk);
1477 TCP_CHECK_TIMER(sk);
1482 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1487 * State processing on a close. This implements the state shift for
1488 * sending our FIN frame. Note that we only send a FIN for some
1489 * states. A shutdown() may have already sent the FIN, or we may be
1493 static const unsigned char new_state[16] = {
1494 /* current state: new state: action: */
1495 /* (Invalid) */ TCP_CLOSE,
1496 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1497 /* TCP_SYN_SENT */ TCP_CLOSE,
1498 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1499 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1500 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1501 /* TCP_TIME_WAIT */ TCP_CLOSE,
1502 /* TCP_CLOSE */ TCP_CLOSE,
1503 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1504 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1505 /* TCP_LISTEN */ TCP_CLOSE,
1506 /* TCP_CLOSING */ TCP_CLOSING,
1509 static int tcp_close_state(struct sock *sk)
1511 int next = (int)new_state[sk->sk_state];
1512 int ns = next & TCP_STATE_MASK;
1514 tcp_set_state(sk, ns);
1516 return next & TCP_ACTION_FIN;
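/*
 * Editor's note (illustrative, not from the original source): the table is
 * indexed by the current state; the low bits give the next state and
 * TCP_ACTION_FIN tells the caller to emit a FIN. For example, closing an
 * ESTABLISHED socket works out as:
 *
 *	next = new_state[TCP_ESTABLISHED];	// TCP_FIN_WAIT1 | TCP_ACTION_FIN
 *	tcp_set_state(sk, next & TCP_STATE_MASK);	// -> TCP_FIN_WAIT1
 *	return next & TCP_ACTION_FIN;		// non-zero: send a FIN
 */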
1520 * Shutdown the sending side of a connection. Much like close except
1521 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1524 void tcp_shutdown(struct sock *sk, int how)
1526 /* We need to grab some memory, and put together a FIN,
1527 * and then put it into the queue to be sent.
1528 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1530 if (!(how & SEND_SHUTDOWN))
1533 /* If we've already sent a FIN, or it's a closed state, skip this. */
1534 if ((1 << sk->sk_state) &
1535 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1536 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1537 /* Clear out any half completed packets. FIN if needed. */
1538 if (tcp_close_state(sk))
1543 void tcp_close(struct sock *sk, long timeout)
1545 struct sk_buff *skb;
1546 int data_was_unread = 0;
1550 sk->sk_shutdown = SHUTDOWN_MASK;
1552 if (sk->sk_state == TCP_LISTEN) {
1553 tcp_set_state(sk, TCP_CLOSE);
1556 inet_csk_listen_stop(sk);
1558 goto adjudge_to_death;
1561 /* We need to flush the recv. buffs. We do this only on the
1562 * descriptor close, not protocol-sourced closes, because the
1563 * reader process may not have drained the data yet!
1565 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1566 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1568 data_was_unread += len;
1572 sk_stream_mem_reclaim(sk);
1574 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1575 * 3.10, we send a RST here because data was lost. To
1576 * witness the awful effects of the old behavior of always
1577 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1578 * a bulk GET in an FTP client, suspend the process, wait
1579 * for the client to advertise a zero window, then kill -9
1580 * the FTP client, wheee... Note: timeout is always zero
1583 if (data_was_unread) {
1584 /* Unread data was tossed, zap the connection. */
1585 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1586 tcp_set_state(sk, TCP_CLOSE);
1587 tcp_send_active_reset(sk, GFP_KERNEL);
1588 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1589 /* Check zero linger _after_ checking for unread data. */
1590 sk->sk_prot->disconnect(sk, 0);
1591 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1592 } else if (tcp_close_state(sk)) {
1593 /* We FIN if the application ate all the data before
1594 * zapping the connection.
1597 /* RED-PEN. Formally speaking, we have broken TCP state
1598 * machine. State transitions:
1600 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1601 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1602 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1604 * are legal only when FIN has been sent (i.e. in window),
1605 * rather than queued out of window. Purists blame.
1607 * F.e. "RFC state" is ESTABLISHED,
1608 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1610 * The visible declinations are that sometimes
1611 * we enter time-wait state, when it is not required really
1612 * (harmless), do not send active resets, when they are
1613 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1614 * they look as CLOSING or LAST_ACK for Linux)
1615 * Probably, I missed some more holelets.
1621 sk_stream_wait_close(sk, timeout);
1624 state = sk->sk_state;
1627 atomic_inc(sk->sk_prot->orphan_count);
1629 /* It is the last release_sock in its life. It will remove backlog. */
1633 /* Now socket is owned by kernel and we acquire BH lock
1634 to finish close. No need to check for user refs.
1638 BUG_TRAP(!sock_owned_by_user(sk));
1640 /* Have we already been destroyed by a softirq or backlog? */
1641 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1644 /* This is a (useful) BSD violation of the RFC. There is a
1645 * problem with TCP as specified in that the other end could
1646 * keep a socket open forever with no application left this end.
1647 * We use a 3 minute timeout (about the same as BSD) then kill
1648 * our end. If they send after that then tough - BUT: long enough
1649 * that we won't make the old 4*rto = almost no time - whoops
1652 * Nope, it was not mistake. It is really desired behaviour
1653 * f.e. on http servers, when such sockets are useless, but
1654 * consume significant resources. Let's do it with special
1655 * linger2 option. --ANK
1658 if (sk->sk_state == TCP_FIN_WAIT2) {
1659 struct tcp_sock *tp = tcp_sk(sk);
1660 if (tp->linger2 < 0) {
1661 tcp_set_state(sk, TCP_CLOSE);
1662 tcp_send_active_reset(sk, GFP_ATOMIC);
1663 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1665 const int tmo = tcp_fin_time(sk);
1667 if (tmo > TCP_TIMEWAIT_LEN) {
1668 inet_csk_reset_keepalive_timer(sk,
1669 tmo - TCP_TIMEWAIT_LEN);
1671 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1676 if (sk->sk_state != TCP_CLOSE) {
1677 sk_stream_mem_reclaim(sk);
1678 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1679 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1680 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1681 if (net_ratelimit())
1682 printk(KERN_INFO "TCP: too many orphaned "
1684 tcp_set_state(sk, TCP_CLOSE);
1685 tcp_send_active_reset(sk, GFP_ATOMIC);
1686 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1690 if (sk->sk_state == TCP_CLOSE)
1691 inet_csk_destroy_sock(sk);
1692 /* Otherwise, socket is reprieved until protocol close. */
1700 /* These states need RST on ABORT according to RFC793 */
1702 static inline int tcp_need_reset(int state)
1704 return (1 << state) &
1705 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1706 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1709 int tcp_disconnect(struct sock *sk, int flags)
1711 struct inet_sock *inet = inet_sk(sk);
1712 struct inet_connection_sock *icsk = inet_csk(sk);
1713 struct tcp_sock *tp = tcp_sk(sk);
1715 int old_state = sk->sk_state;
1717 if (old_state != TCP_CLOSE)
1718 tcp_set_state(sk, TCP_CLOSE);
1720 /* ABORT function of RFC793 */
1721 if (old_state == TCP_LISTEN) {
1722 inet_csk_listen_stop(sk);
1723 } else if (tcp_need_reset(old_state) ||
1724 (tp->snd_nxt != tp->write_seq &&
1725 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1726 /* The last check adjusts for discrepancy of Linux wrt. RFC
1729 tcp_send_active_reset(sk, gfp_any());
1730 sk->sk_err = ECONNRESET;
1731 } else if (old_state == TCP_SYN_SENT)
1732 sk->sk_err = ECONNRESET;
1734 tcp_clear_xmit_timers(sk);
1735 __skb_queue_purge(&sk->sk_receive_queue);
1736 sk_stream_writequeue_purge(sk);
1737 __skb_queue_purge(&tp->out_of_order_queue);
1738 #ifdef CONFIG_NET_DMA
1739 __skb_queue_purge(&sk->sk_async_wait_queue);
1744 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1745 inet_reset_saddr(sk);
1747 sk->sk_shutdown = 0;
1748 sock_reset_flag(sk, SOCK_DONE);
1750 if ((tp->write_seq += tp->max_window + 2) == 0)
1752 icsk->icsk_backoff = 0;
1754 icsk->icsk_probes_out = 0;
1755 tp->packets_out = 0;
1756 tp->snd_ssthresh = 0x7fffffff;
1757 tp->snd_cwnd_cnt = 0;
1758 tp->bytes_acked = 0;
1759 tcp_set_ca_state(sk, TCP_CA_Open);
1760 tcp_clear_retrans(tp);
1761 inet_csk_delack_init(sk);
1762 sk->sk_send_head = NULL;
1763 tp->rx_opt.saw_tstamp = 0;
1764 tcp_sack_reset(&tp->rx_opt);
1767 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1769 sk->sk_error_report(sk);
1774 * Socket option code for TCP.
1776 static int do_tcp_setsockopt(struct sock *sk, int level,
1777 int optname, char __user *optval, int optlen)
1779 struct tcp_sock *tp = tcp_sk(sk);
1780 struct inet_connection_sock *icsk = inet_csk(sk);
1784 /* This is a string value; all the others are ints */
1785 if (optname == TCP_CONGESTION) {
1786 char name[TCP_CA_NAME_MAX];
1791 val = strncpy_from_user(name, optval,
1792 min(TCP_CA_NAME_MAX-1, optlen));
1798 err = tcp_set_congestion_control(sk, name);
1803 if (optlen < sizeof(int))
1806 if (get_user(val, (int __user *)optval))
1813 /* Values greater than interface MTU won't take effect. However
1814 * at the point when this call is done we typically don't yet
1815 * know which interface is going to be used */
1816 if (val < 8 || val > MAX_TCP_WINDOW) {
1820 tp->rx_opt.user_mss = val;
1825 /* TCP_NODELAY is weaker than TCP_CORK, so that
1826 * this option on corked socket is remembered, but
1827 * it is not activated until cork is cleared.
1829 * However, when TCP_NODELAY is set we make
1830 * an explicit push, which overrides even TCP_CORK
1831 * for currently queued segments.
1833 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1834 tcp_push_pending_frames(sk, tp);
1836 tp->nonagle &= ~TCP_NAGLE_OFF;
1841 /* When set indicates to always queue non-full frames.
1842 * Later the user clears this option and we transmit
1843 * any pending partial frames in the queue. This is
1844 * meant to be used alongside sendfile() to get properly
1845 * filled frames when the user (for example) must write
1846 * out headers with a write() call first and then use
1847 * sendfile to send out the data parts.
1849 * TCP_CORK can be set together with TCP_NODELAY and it is
1850 * stronger than TCP_NODELAY.
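 *
 * Editor's illustrative sketch (user space, not part of this file) of the
 * header-then-sendfile pattern described above:
 *
 *	int on = 1, off = 0;
 *	setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(sock_fd, hdr, hdr_len);			// queued, not yet pushed
 *	sendfile(sock_fd, file_fd, NULL, body_len);
 *	setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *						// uncork: push pending frames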
1853 tp->nonagle |= TCP_NAGLE_CORK;
1855 tp->nonagle &= ~TCP_NAGLE_CORK;
1856 if (tp->nonagle&TCP_NAGLE_OFF)
1857 tp->nonagle |= TCP_NAGLE_PUSH;
1858 tcp_push_pending_frames(sk, tp);
1863 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1866 tp->keepalive_time = val * HZ;
1867 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1868 !((1 << sk->sk_state) &
1869 (TCPF_CLOSE | TCPF_LISTEN))) {
1870 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1871 if (tp->keepalive_time > elapsed)
1872 elapsed = tp->keepalive_time - elapsed;
1875 inet_csk_reset_keepalive_timer(sk, elapsed);
1880 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1883 tp->keepalive_intvl = val * HZ;
1886 if (val < 1 || val > MAX_TCP_KEEPCNT)
1889 tp->keepalive_probes = val;
1892 if (val < 1 || val > MAX_TCP_SYNCNT)
1895 icsk->icsk_syn_retries = val;
1901 else if (val > sysctl_tcp_fin_timeout / HZ)
1904 tp->linger2 = val * HZ;
1907 case TCP_DEFER_ACCEPT:
1908 icsk->icsk_accept_queue.rskq_defer_accept = 0;
1910 /* Translate value in seconds to number of
1912 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
1913 val > ((TCP_TIMEOUT_INIT / HZ) <<
1914 icsk->icsk_accept_queue.rskq_defer_accept))
1915 icsk->icsk_accept_queue.rskq_defer_accept++;
1916 icsk->icsk_accept_queue.rskq_defer_accept++;
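		/* Editor's worked example (assuming TCP_TIMEOUT_INIT is 3*HZ,
		 * i.e. a 3 second initial retransmit): a request of val = 10
		 * seconds loops while 10 > 3 and 10 > 6, stops at 12, and the
		 * final increment leaves rskq_defer_accept = 3, which
		 * getsockopt(TCP_DEFER_ACCEPT) reports back as 3 << 2 = 12
		 * seconds.
		 */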
1920 case TCP_WINDOW_CLAMP:
1922 if (sk->sk_state != TCP_CLOSE) {
1926 tp->window_clamp = 0;
1928 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1929 SOCK_MIN_RCVBUF / 2 : val;
1934 icsk->icsk_ack.pingpong = 1;
1936 icsk->icsk_ack.pingpong = 0;
1937 if ((1 << sk->sk_state) &
1938 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1939 inet_csk_ack_scheduled(sk)) {
1940 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
1941 tcp_cleanup_rbuf(sk, 1);
1943 icsk->icsk_ack.pingpong = 1;
1948 #ifdef CONFIG_TCP_MD5SIG
1950 /* Read the IP->Key mappings from userspace */
1951 err = tp->af_specific->md5_parse(sk, optval, optlen);
1963 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1966 struct inet_connection_sock *icsk = inet_csk(sk);
1968 if (level != SOL_TCP)
1969 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1971 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1974 #ifdef CONFIG_COMPAT
1975 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1976 char __user *optval, int optlen)
1978 if (level != SOL_TCP)
1979 return inet_csk_compat_setsockopt(sk, level, optname,
1981 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1984 EXPORT_SYMBOL(compat_tcp_setsockopt);
1987 /* Return information about state of tcp endpoint in API format. */
1988 void tcp_get_info(struct sock *sk, struct tcp_info *info)
1990 struct tcp_sock *tp = tcp_sk(sk);
1991 const struct inet_connection_sock *icsk = inet_csk(sk);
1992 u32 now = tcp_time_stamp;
1994 memset(info, 0, sizeof(*info));
1996 info->tcpi_state = sk->sk_state;
1997 info->tcpi_ca_state = icsk->icsk_ca_state;
1998 info->tcpi_retransmits = icsk->icsk_retransmits;
1999 info->tcpi_probes = icsk->icsk_probes_out;
2000 info->tcpi_backoff = icsk->icsk_backoff;
2002 if (tp->rx_opt.tstamp_ok)
2003 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2004 if (tp->rx_opt.sack_ok)
2005 info->tcpi_options |= TCPI_OPT_SACK;
2006 if (tp->rx_opt.wscale_ok) {
2007 info->tcpi_options |= TCPI_OPT_WSCALE;
2008 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2009 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2012 if (tp->ecn_flags&TCP_ECN_OK)
2013 info->tcpi_options |= TCPI_OPT_ECN;
2015 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2016 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2017 info->tcpi_snd_mss = tp->mss_cache;
2018 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2020 info->tcpi_unacked = tp->packets_out;
2021 info->tcpi_sacked = tp->sacked_out;
2022 info->tcpi_lost = tp->lost_out;
2023 info->tcpi_retrans = tp->retrans_out;
2024 info->tcpi_fackets = tp->fackets_out;
2026 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2027 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2028 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2030 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2031 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2032 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2033 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2034 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2035 info->tcpi_snd_cwnd = tp->snd_cwnd;
2036 info->tcpi_advmss = tp->advmss;
2037 info->tcpi_reordering = tp->reordering;
2039 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2040 info->tcpi_rcv_space = tp->rcvq_space.space;
2042 info->tcpi_total_retrans = tp->total_retrans;
2045 EXPORT_SYMBOL_GPL(tcp_get_info);
2047 static int do_tcp_getsockopt(struct sock *sk, int level,
2048 int optname, char __user *optval, int __user *optlen)
2050 struct inet_connection_sock *icsk = inet_csk(sk);
2051 struct tcp_sock *tp = tcp_sk(sk);
2054 if (get_user(len, optlen))
2057 len = min_t(unsigned int, len, sizeof(int));
2064 val = tp->mss_cache;
2065 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2066 val = tp->rx_opt.user_mss;
2069 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2072 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2075 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2078 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2081 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2084 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2089 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2091 case TCP_DEFER_ACCEPT:
2092 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2093 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2095 case TCP_WINDOW_CLAMP:
2096 val = tp->window_clamp;
2099 struct tcp_info info;
2101 if (get_user(len, optlen))
2104 tcp_get_info(sk, &info);
2106 len = min_t(unsigned int, len, sizeof(info));
2107 if (put_user(len, optlen))
2109 if (copy_to_user(optval, &info, len))
2114 val = !icsk->icsk_ack.pingpong;
2117 case TCP_CONGESTION:
2118 if (get_user(len, optlen))
2120 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2121 if (put_user(len, optlen))
2123 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2127 return -ENOPROTOOPT;
2130 if (put_user(len, optlen))
2132 if (copy_to_user(optval, &val, len))
2137 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2140 struct inet_connection_sock *icsk = inet_csk(sk);
2142 if (level != SOL_TCP)
2143 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2145 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2148 #ifdef CONFIG_COMPAT
2149 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2150 char __user *optval, int __user *optlen)
2152 if (level != SOL_TCP)
2153 return inet_csk_compat_getsockopt(sk, level, optname,
2155 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2158 EXPORT_SYMBOL(compat_tcp_getsockopt);
2161 struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2163 struct sk_buff *segs = ERR_PTR(-EINVAL);
2168 unsigned int oldlen;
2171 if (!pskb_may_pull(skb, sizeof(*th)))
2175 thlen = th->doff * 4;
2176 if (thlen < sizeof(*th))
2179 if (!pskb_may_pull(skb, thlen))
2182 oldlen = (u16)~skb->len;
2183 __skb_pull(skb, thlen);
2185 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2186 /* Packet is from an untrusted source, reset gso_segs. */
2187 int type = skb_shinfo(skb)->gso_type;
2196 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2199 mss = skb_shinfo(skb)->gso_size;
2200 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2206 segs = skb_segment(skb, features);
2210 len = skb_shinfo(skb)->gso_size;
2211 delta = htonl(oldlen + (thlen + len));
2215 seq = ntohl(th->seq);
2218 th->fin = th->psh = 0;
2220 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2221 (__force u32)delta));
2222 if (skb->ip_summed != CHECKSUM_PARTIAL)
2223 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2230 th->seq = htonl(seq);
2232 } while (skb->next);
2234 delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
2235 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2236 (__force u32)delta));
2237 if (skb->ip_summed != CHECKSUM_PARTIAL)
2238 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2244 EXPORT_SYMBOL(tcp_tso_segment);
2246 #ifdef CONFIG_TCP_MD5SIG
2247 static unsigned long tcp_md5sig_users;
2248 static struct tcp_md5sig_pool **tcp_md5sig_pool;
2249 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2251 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2254 for_each_possible_cpu(cpu) {
2255 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2257 if (p->md5_desc.tfm)
2258 crypto_free_hash(p->md5_desc.tfm);
2266 void tcp_free_md5sig_pool(void)
2268 struct tcp_md5sig_pool **pool = NULL;
2270 spin_lock_bh(&tcp_md5sig_pool_lock);
2271 if (--tcp_md5sig_users == 0) {
2272 pool = tcp_md5sig_pool;
2273 tcp_md5sig_pool = NULL;
2275 spin_unlock_bh(&tcp_md5sig_pool_lock);
2277 __tcp_free_md5sig_pool(pool);
2280 EXPORT_SYMBOL(tcp_free_md5sig_pool);
2282 static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2285 struct tcp_md5sig_pool **pool;
2287 pool = alloc_percpu(struct tcp_md5sig_pool *);
2291 for_each_possible_cpu(cpu) {
2292 struct tcp_md5sig_pool *p;
2293 struct crypto_hash *hash;
2295 p = kzalloc(sizeof(*p), GFP_KERNEL);
2298 *per_cpu_ptr(pool, cpu) = p;
2300 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2301 if (!hash || IS_ERR(hash))
2304 p->md5_desc.tfm = hash;
2308 __tcp_free_md5sig_pool(pool);
2312 struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2314 struct tcp_md5sig_pool **pool;
2318 spin_lock_bh(&tcp_md5sig_pool_lock);
2319 pool = tcp_md5sig_pool;
2320 if (tcp_md5sig_users++ == 0) {
2322 spin_unlock_bh(&tcp_md5sig_pool_lock);
2325 spin_unlock_bh(&tcp_md5sig_pool_lock);
2329 spin_unlock_bh(&tcp_md5sig_pool_lock);
2332 /* we cannot hold spinlock here because this may sleep. */
2333 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
2334 spin_lock_bh(&tcp_md5sig_pool_lock);
2337 spin_unlock_bh(&tcp_md5sig_pool_lock);
2340 pool = tcp_md5sig_pool;
2342 /* oops, it has already been assigned. */
2343 spin_unlock_bh(&tcp_md5sig_pool_lock);
2344 __tcp_free_md5sig_pool(p);
2346 tcp_md5sig_pool = pool = p;
2347 spin_unlock_bh(&tcp_md5sig_pool_lock);
2353 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2355 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2357 struct tcp_md5sig_pool **p;
2358 spin_lock_bh(&tcp_md5sig_pool_lock);
2359 p = tcp_md5sig_pool;
2362 spin_unlock_bh(&tcp_md5sig_pool_lock);
2363 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2366 EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2368 void __tcp_put_md5sig_pool(void)
2370 tcp_free_md5sig_pool();
2373 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
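/*
 * Editor's illustrative sketch (not part of this file): the reference
 * counting pattern the pool above is meant for, using only the helpers
 * defined here. A key-management path holds a long-lived reference via
 * tcp_alloc_md5sig_pool()/tcp_free_md5sig_pool(), while a hashing path
 * briefly grabs the per-cpu entry (cpu chosen by the caller, e.g. get_cpu()):
 *
 *	struct tcp_md5sig_pool *p;
 *
 *	if (!tcp_alloc_md5sig_pool())		// first user allocates the pool
 *		return -ENOMEM;
 *	...
 *	p = __tcp_get_md5sig_pool(cpu);		// takes a reference
 *	if (p) {
 *		// hash via p->md5_desc ...
 *		__tcp_put_md5sig_pool();	// drops the reference
 *	}
 *	...
 *	tcp_free_md5sig_pool();			// key removed: drop long-lived ref
 */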
2376 extern void __skb_cb_too_small_for_tcp(int, int);
2377 extern struct tcp_congestion_ops tcp_reno;
2379 static __initdata unsigned long thash_entries;
2380 static int __init set_thash_entries(char *str)
2384 thash_entries = simple_strtoul(str, &str, 0);
2387 __setup("thash_entries=", set_thash_entries);
2389 void __init tcp_init(void)
2391 struct sk_buff *skb = NULL;
2392 unsigned long limit;
2393 int order, i, max_share;
2395 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2396 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2399 tcp_hashinfo.bind_bucket_cachep =
2400 kmem_cache_create("tcp_bind_bucket",
2401 sizeof(struct inet_bind_bucket), 0,
2402 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
2404 /* Size and allocate the main established and bind bucket
2407 * The methodology is similar to that of the buffer cache.
2409 tcp_hashinfo.ehash =
2410 alloc_large_system_hash("TCP established",
2411 sizeof(struct inet_ehash_bucket),
2413 (num_physpages >= 128 * 1024) ?
2416 &tcp_hashinfo.ehash_size,
2419 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2420 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2421 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2422 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2425 tcp_hashinfo.bhash =
2426 alloc_large_system_hash("TCP bind",
2427 sizeof(struct inet_bind_hashbucket),
2428 tcp_hashinfo.ehash_size,
2429 (num_physpages >= 128 * 1024) ?
2432 &tcp_hashinfo.bhash_size,
2435 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2436 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2437 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2438 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2441 /* Try to be a bit smarter and adjust defaults depending
2442 * on available memory.
2444 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2445 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2449 sysctl_local_port_range[0] = 32768;
2450 sysctl_local_port_range[1] = 61000;
2451 tcp_death_row.sysctl_max_tw_buckets = 180000;
2452 sysctl_tcp_max_orphans = 4096 << (order - 4);
2453 sysctl_max_syn_backlog = 1024;
2454 } else if (order < 3) {
2455 sysctl_local_port_range[0] = 1024 * (3 - order);
2456 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
2457 sysctl_tcp_max_orphans >>= (3 - order);
2458 sysctl_max_syn_backlog = 128;
2461 /* Set the pressure threshold to be a fraction of global memory that
2462 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
2463 * memory, with a floor of 128 pages.
2465 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2466 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2467 limit = max(limit, 128UL);
2468 sysctl_tcp_mem[0] = limit / 4 * 3;
2469 sysctl_tcp_mem[1] = limit;
2470 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
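	/* Editor's worked example (hypothetical machine, 4 KB pages, 256 MB
	 * of RAM, nr_all_pages = 65536): limit = min(65536, 65536) >> 8 = 256,
	 * then (256 * 256) >> 1 = 32768 pages, so sysctl_tcp_mem becomes
	 * {low, pressure, high} = {24576, 32768, 49152} pages, roughly
	 * {96, 128, 192} MB of TCP buffer memory.
	 */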
2472 /* Set per-socket limits to no more than 1/128 the pressure threshold */
2473 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2474 max_share = min(4UL*1024*1024, limit);
2476 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2477 sysctl_tcp_wmem[1] = 16*1024;
2478 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2480 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2481 sysctl_tcp_rmem[1] = 87380;
2482 sysctl_tcp_rmem[2] = max(87380, max_share);
2484 printk(KERN_INFO "TCP: Hash tables configured "
2485 "(established %d bind %d)\n",
2486 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2488 tcp_register_congestion_control(&tcp_reno);
2491 EXPORT_SYMBOL(tcp_close);
2492 EXPORT_SYMBOL(tcp_disconnect);
2493 EXPORT_SYMBOL(tcp_getsockopt);
2494 EXPORT_SYMBOL(tcp_ioctl);
2495 EXPORT_SYMBOL(tcp_poll);
2496 EXPORT_SYMBOL(tcp_read_sock);
2497 EXPORT_SYMBOL(tcp_recvmsg);
2498 EXPORT_SYMBOL(tcp_sendmsg);
2499 EXPORT_SYMBOL(tcp_sendpage);
2500 EXPORT_SYMBOL(tcp_setsockopt);
2501 EXPORT_SYMBOL(tcp_shutdown);
2502 EXPORT_SYMBOL(tcp_statistics);