/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#include <linux/vs_limit.h>
#include <linux/vs_socket.h>
#include <linux/vs_context.h>
#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif
int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= SPIN_LOCK_UNLOCKED,
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work,
					     &tcp_death_row),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
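
/*
 * Illustrative sketch, not part of the original file: tcp_in_window()
 * is built on the serial-number macros before()/after() from <net/tcp.h>,
 * so the acceptance test stays correct when sequence numbers wrap around
 * 2^32. The demo window below straddles the wrap point.
 */
static __inline__ int tcp_in_window_wrap_demo(void)
{
	const u32 s_win = 0xfffffff0;	/* window opens just below the wrap */
	const u32 e_win = 0x00000010;	/* ...and closes just above it */

	/* A one-byte segment at 0xfffffff8 lies inside [s_win, e_win) and
	 * is accepted, even though 0xfffffff8 > 0x10 as a plain compare. */
	return tcp_in_window(0xfffffff8, 0xfffffff9, s_win, e_win);
}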
/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one end sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that the SYN carries
	   a newer sequence number) only works at rates < 40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement here: if this SYN
	   turns out to be an old duplicate (i.e. we receive an RST in reply
	   to our SYN-ACK), we must return the socket to time-wait state.
	   That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
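
/*
 * Illustrative sketch, not part of the original file: the status values
 * returned above are acted on by the address-family receive path (the
 * do_time_wait handling in tcp_v4_rcv()). This helper only maps each
 * status to a description of that action; the real handlers live in
 * tcp_ipv4.c and tcp_ipv6.c.
 */
static __inline__ const char *tcp_tw_status_demo(enum tcp_tw_status status)
{
	switch (status) {
	case TCP_TW_SYN:
		return "reopen: look up a fresh listener, process the SYN there";
	case TCP_TW_ACK:
		return "re-ACK the peer from the timewait bucket";
	case TCP_TW_RST:
		return "answer the segment with a reset";
	case TCP_TW_SUCCESS:
	default:
		return "segment fully handled, just drop it";
	}
}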
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

		tw->tw_xid		= sk->sk_xid;
		tw->tw_vx_info		= NULL;
		tw->tw_nid		= sk->sk_nid;
		tw->tw_nx_info		= NULL;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
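
/*
 * Illustrative sketch, not part of the original file: the recycle timeout
 * chosen above, rto = (icsk_rto << 2) - (icsk_rto >> 1), is simply
 * 4*RTO - RTO/2 = 3.5*RTO, i.e. long enough to cover a retransmission or
 * two by the peer plus our lost ACKs, as the TIME-WAIT comment explains.
 */
static __inline__ int tcp_tw_recycle_timeout_demo(int icsk_rto)
{
	/* (x << 2) - (x >> 1) == 4x - x/2 == 3.5x, in integer arithmetic */
	return (icsk_rto << 2) - (icsk_rto >> 1);
}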
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of the memory writes here; the tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = treq->rcv_isn + 1;
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = treq->rcv_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->rx_opt.num_sacks = 0;
		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->rx_opt.sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		if (newtp->ecn_flags & TCP_ECN_OK)
			sock_set_flag(newsk, SOCK_NO_LARGESEND);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
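
/*
 * Illustrative sketch, not part of the original file: the snd_wnd setup
 * above applies the peer's advertised window scale from the SYN, per
 * RFC 1323. A raw 16-bit window of 4096 with snd_wscale == 7 yields an
 * effective send window of 4096 << 7 == 524288 bytes.
 */
static __inline__ u32 tcp_scaled_window_demo(u16 raw_window, u8 snd_wscale)
{
	return (u32)raw_window << snd_wscale;
}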
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcphdr *th = skb->h.th;
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from another data.
			 */
			tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All of that description
		 *  is wrong; we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken; however, the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would be not such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* The ACK sequence was verified above; just make sure the ACK bit
	 * is set. If it is not set, silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

	/* OK, the ACK is valid: create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
							 req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
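
/*
 * Illustrative sketch, not part of the original file: the ts_recent_stamp
 * estimate in tcp_check_req() assumes the timestamp was last seen when the
 * original SYN arrived. Since each SYN-ACK retransmission doubles the
 * timeout, that is roughly (TCP_TIMEOUT_INIT/HZ) << retrans seconds ago.
 */
static __inline__ long tcp_req_ts_stamp_age_demo(int retrans)
{
	/* e.g. retrans == 2: 3s << 2 == 12 seconds in the past */
	return (TCP_TIMEOUT_INIT / HZ) << retrans;
}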
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
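
/*
 * Illustrative sketch, not part of the original file, modelled on the
 * listen-path handling in tcp_v4_do_rcv(): the child returned by
 * tcp_check_req() is still bh-locked from its creation, and
 * tcp_child_process() both unlocks and releases it, so the caller must
 * not touch the child afterwards.
 */
static __inline__ int tcp_child_process_usage_demo(struct sock *sk,
						   struct sock *nsk,
						   struct sk_buff *skb)
{
	if (nsk != sk)
		/* hands the segment to the child and drops our reference */
		return tcp_child_process(sk, nsk, skb);
	return 0;
}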
EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);