/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <linux/vs_limit.h>
#include <linux/vs_socket.h>
#include <linux/vs_context.h>
#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work,
					     &tcp_death_row),
	/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);
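/* Segment acceptability test (in the spirit of RFC 793): accept if
 * [seq, end_seq) overlaps the receive window [s_win, e_win), or if a
 * zero-length segment sits exactly at the right window edge.
 */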
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow for the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives a RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * A second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
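/* Note: the fixed timeout used below is TCP_TIMEWAIT_LEN (60 seconds in
 * Linux), not the classical 2*MSL; the rationale is spelled out above.
 */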
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice.  --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = xtime.tv_sec;
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
	/*	Out of window segment.

		All the segments are ACKed immediately.

		The only exception is a new SYN. We accept it, if it is
		not an old duplicate and we are not in danger of being killed
		by delayed old duplicates. The RFC check (that the SYN has a
		newer sequence number) works at rates < 40Mbit/sec.
		However, if PAWS works, it is reliable AND, even more,
		we may even relax the silly seq space cutoff.

		RED-PEN: we violate the main RFC requirement: if this SYN turns
		out to be an old duplicate (i.e. we receive a RST in reply to
		our SYN-ACK), we must return the socket to time-wait state.
		It is not good, but not fatal yet.
	*/
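	/* A timestamp strictly newer than tw_ts_recent also qualifies the SYN
	 * as new; the (s32) cast below keeps the comparison correct across
	 * tsval wraparound (PAWS-style ordering).
	 */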
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
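		/* Hand the caller an ISN above anything the old incarnation
		 * could have used: snd_nxt plus the maximum unscaled window
		 * (65535) plus 2, satisfying rule (1) of the RFC 1122 quote
		 * above. Zero is avoided because when == 0 reads to the
		 * caller as "no ISN override".
		 */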
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
	/* In this case we must reset the TIMEWAIT timer.
	 *
	 * If it is an ACKless SYN it may be both an old duplicate
	 * and a new good SYN with a random sequence number < rcv_nxt.
	 * Do not reschedule in the last case.
	 */
	if (paws_reject || th->ack)
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

	/* Send ACK. Note, we do not put the bucket,
	 * it will be released by the caller.
	 */
	return TCP_TW_ACK;
}
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
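		/* (RTO << 2) - (RTO >> 1) = 4*RTO - RTO/2 = 3.5*RTO, the
		 * timeout used below for recycled TIME-WAIT sockets.
		 */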
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

		tw->tw_xid		= sk->sk_xid;
		tw->tw_vx_info		= NULL;
		tw->tw_nid		= sk->sk_nid;
		tw->tw_nx_info		= NULL;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;
		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
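		/* The peer's SYN and our SYN-ACK each consume one sequence
		 * number, hence the +1 on the initial receive- and send-side
		 * sequence variables below.
		 */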
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;
		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;
		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = treq->rcv_isn + 1;
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = treq->rcv_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.eff_sacks = 0;

		newtp->rx_opt.num_sacks = 0;
		newtp->urg_data = 0;
		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));
		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->rx_opt.sack_ok |= 2;
		}
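		/* sack_ok bit 0 records "SACK permitted" from the handshake;
		 * bit 1 (the |= 2 above) additionally enables FACK-based
		 * loss detection.
		 */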
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
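		/* The skb at this point is the ACK that completes the
		 * handshake, so, unlike a SYN, its window field is already
		 * subject to the negotiated scale factor.
		 */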
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
		newtp->max_window = newtp->snd_wnd;
		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcphdr *th = skb->h.th;
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data: backdate by the initial RTO,
			 * doubled once per retransmitted SYN-ACK.
			 */
			tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&tmp_opt, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should rely only
		 *  on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken; however, it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes. So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare. The probability is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK from a
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}
	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}
	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}
	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop a bare ACK. */
	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}
	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
							 req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;
listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us anymore.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);