2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/sysctl.h>
27 #include <linux/workqueue.h>
29 #include <net/inet_common.h>
32 #include <linux/vs_limit.h>
33 #include <linux/vs_socket.h>
34 #include <linux/vs_context.h>
37 #define SYNC_INIT 0 /* let the user enable it */
42 int sysctl_tcp_tw_recycle;
43 int sysctl_tcp_max_tw_buckets = NR_FILE*2;
45 int sysctl_tcp_syncookies = SYNC_INIT;
46 int sysctl_tcp_abort_on_overflow;
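/* Rough meaning of these knobs: tcp_tw_recycle enables fast recycling of
 * TIME-WAIT buckets, tcp_max_tw_buckets caps how many may exist at once,
 * tcp_syncookies defaults to off (SYNC_INIT: let the user enable it), and
 * tcp_abort_on_overflow sends a reset instead of silently dropping when the
 * accept queue overflows. */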
48 static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
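/* Window check helper: a segment [seq, end_seq) is acceptable when it overlaps
 * the receive window [s_win, e_win); a zero-length segment is accepted only
 * right at the window edge. */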
50 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
54 if (after(end_seq, s_win) && before(seq, e_win))
56 return (seq == e_win && seq == end_seq);
59 /* New-style handling of TIME_WAIT sockets. */
64 /* Must be called with locally disabled BHs. */
65 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
67 struct tcp_ehash_bucket *ehead;
68 struct tcp_bind_hashbucket *bhead;
69 struct tcp_bind_bucket *tb;
71 /* Unlink from established hashes. */
72 ehead = &tcp_ehash[tw->tw_hashent];
73 write_lock(&ehead->lock);
74 if (hlist_unhashed(&tw->tw_node)) {
75 write_unlock(&ehead->lock);
78 __hlist_del(&tw->tw_node);
79 sk_node_init(&tw->tw_node);
80 write_unlock(&ehead->lock);
82 /* Disassociate from the bind bucket. */
83 bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
84 spin_lock(&bhead->lock);
86 __hlist_del(&tw->tw_bind_node);
88 tcp_bucket_destroy(tb);
89 spin_unlock(&bhead->lock);
91 #ifdef INET_REFCNT_DEBUG
92 if (atomic_read(&tw->tw_refcnt) != 1) {
93 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
94 atomic_read(&tw->tw_refcnt));
101 * * The main purpose of the TIME-WAIT state is to close a connection gracefully
102 *   when one of the ends sits in LAST-ACK or CLOSING, retransmitting its FIN
103 *   (and, probably, a tail of data) while one or more of our ACKs are lost.
104 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
105 *   lifetime in the internet, which leads to the wrong conclusion that
106 *   it is set to catch "old duplicate segments" wandering off their path.
107 *   That is not quite correct. This timeout is calculated so that it exceeds
108 *   the maximal retransmission timeout by enough to allow the loss of one (or more)
109 *   segments sent by the peer and of our ACKs. This time may be calculated from the RTO.
110 * * When a TIME-WAIT socket receives an RST, it means that the other end has
111 *   finally closed and we are allowed to kill the TIME-WAIT state too.
112 * * The second purpose of TIME-WAIT is catching old duplicate segments.
113 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
114 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
115 * * If we invented some more clever way to catch duplicates
116 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
118 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
119 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
120 * from the very beginning.
122 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
123 * is _not_ stateless. This means that, strictly speaking, we must
124 * spinlock it. I do not want to! The probability of misbehaviour
125 * is ridiculously low and, it seems, we could use some mb() tricks
126 * to avoid misreading sequence numbers, states etc. --ANK
129 tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
130 struct tcphdr *th, unsigned len)
132 struct tcp_options_received tmp_opt;
135 tmp_opt.saw_tstamp = 0;
136 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
137 tcp_parse_options(skb, &tmp_opt, 0);
139 if (tmp_opt.saw_tstamp) {
140 tmp_opt.ts_recent = tw->tw_ts_recent;
141 tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
142 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
146 if (tw->tw_substate == TCP_FIN_WAIT2) {
147 /* Just repeat all the checks of tcp_rcv_state_process() */
149 /* Out of window, send ACK */
151 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
153 tw->tw_rcv_nxt + tw->tw_rcv_wnd))
159 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
163 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
164 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
166 return TCP_TW_SUCCESS;
169 /* New data or FIN. If new data arrive after half-duplex close,
173 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
175 tcp_tw_deschedule(tw);
180 /* FIN arrived, enter true time-wait state. */
181 tw->tw_substate = TCP_TIME_WAIT;
182 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
183 if (tmp_opt.saw_tstamp) {
184 tw->tw_ts_recent_stamp = xtime.tv_sec;
185 tw->tw_ts_recent = tmp_opt.rcv_tsval;
188 /* I am ashamed, but I failed to make this more elegant.
189 * Yes, it is a direct reference to IP, which is impossible
190 * to generalize to IPv6. Taking into account that IPv6
191 * does not understand recycling in any case, this is not
192 * a big problem in practice. --ANK */
193 if (tw->tw_family == AF_INET &&
194 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
195 tcp_v4_tw_remember_stamp(tw))
196 tcp_tw_schedule(tw, tw->tw_timeout);
198 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
203 * Now real TIME-WAIT state.
206 * "When a connection is [...] on TIME-WAIT state [...]
207 * [a TCP] MAY accept a new SYN from the remote TCP to
208 * reopen the connection directly, if it:
210 * (1) assigns its initial sequence number for the new
211 * connection to be larger than the largest sequence
212 * number it used on the previous connection incarnation,
215 * (2) returns to TIME-WAIT state if the SYN turns out
216 * to be an old duplicate".
220 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
221 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
222 /* In-window segment; it may only be a reset or a bare ACK. */
225 /* This is TIME_WAIT assassination, in two flavors.
226 * Oh well... nobody has a sufficient solution to this
229 if (sysctl_tcp_rfc1337 == 0) {
231 tcp_tw_deschedule(tw);
233 return TCP_TW_SUCCESS;
236 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
238 if (tmp_opt.saw_tstamp) {
239 tw->tw_ts_recent = tmp_opt.rcv_tsval;
240 tw->tw_ts_recent_stamp = xtime.tv_sec;
244 return TCP_TW_SUCCESS;
247 /* Out of window segment.
249 All such segments are ACKed immediately.
251 The only exception is a new SYN. We accept it if it is
252 not an old duplicate and we are not in danger of being killed
253 by delayed old duplicates. The RFC check - that it has a
254 newer sequence number - works at rates < 40 Mbit/sec.
255 However, if PAWS works, it is reliable, and even more,
256 we may even relax the silly seq-space cutoff.
258 RED-PEN: we violate the main RFC requirement; if this SYN turns out to be an
259 old duplicate (i.e. we receive an RST in reply to our SYN-ACK),
260 we must return the socket to time-wait state. That is not good,
264 if (th->syn && !th->rst && !th->ack && !paws_reject &&
265 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
266 (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
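/* Per the RFC 1122 rule quoted above: pick an ISN for the reopened connection
 * well above anything we could have sent on the old incarnation (snd_nxt plus
 * a maximal 64K window, plus a little slack). */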
267 u32 isn = tw->tw_snd_nxt + 65535 + 2;
270 TCP_SKB_CB(skb)->when = isn;
275 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
278 /* In this case we must reset the TIMEWAIT timer.
280 * If it is an ACK-less SYN it may be both an old duplicate
281 * and a new good SYN with a random sequence number < rcv_nxt.
282 * Do not reschedule in the latter case.
284 if (paws_reject || th->ack)
285 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
287 /* Send ACK. Note, we do not put the bucket,
288 * it will be released by caller.
293 return TCP_TW_SUCCESS;
296 /* Enter the time wait state. This is called with locally disabled BH.
297 * Essentially we whip up a timewait bucket, copy the
298 * relevant info into it from the SK, and mess with hash chains
301 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
303 struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
304 struct tcp_bind_hashbucket *bhead;
306 /* Step 1: Put TW into bind hash. Original socket stays there too.
307 Note that any socket with inet_sk(sk)->num != 0 MUST be bound in
308 the binding cache, even if it is closed.
310 bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
311 spin_lock(&bhead->lock);
312 tw->tw_tb = tcp_sk(sk)->bind_hash;
313 BUG_TRAP(tcp_sk(sk)->bind_hash);
314 tw_add_bind_node(tw, &tw->tw_tb->owners);
315 spin_unlock(&bhead->lock);
317 write_lock(&ehead->lock);
319 /* Step 2: Remove SK from established hash. */
320 if (__sk_del_node_init(sk))
321 sock_prot_dec_use(sk->sk_prot);
323 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
324 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
325 atomic_inc(&tw->tw_refcnt);
327 write_unlock(&ehead->lock);
331 * Move a socket to time-wait or dead fin-wait-2 state.
333 void tcp_time_wait(struct sock *sk, int state, int timeo)
335 struct tcp_tw_bucket *tw = NULL;
336 struct tcp_sock *tp = tcp_sk(sk);
339 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
340 recycle_ok = tp->af_specific->remember_stamp(sk);
342 if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
343 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
346 struct inet_sock *inet = inet_sk(sk);
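/* (rto << 2) - (rto >> 1) is 4*RTO - RTO/2, i.e. the 3.5*RTO recycle
 * timeout discussed in the comment above tcp_tw_schedule(). */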
347 int rto = (tp->rto<<2) - (tp->rto>>1);
349 /* Give us an identity. */
350 tw->tw_daddr = inet->daddr;
351 tw->tw_rcv_saddr = inet->rcv_saddr;
352 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
353 tw->tw_num = inet->num;
354 tw->tw_state = TCP_TIME_WAIT;
355 tw->tw_substate = state;
356 tw->tw_sport = inet->sport;
357 tw->tw_dport = inet->dport;
358 tw->tw_family = sk->sk_family;
359 tw->tw_reuse = sk->sk_reuse;
360 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
361 atomic_set(&tw->tw_refcnt, 1);
363 tw->tw_hashent = sk->sk_hashent;
364 tw->tw_rcv_nxt = tp->rcv_nxt;
365 tw->tw_snd_nxt = tp->snd_nxt;
366 tw->tw_rcv_wnd = tcp_receive_window(tp);
367 tw->tw_ts_recent = tp->rx_opt.ts_recent;
368 tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
369 tw_dead_node_init(tw);
371 tw->tw_xid = sk->sk_xid;
372 tw->tw_vx_info = NULL;
373 tw->tw_nid = sk->sk_nid;
374 tw->tw_nx_info = NULL;
376 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
377 if (tw->tw_family == PF_INET6) {
378 struct ipv6_pinfo *np = inet6_sk(sk);
380 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
381 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
382 tw->tw_v6_ipv6only = np->ipv6only;
384 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
385 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
386 tw->tw_v6_ipv6only = 0;
389 /* Linkage updates. */
390 __tcp_tw_hashdance(sk, tw);
392 /* Get the TIME_WAIT timeout firing. */
397 tw->tw_timeout = rto;
399 tw->tw_timeout = TCP_TIMEWAIT_LEN;
400 if (state == TCP_TIME_WAIT)
401 timeo = TCP_TIMEWAIT_LEN;
404 tcp_tw_schedule(tw, timeo);
407 /* Sorry, if we're out of memory, just CLOSE this
408 * socket up. We've got bigger problems than
409 * non-graceful socket closings.
412 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
415 tcp_update_metrics(sk);
419 /* Kill off TIME_WAIT sockets once their lifetime has expired. */
420 static int tcp_tw_death_row_slot;
422 static void tcp_twkill(unsigned long);
424 /* TIME_WAIT reaping mechanism. */
425 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
426 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
428 #define TCP_TWKILL_QUOTA 100
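/* Reaping works as a coarse timer wheel: TIME-WAIT buckets are spread over
 * TCP_TWKILL_SLOTS death-row slots, tcp_twkill() fires every TCP_TWKILL_PERIOD
 * jiffies to empty one slot, and if a slot holds more than TCP_TWKILL_QUOTA
 * buckets the remainder is handed off to the twkill_work workqueue. */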
430 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
431 static DEFINE_SPINLOCK(tw_death_lock);
432 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
433 static void twkill_work(void *);
434 static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
435 static u32 twkill_thread_slots;
437 /* Returns non-zero if quota exceeded. */
438 static int tcp_do_twkill_work(int slot, unsigned int quota)
440 struct tcp_tw_bucket *tw;
441 struct hlist_node *node;
445 /* NOTE: compare this to the previous version, where the lock
446 * was released after detaching the chain. That was racy,
447 * because tw buckets are scheduled from a non-serialized context
448 * in 2.3 (with netfilter), and with softnet this is common, because
449 * soft irqs are not sequenced.
454 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
455 __tw_del_dead_node(tw);
456 spin_unlock(&tw_death_lock);
457 tcp_timewait_kill(tw);
460 spin_lock(&tw_death_lock);
461 if (killed > quota) {
466 /* While we dropped tw_death_lock, another cpu may have
467 * killed off the next TW bucket in the list, therefore
468 * do a fresh re-read of the hlist head node with the
469 * lock reacquired. We still use the hlist traversal
470 * macro in order to get the prefetches.
475 tcp_tw_count -= killed;
476 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
481 static void tcp_twkill(unsigned long dummy)
485 spin_lock(&tw_death_lock);
487 if (tcp_tw_count == 0)
491 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
493 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
495 schedule_work(&tcp_twkill_work);
498 /* We purged the entire slot, anything left? */
502 tcp_tw_death_row_slot =
503 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
505 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
507 spin_unlock(&tw_death_lock);
510 extern void twkill_slots_invalid(void);
512 static void twkill_work(void *dummy)
516 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
517 twkill_slots_invalid();
519 while (twkill_thread_slots) {
520 spin_lock_bh(&tw_death_lock);
521 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
522 if (!(twkill_thread_slots & (1 << i)))
525 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
526 if (need_resched()) {
527 spin_unlock_bh(&tw_death_lock);
529 spin_lock_bh(&tw_death_lock);
533 twkill_thread_slots &= ~(1 << i);
535 spin_unlock_bh(&tw_death_lock);
539 /* These are always called from BH context. See callers in
540 * tcp_input.c to verify this.
543 /* This is for handling early-kills of TIME_WAIT sockets. */
544 void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
546 spin_lock(&tw_death_lock);
547 if (tw_del_dead_node(tw)) {
549 if (--tcp_tw_count == 0)
550 del_timer(&tcp_tw_timer);
552 spin_unlock(&tw_death_lock);
553 tcp_timewait_kill(tw);
556 /* Short-time timewait calendar */
558 static int tcp_twcal_hand = -1;
559 static int tcp_twcal_jiffie;
560 static void tcp_twcal_tick(unsigned long);
561 static struct timer_list tcp_twcal_timer =
562 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
563 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
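/* When tw recycling is active the timeout is only a few RTOs, so those
 * short-lived buckets go on this finer-grained calendar instead:
 * TCP_TW_RECYCLE_SLOTS slots, each 2^TCP_TW_RECYCLE_TICK jiffies wide,
 * driven by tcp_twcal_timer and swept by tcp_twcal_tick(). */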
565 static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
567 struct hlist_head *list;
570 /* timeout := RTO * 3.5
572 * 3.5 = 1+2+0.5 to wait for two retransmits.
574 * RATIONALE: if a FIN arrived and we entered TIME-WAIT state,
575 * our ACK acking that FIN can be lost. If N subsequent retransmitted
576 * FINs (or previous segments) are also lost, the probability of that event
577 * is p^(N+1), where p is the probability of losing a single packet, and the
578 * time to detect the loss is about RTO*(2^N - 1) with exponential
579 * backoff. The normal timewait length is calculated so that we
580 * wait at least for one retransmitted FIN (the maximal RTO is 120 sec).
581 * [ BTW Linux, following BSD, violates this requirement by waiting
582 * only for 60 sec; we should wait at least 240 secs.
583 * Well, 240 consumes too many resources 8)
585 * This interval is not reduced, so that we still catch old duplicates and
586 * responses to our wandering segments, which live for two MSLs.
587 * However, if we use PAWS to detect
588 * old duplicates, we can reduce the interval to the bounds required
589 * by the RTO, rather than the MSL. So, if the peer understands PAWS, we
590 * kill the tw bucket after 3.5*RTO (it is important that this number
591 * is greater than the TS tick!) and detect old duplicates with the help
594 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
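/* The line above is a ceiling division: slot = ceil(timeo / 2^TCP_TW_RECYCLE_TICK),
 * i.e. the timeout rounded up to whole recycle ticks. Illustrative numbers (not
 * taken from this code): with RTO = 200 ms a recycled bucket lives
 * 3.5*RTO = 700 ms instead of the full TCP_TIMEWAIT_LEN of 60 seconds. */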
596 spin_lock(&tw_death_lock);
598 /* Unlink it, if it was scheduled */
599 if (tw_del_dead_node(tw))
602 atomic_inc(&tw->tw_refcnt);
604 if (slot >= TCP_TW_RECYCLE_SLOTS) {
605 /* Schedule to slow timer */
606 if (timeo >= TCP_TIMEWAIT_LEN) {
607 slot = TCP_TWKILL_SLOTS-1;
609 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
610 if (slot >= TCP_TWKILL_SLOTS)
611 slot = TCP_TWKILL_SLOTS-1;
613 tw->tw_ttd = jiffies + timeo;
614 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
615 list = &tcp_tw_death_row[slot];
617 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
619 if (tcp_twcal_hand < 0) {
621 tcp_twcal_jiffie = jiffies;
622 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
623 add_timer(&tcp_twcal_timer);
625 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
626 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
627 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
629 list = &tcp_twcal_row[slot];
632 hlist_add_head(&tw->tw_death_node, list);
634 if (tcp_tw_count++ == 0)
635 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
636 spin_unlock(&tw_death_lock);
639 void tcp_twcal_tick(unsigned long dummy)
643 unsigned long now = jiffies;
647 spin_lock(&tw_death_lock);
648 if (tcp_twcal_hand < 0)
651 slot = tcp_twcal_hand;
652 j = tcp_twcal_jiffie;
654 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
655 if (time_before_eq(j, now)) {
656 struct hlist_node *node, *safe;
657 struct tcp_tw_bucket *tw;
659 tw_for_each_inmate_safe(tw, node, safe,
660 &tcp_twcal_row[slot]) {
661 __tw_del_dead_node(tw);
662 tcp_timewait_kill(tw);
669 tcp_twcal_jiffie = j;
670 tcp_twcal_hand = slot;
673 if (!hlist_empty(&tcp_twcal_row[slot])) {
674 mod_timer(&tcp_twcal_timer, j);
678 j += (1<<TCP_TW_RECYCLE_TICK);
679 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
684 if ((tcp_tw_count -= killed) == 0)
685 del_timer(&tcp_tw_timer);
686 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
687 spin_unlock(&tw_death_lock);
690 /* This is not only more efficient than what we used to do, it eliminates
691 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
693 * Actually, we could avoid lots of memory writes here. The tp of the listening
694 * socket contains all the necessary default parameters.
696 struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
698 /* Allocate the newsk from the same slab as the master sock;
699 * if not, at sk_free time we'll try to free it from the wrong
700 * slabcache (i.e. is it TCPv4 or v6?). This is handled through sk->sk_prot. -acme */
701 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
704 struct tcp_sock *newtp;
705 struct sk_filter *filter;
707 memcpy(newsk, sk, sizeof(struct tcp_sock));
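/* The child starts life as a byte-for-byte copy of the listener; everything
 * below re-initializes the fields that must not be shared (hash linkage,
 * queues, locks, counters, congestion state). */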
708 newsk->sk_state = TCP_SYN_RECV;
713 sk_node_init(&newsk->sk_node);
714 tcp_sk(newsk)->bind_hash = NULL;
716 /* Clone the TCP header template */
717 inet_sk(newsk)->dport = req->rmt_port;
719 sock_lock_init(newsk);
722 rwlock_init(&newsk->sk_dst_lock);
723 atomic_set(&newsk->sk_rmem_alloc, 0);
724 skb_queue_head_init(&newsk->sk_receive_queue);
725 atomic_set(&newsk->sk_wmem_alloc, 0);
726 skb_queue_head_init(&newsk->sk_write_queue);
727 atomic_set(&newsk->sk_omem_alloc, 0);
728 newsk->sk_wmem_queued = 0;
729 newsk->sk_forward_alloc = 0;
731 sock_reset_flag(newsk, SOCK_DONE);
732 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
733 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
734 newsk->sk_send_head = NULL;
735 rwlock_init(&newsk->sk_callback_lock);
736 skb_queue_head_init(&newsk->sk_error_queue);
737 newsk->sk_write_space = sk_stream_write_space;
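/* The filter pointer was copied by the memcpy above; take an extra reference
 * (and account the memory) so the child owns the filter as well. */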
739 if ((filter = newsk->sk_filter) != NULL)
740 sk_filter_charge(newsk, filter);
742 if (unlikely(xfrm_sk_clone_policy(newsk))) {
743 /* It is still a raw copy of the parent, so invalidate the
744 * destructor and do a plain sk_free(). */
745 newsk->sk_destruct = NULL;
750 /* Now setup tcp_sock */
751 newtp = tcp_sk(newsk);
752 newtp->pred_flags = 0;
753 newtp->rcv_nxt = req->rcv_isn + 1;
754 newtp->snd_nxt = req->snt_isn + 1;
755 newtp->snd_una = req->snt_isn + 1;
756 newtp->snd_sml = req->snt_isn + 1;
758 tcp_prequeue_init(newtp);
760 tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
762 newtp->retransmits = 0;
765 newtp->mdev = TCP_TIMEOUT_INIT;
766 newtp->rto = TCP_TIMEOUT_INIT;
768 newtp->packets_out = 0;
770 newtp->retrans_out = 0;
771 newtp->sacked_out = 0;
772 newtp->fackets_out = 0;
773 newtp->snd_ssthresh = 0x7fffffff;
775 /* So many TCP implementations out there (incorrectly) count the
776 * initial SYN frame in their delayed-ACK and congestion control
777 * algorithms that we must have the following bandaid to talk
778 * efficiently to them. -DaveM
781 newtp->snd_cwnd_cnt = 0;
783 newtp->frto_counter = 0;
784 newtp->frto_highmark = 0;
786 tcp_set_ca_state(newtp, TCP_CA_Open);
787 tcp_init_xmit_timers(newsk);
788 skb_queue_head_init(&newtp->out_of_order_queue);
789 newtp->rcv_wup = req->rcv_isn + 1;
790 newtp->write_seq = req->snt_isn + 1;
791 newtp->pushed_seq = newtp->write_seq;
792 newtp->copied_seq = req->rcv_isn + 1;
794 newtp->rx_opt.saw_tstamp = 0;
796 newtp->rx_opt.dsack = 0;
797 newtp->rx_opt.eff_sacks = 0;
799 newtp->probes_out = 0;
800 newtp->rx_opt.num_sacks = 0;
802 newtp->listen_opt = NULL;
803 newtp->accept_queue = newtp->accept_queue_tail = NULL;
804 /* Deinitialize syn_wait_lock to trap illegal accesses. */
805 memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
807 /* Back to base struct sock members. */
809 newsk->sk_priority = 0;
810 atomic_set(&newsk->sk_refcnt, 2);
812 set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
813 newsk->sk_xid = sk->sk_xid;
815 set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
816 newsk->sk_nid = sk->sk_nid;
817 #ifdef INET_REFCNT_DEBUG
818 atomic_inc(&inet_sock_nr);
820 atomic_inc(&tcp_sockets_allocated);
822 if (sock_flag(newsk, SOCK_KEEPOPEN))
823 tcp_reset_keepalive_timer(newsk,
824 keepalive_time_when(newtp));
825 newsk->sk_socket = NULL;
826 newsk->sk_sleep = NULL;
828 newtp->rx_opt.tstamp_ok = req->tstamp_ok;
829 if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
831 newtp->rx_opt.sack_ok |= 2;
833 newtp->window_clamp = req->window_clamp;
834 newtp->rcv_ssthresh = req->rcv_wnd;
835 newtp->rcv_wnd = req->rcv_wnd;
836 newtp->rx_opt.wscale_ok = req->wscale_ok;
837 if (newtp->rx_opt.wscale_ok) {
838 newtp->rx_opt.snd_wscale = req->snd_wscale;
839 newtp->rx_opt.rcv_wscale = req->rcv_wscale;
841 newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
842 newtp->window_clamp = min(newtp->window_clamp, 65535U);
844 newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
845 newtp->max_window = newtp->snd_wnd;
847 if (newtp->rx_opt.tstamp_ok) {
848 newtp->rx_opt.ts_recent = req->ts_recent;
849 newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
850 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
852 newtp->rx_opt.ts_recent_stamp = 0;
853 newtp->tcp_header_len = sizeof(struct tcphdr);
855 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
856 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
857 newtp->rx_opt.mss_clamp = req->mss;
858 TCP_ECN_openreq_child(newtp, req);
859 if (newtp->ecn_flags&TCP_ECN_OK)
860 sock_set_flag(newsk, SOCK_NO_LARGESEND);
864 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
870 * Process an incoming packet for SYN_RECV sockets represented
871 * as an open_request.
874 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
875 struct open_request *req,
876 struct open_request **prev)
878 struct tcphdr *th = skb->h.th;
879 struct tcp_sock *tp = tcp_sk(sk);
880 u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
882 struct tcp_options_received tmp_opt;
885 tmp_opt.saw_tstamp = 0;
886 if (th->doff > (sizeof(struct tcphdr)>>2)) {
887 tcp_parse_options(skb, &tmp_opt, 0);
889 if (tmp_opt.saw_tstamp) {
890 tmp_opt.ts_recent = req->ts_recent;
891 /* We do not store the true timestamp, but that is not required;
892 * it can be estimated (approximately)
895 tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
896 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
900 /* Check for pure retransmitted SYN. */
901 if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
902 flg == TCP_FLAG_SYN &&
905 * RFC793 draws (incorrectly! It was fixed in RFC1122)
906 * this case in figure 6 and figure 8, but the formal
907 * protocol description says NOTHING.
908 * To be more exact, it says that we should send an ACK,
909 * because this segment (at least, if it has no data)
912 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
913 * describe the SYN-RECV state. All of the description
914 * is wrong; we cannot believe it and should
915 * rely only on common sense and implementation
918 * Enforce "SYN-ACK" according to figure 8, figure 6
919 * of RFC793, fixed by RFC1122.
921 req->class->rtx_syn_ack(sk, req, NULL);
925 /* The following reproduces the section "SEGMENT ARRIVES"
926 for state SYN-RECEIVED of RFC793.
927 It is broken, however: it fails only
928 when SYNs are crossed.
930 You would think that SYN crossing is impossible here, since
931 we should have a SYN_SENT socket (from connect()) on our end,
932 but this is not true if the crossed SYNs were sent to both
933 ends by a malicious third party. We must defend against this,
934 and to do that we first verify the ACK (as per RFC793, page
935 36) and reset if it is invalid. Is this a true full defense?
936 To convince ourselves, let us consider a way in which the ACK
937 test can still pass in this 'malicious crossed SYNs' case.
938 Malicious sender sends identical SYNs (and thus identical sequence
939 numbers) to both A and B:
944 By our good fortune, both A and B select the same initial
945 send sequence number of seven :-)
947 A: sends SYN|ACK, seq=7, ack_seq=8
948 B: sends SYN|ACK, seq=7, ack_seq=8
950 So we are now A, eating this SYN|ACK; the ACK test passes. So
951 does the sequence test; the SYN is truncated, and thus we consider
954 If tp->defer_accept, we silently drop this bare ACK. Otherwise,
955 we create an established connection. Both ends (listening sockets)
956 accept the new incoming connection and try to talk to each other. 8-)
958 Note: This case is both harmless and rare. The probability is about the
959 same as that of us discovering intelligent life on another planet tomorrow.
961 But generally, we should (the RFC lies!) accept the ACK carried
962 by a SYNACK both here and in tcp_rcv_state_process().
963 tcp_rcv_state_process() does not, hence we do not either.
965 Note that the case is absolutely generic:
966 we cannot optimize anything here without
967 violating the protocol. All the checks must be made
968 before attempting to create the socket.
971 /* RFC793 page 36: "If the connection is in any non-synchronized state ...
972 * and the incoming segment acknowledges something not yet
973 * sent (the segment carries an unacceptable ACK) ...
976 * Invalid ACK: reset will be sent by listening socket
978 if ((flg & TCP_FLAG_ACK) &&
979 (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
982 /* Also, it would not be a bad idea to check rcv_tsecr, which
983 * is essentially an ACK extension; too-early or too-late values
984 * should cause a reset in unsynchronized states.
987 /* RFC793: "first check sequence number". */
989 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
990 req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
991 /* Out of window: send ACK and drop. */
992 if (!(flg & TCP_FLAG_RST))
993 req->class->send_ack(skb, req);
995 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
999 /* In sequence, PAWS is OK. */
1001 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
1002 req->ts_recent = tmp_opt.rcv_tsval;
1004 if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
1005 /* Truncate SYN, it is out of window starting
1006 at req->rcv_isn+1. */
1007 flg &= ~TCP_FLAG_SYN;
1010 /* RFC793: "second check the RST bit" and
1011 * "fourth, check the SYN bit"
1013 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
1014 goto embryonic_reset;
1016 /* The ACK sequence was verified above; just make sure the ACK bit is
1017 * set. If ACK is not set, silently drop the packet.
1019 if (!(flg & TCP_FLAG_ACK))
1022 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
1023 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
1028 /* OK, the ACK is valid; create the full socket and
1029 * feed this segment to it. It will repeat all
1030 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
1031 * ESTABLISHED STATE. If it is dropped after the
1032 * socket is created, expect trouble.
1034 child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
1036 goto listen_overflow;
1038 tcp_synq_unlink(tp, req, prev);
1039 tcp_synq_removed(sk, req);
1041 tcp_acceptq_queue(sk, req, child);
1045 if (!sysctl_tcp_abort_on_overflow) {
1051 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
1052 if (!(flg & TCP_FLAG_RST))
1053 req->class->send_reset(skb);
1055 tcp_synq_drop(sk, req, prev);
1060 * Queue segment on the new socket if the new socket is active,
1061 * otherwise we just short-circuit this and continue with
1065 int tcp_child_process(struct sock *parent, struct sock *child,
1066 struct sk_buff *skb)
1069 int state = child->sk_state;
1071 if (!sock_owned_by_user(child)) {
1072 ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
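/* If the child just left SYN_RECV, the three-way handshake has completed,
 * so the listener should be told that a connection is ready for accept(). */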
1074 /* Wakeup parent, send SIGIO */
1075 if (state == TCP_SYN_RECV && child->sk_state != state)
1076 parent->sk_data_ready(parent, 0);
1078 /* Alas, it is possible again, because we do the lookup
1079 * in the main socket hash table and the lock on the listening
1080 * socket no longer protects us.
1082 sk_add_backlog(child, skb);
1085 bh_unlock_sock(child);
1090 EXPORT_SYMBOL(tcp_check_req);
1091 EXPORT_SYMBOL(tcp_child_process);
1092 EXPORT_SYMBOL(tcp_create_openreq_child);
1093 EXPORT_SYMBOL(tcp_timewait_state_process);
1094 EXPORT_SYMBOL(tcp_tw_deschedule);