2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/sysctl.h>
27 #include <linux/workqueue.h>
28 #include <linux/vs_limit.h>
29 #include <linux/vs_socket.h>
31 #include <net/inet_common.h>
35 #define SYNC_INIT 0 /* let the user enable it */
40 int sysctl_tcp_tw_recycle;
41 int sysctl_tcp_max_tw_buckets = NR_FILE*2;
43 int sysctl_tcp_syncookies = SYNC_INIT;
44 int sysctl_tcp_abort_on_overflow;
46 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
50 if (after(end_seq, s_win) && before(seq, e_win))
51 return 1;
52 return (seq == e_win && seq == end_seq);
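/* Aside (sketch, not part of the original file): the after()/before() helpers
 * used above compare 32-bit sequence numbers modulo 2^32, so ordering stays
 * correct across wrap-around. A minimal equivalent of before() looks like:
 */
static inline int tcp_seq_before_example(__u32 seq1, __u32 seq2)
{
	/* the signed difference is negative iff seq1 precedes seq2 mod 2^32 */
	return (__s32)(seq1 - seq2) < 0;
}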
55 /* New-style handling of TIME_WAIT sockets. */
60 /* Must be called with locally disabled BHs. */
61 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
63 struct tcp_ehash_bucket *ehead;
64 struct tcp_bind_hashbucket *bhead;
65 struct tcp_bind_bucket *tb;
67 /* Unlink from established hashes. */
68 ehead = &tcp_ehash[tw->tw_hashent];
69 write_lock(&ehead->lock);
70 if (hlist_unhashed(&tw->tw_node)) {
71 write_unlock(&ehead->lock);
74 __hlist_del(&tw->tw_node);
75 sk_node_init(&tw->tw_node);
76 write_unlock(&ehead->lock);
78 /* Disassociate with bind bucket. */
79 bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
80 spin_lock(&bhead->lock);
82 __hlist_del(&tw->tw_bind_node);
84 tcp_bucket_destroy(tb);
85 spin_unlock(&bhead->lock);
87 #ifdef INET_REFCNT_DEBUG
88 if (atomic_read(&tw->tw_refcnt) != 1) {
89 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
90 atomic_read(&tw->tw_refcnt));
97 * * The main purpose of the TIME-WAIT state is to close a connection gracefully,
98 * when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
99 * (and, probably, a tail of data) and one or more of our ACKs are lost.
100 * * What is the TIME-WAIT timeout? It is associated with maximal packet
101 * lifetime in the internet, which leads to the wrong conclusion that
102 * it is set to catch "old duplicate segments" wandering out of their path.
103 * That is not quite correct. This timeout is calculated so that it exceeds the
104 * maximal retransmission timeout by enough to allow for the loss of one (or more)
105 * segments sent by the peer and of our ACKs. This time may be calculated from the RTO.
106 * * When a TIME-WAIT socket receives an RST, it means that the other end has
107 * finally closed and we are allowed to kill the TIME-WAIT state too.
108 * * The second purpose of TIME-WAIT is catching old duplicate segments.
109 * Well, certainly it is pure paranoia, but if we load TIME-WAIT
110 * with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
111 * * If we invented some cleverer way to catch duplicates
112 * (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
114 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
115 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
116 * from the very beginning.
118 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
119 * is _not_ stateless. Strictly speaking, that means we must
120 * spinlock it. I do not want to! Well, the probability of misbehaviour
121 * is ridiculously low and, it seems, we could use some mb() tricks
122 * to avoid misreading sequence numbers, states etc. --ANK
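/* Sketch (not part of this file; handler names are indicative only): a caller
 * such as the IPv4 receive path typically dispatches on the value returned by
 * tcp_timewait_state_process() roughly like this:
 *
 *	switch (tcp_timewait_state_process(tw, skb, th, skb->len)) {
 *	case TCP_TW_SYN:	// acceptable new SYN: retry against a listener
 *		...
 *	case TCP_TW_ACK:	// re-send the last ACK from the tw bucket
 *		tcp_v4_timewait_ack(sk, skb);
 *		break;
 *	case TCP_TW_RST:	// bad segment: answer with a reset
 *		goto no_tcp_socket;
 *	case TCP_TW_SUCCESS:	// segment consumed, nothing more to do
 *		break;
 *	}
 */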
125 tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
126 struct tcphdr *th, unsigned len)
132 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
133 tcp_parse_options(skb, &tp, 0);
136 tp.ts_recent = tw->tw_ts_recent;
137 tp.ts_recent_stamp = tw->tw_ts_recent_stamp;
138 paws_reject = tcp_paws_check(&tp, th->rst);
142 if (tw->tw_substate == TCP_FIN_WAIT2) {
143 /* Just repeat all the checks of tcp_rcv_state_process() */
145 /* Out of window, send ACK */
147 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
149 tw->tw_rcv_nxt + tw->tw_rcv_wnd))
155 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
159 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
160 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
162 return TCP_TW_SUCCESS;
165 /* New data or FIN. If new data arrive after half-duplex close,
169 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
171 tcp_tw_deschedule(tw);
176 /* FIN arrived, enter true time-wait state. */
177 tw->tw_substate = TCP_TIME_WAIT;
178 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
180 tw->tw_ts_recent_stamp = xtime.tv_sec;
181 tw->tw_ts_recent = tp.rcv_tsval;
184 /* I am ashamed, but I failed to make this more elegant.
185 * Yes, it is a direct reference to IP, which is impossible
186 * to generalize to IPv6. Taking into account that IPv6
187 * does not understand recycling in any case, it is not
188 * a big problem in practice. --ANK */
189 if (tw->tw_family == AF_INET &&
190 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
191 tcp_v4_tw_remember_stamp(tw))
192 tcp_tw_schedule(tw, tw->tw_timeout);
194 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
199 * Now real TIME-WAIT state.
202 * "When a connection is [...] on TIME-WAIT state [...]
203 * [a TCP] MAY accept a new SYN from the remote TCP to
204 * reopen the connection directly, if it:
206 * (1) assigns its initial sequence number for the new
207 * connection to be larger than the largest sequence
208 * number it used on the previous connection incarnation,
211 * (2) returns to TIME-WAIT state if the SYN turns out
212 * to be an old duplicate".
216 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
217 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
218 /* In-window segment: it may only be a reset or a bare ack. */
221 /* This is TIME_WAIT assassination, in two flavors.
222 * Oh well... nobody has a sufficient solution to this
225 if (sysctl_tcp_rfc1337 == 0) {
227 tcp_tw_deschedule(tw);
229 return TCP_TW_SUCCESS;
232 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
235 tw->tw_ts_recent = tp.rcv_tsval;
236 tw->tw_ts_recent_stamp = xtime.tv_sec;
240 return TCP_TW_SUCCESS;
243 /* Out-of-window segment.
245 All such segments are ACKed immediately.
247 The only exception is a new SYN. We accept it, if it is
248 not an old duplicate and we are not in danger of being killed
249 by delayed old duplicates. The RFC check, that it carries a
250 newer sequence number, works at rates <40Mbit/sec.
251 However, if PAWS works, it is reliable AND, even more,
252 we may relax the silly seq-space cutoff.
254 RED-PEN: we violate the main RFC requirement: if this SYN turns out to be an
255 old duplicate (i.e. we receive an RST in reply to our SYN-ACK),
256 we must return the socket to the time-wait state. That is not good,
260 if (th->syn && !th->rst && !th->ack && !paws_reject &&
261 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
262 (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) {
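/* Pick the initial sequence number for the reopened connection well above
 * the last one used by the old incarnation (tw_snd_nxt), as required by the
 * RFC 1122 rule quoted above; the 65535 margin covers a full unscaled window. */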
263 u32 isn = tw->tw_snd_nxt + 65535 + 2;
266 TCP_SKB_CB(skb)->when = isn;
271 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
274 /* In this case we must reset the TIMEWAIT timer.
276 * If it is an ACKless SYN it may be both an old duplicate
277 * and a new good SYN with a random sequence number <rcv_nxt.
278 * Do not reschedule in the latter case.
280 if (paws_reject || th->ack)
281 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
283 /* Send ACK. Note, we do not put the bucket,
284 * it will be released by caller.
289 return TCP_TW_SUCCESS;
292 /* Enter the time wait state. This is called with locally disabled BH.
293 * Essentially we whip up a timewait bucket, copy the
294 * relevant info into it from the SK, and mess with hash chains
297 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
299 struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
300 struct tcp_bind_hashbucket *bhead;
302 /* Step 1: Put TW into bind hash. Original socket stays there too.
303 Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
304 binding cache, even if it is closed.
306 bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
307 spin_lock(&bhead->lock);
308 tw->tw_tb = tcp_sk(sk)->bind_hash;
309 BUG_TRAP(tcp_sk(sk)->bind_hash);
310 tw_add_bind_node(tw, &tw->tw_tb->owners);
311 spin_unlock(&bhead->lock);
313 write_lock(&ehead->lock);
315 /* Step 2: Remove SK from established hash. */
316 if (__sk_del_node_init(sk))
317 sock_prot_dec_use(sk->sk_prot);
319 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
320 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
321 atomic_inc(&tw->tw_refcnt);
323 write_unlock(&ehead->lock);
327 * Move a socket to time-wait or dead fin-wait-2 state.
329 void tcp_time_wait(struct sock *sk, int state, int timeo)
331 struct tcp_tw_bucket *tw = NULL;
332 struct tcp_opt *tp = tcp_sk(sk);
335 if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
336 recycle_ok = tp->af_specific->remember_stamp(sk);
338 if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
339 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
342 struct inet_opt *inet = inet_sk(sk);
343 int rto = (tp->rto<<2) - (tp->rto>>1);
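/* (rto << 2) - (rto >> 1) = 4*RTO - RTO/2 = 3.5*RTO, the recycling timeout
 * whose rationale is spelled out in the comment in tcp_tw_schedule(). */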
345 /* Give us an identity. */
346 tw->tw_daddr = inet->daddr;
347 tw->tw_rcv_saddr = inet->rcv_saddr;
348 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
349 tw->tw_num = inet->num;
350 tw->tw_state = TCP_TIME_WAIT;
351 tw->tw_substate = state;
352 tw->tw_sport = inet->sport;
353 tw->tw_dport = inet->dport;
354 tw->tw_family = sk->sk_family;
355 tw->tw_reuse = sk->sk_reuse;
356 tw->tw_rcv_wscale = tp->rcv_wscale;
357 atomic_set(&tw->tw_refcnt, 1);
359 tw->tw_hashent = sk->sk_hashent;
360 tw->tw_rcv_nxt = tp->rcv_nxt;
361 tw->tw_snd_nxt = tp->snd_nxt;
362 tw->tw_rcv_wnd = tcp_receive_window(tp);
363 tw->tw_ts_recent = tp->ts_recent;
364 tw->tw_ts_recent_stamp = tp->ts_recent_stamp;
365 tw_dead_node_init(tw);
367 tw->tw_xid = sk->sk_xid;
368 tw->tw_vx_info = NULL;
369 tw->tw_nid = sk->sk_nid;
370 tw->tw_nx_info = NULL;
372 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
373 if (tw->tw_family == PF_INET6) {
374 struct ipv6_pinfo *np = inet6_sk(sk);
376 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
377 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
378 tw->tw_v6_ipv6only = np->ipv6only;
380 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
381 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
382 tw->tw_v6_ipv6only = 0;
385 /* Linkage updates. */
386 __tcp_tw_hashdance(sk, tw);
388 /* Get the TIME_WAIT timeout firing. */
393 tw->tw_timeout = rto;
395 tw->tw_timeout = TCP_TIMEWAIT_LEN;
396 if (state == TCP_TIME_WAIT)
397 timeo = TCP_TIMEWAIT_LEN;
400 tcp_tw_schedule(tw, timeo);
403 /* Sorry, if we're out of memory, just CLOSE this
404 * socket up. We've got bigger problems than
405 * non-graceful socket closings.
408 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
411 tcp_update_metrics(sk);
415 /* Kill off TIME_WAIT sockets once their lifetime has expired. */
416 static int tcp_tw_death_row_slot;
418 static void tcp_twkill(unsigned long);
420 /* TIME_WAIT reaping mechanism. */
421 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
422 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
424 #define TCP_TWKILL_QUOTA 100
426 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
427 static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
428 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
429 static void twkill_work(void *);
430 static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
431 static u32 twkill_thread_slots;
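/* Sketch (hypothetical helper, not in the original file): how a timeout in
 * jiffies maps onto one of the TCP_TWKILL_SLOTS slow-timer slots, mirroring
 * the arithmetic used by tcp_tw_schedule() below. With TCP_TIMEWAIT_LEN of
 * 60*HZ and 8 slots, each slot covers TCP_TWKILL_PERIOD = 7.5 seconds.
 */
static inline int tcp_twkill_slot_for(int timeo)
{
	int slot;

	if (timeo >= TCP_TIMEWAIT_LEN)
		return TCP_TWKILL_SLOTS - 1;
	slot = (timeo + TCP_TWKILL_PERIOD - 1) / TCP_TWKILL_PERIOD;
	if (slot >= TCP_TWKILL_SLOTS)
		slot = TCP_TWKILL_SLOTS - 1;
	return slot;
}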
433 /* Returns non-zero if quota exceeded. */
434 static int tcp_do_twkill_work(int slot, unsigned int quota)
436 struct tcp_tw_bucket *tw;
437 struct hlist_node *node;
441 /* NOTE: compare this to the previous version where the lock
442 * was released after detaching the chain. It was racy,
443 * because tw buckets are scheduled in a non-serialized context
444 * in 2.3 (with netfilter), and with softnet this is common, because
445 * soft irqs are not sequenced.
450 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
451 __tw_del_dead_node(tw);
452 spin_unlock(&tw_death_lock);
453 tcp_timewait_kill(tw);
456 spin_lock(&tw_death_lock);
457 if (killed > quota) {
462 /* While we dropped tw_death_lock, another cpu may have
463 * killed off the next TW bucket in the list, therefore
464 * do a fresh re-read of the hlist head node with the
465 * lock reacquired. We still use the hlist traversal
466 * macro in order to get the prefetches.
471 tcp_tw_count -= killed;
472 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
477 static void tcp_twkill(unsigned long dummy)
481 spin_lock(&tw_death_lock);
483 if (tcp_tw_count == 0)
487 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
489 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
491 schedule_work(&tcp_twkill_work);
494 /* We purged the entire slot, anything left? */
498 tcp_tw_death_row_slot =
499 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
501 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
503 spin_unlock(&tw_death_lock);
506 extern void twkill_slots_invalid(void);
508 static void twkill_work(void *dummy)
512 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
513 twkill_slots_invalid();
515 while (twkill_thread_slots) {
516 spin_lock_bh(&tw_death_lock);
517 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
518 if (!(twkill_thread_slots & (1 << i)))
521 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
522 if (need_resched()) {
523 spin_unlock_bh(&tw_death_lock);
525 spin_lock_bh(&tw_death_lock);
529 twkill_thread_slots &= ~(1 << i);
531 spin_unlock_bh(&tw_death_lock);
535 /* These are always called from BH context. See callers in
536 * tcp_input.c to verify this.
539 /* This is for handling early-kills of TIME_WAIT sockets. */
540 void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
542 spin_lock(&tw_death_lock);
543 if (tw_del_dead_node(tw)) {
545 if (--tcp_tw_count == 0)
546 del_timer(&tcp_tw_timer);
548 spin_unlock(&tw_death_lock);
549 tcp_timewait_kill(tw);
552 /* Short-time timewait calendar */
554 static int tcp_twcal_hand = -1;
555 static int tcp_twcal_jiffie;
556 static void tcp_twcal_tick(unsigned long);
557 static struct timer_list tcp_twcal_timer =
558 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
559 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
561 void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
563 struct hlist_head *list;
566 /* timeout := RTO * 3.5
568 * 3.5 = 1+2+0.5 to wait for two retransmits.
570 * RATIONALE: if a FIN arrived and we entered the TIME-WAIT state,
571 * our ACK acking that FIN can be lost. If N subsequent retransmitted
572 * FINs (or previous segments) are lost (the probability of such an event
573 * is p^(N+1), where p is the probability of losing a single packet, and the
574 * time to detect the loss is about RTO*(2^N - 1) with exponential
575 * backoff). The normal timewait length is calculated so that we
576 * wait for at least one retransmitted FIN (the maximal RTO is 120 sec).
577 * [ BTW Linux, following BSD, violates this requirement by waiting
578 * only 60 sec; we should wait at least 240 secs.
579 * Well, 240 consumes too many resources 8)
581 * This interval is not reduced to catch old duplicates and
582 * responses to our wandering segments living for two MSLs.
583 * However, if we use PAWS to detect
584 * old duplicates, we can reduce the interval to the bounds required
585 * by the RTO rather than the MSL. So, if the peer understands PAWS, we
586 * kill the tw bucket after 3.5*RTO (it is important that this number
587 * is greater than the TS tick!) and detect old duplicates with the help
590 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
592 spin_lock(&tw_death_lock);
594 /* Unlink it, if it was scheduled */
595 if (tw_del_dead_node(tw))
598 atomic_inc(&tw->tw_refcnt);
600 if (slot >= TCP_TW_RECYCLE_SLOTS) {
601 /* Schedule to slow timer */
602 if (timeo >= TCP_TIMEWAIT_LEN) {
603 slot = TCP_TWKILL_SLOTS-1;
605 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
606 if (slot >= TCP_TWKILL_SLOTS)
607 slot = TCP_TWKILL_SLOTS-1;
609 tw->tw_ttd = jiffies + timeo;
610 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
611 list = &tcp_tw_death_row[slot];
613 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
615 if (tcp_twcal_hand < 0) {
617 tcp_twcal_jiffie = jiffies;
618 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
619 add_timer(&tcp_twcal_timer);
621 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
622 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
623 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
625 list = &tcp_twcal_row[slot];
628 hlist_add_head(&tw->tw_death_node, list);
630 if (tcp_tw_count++ == 0)
631 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
632 spin_unlock(&tw_death_lock);
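/* Summary (descriptive note, not in the original file): buckets with long
 * timeouts land on tcp_tw_death_row[] and are reaped by the slow tcp_tw_timer
 * every TCP_TWKILL_PERIOD jiffies, while short recycled timeouts land on
 * tcp_twcal_row[] and are reaped by tcp_twcal_tick() with a granularity of
 * (1 << TCP_TW_RECYCLE_TICK) jiffies.
 */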
635 void tcp_twcal_tick(unsigned long dummy)
639 unsigned long now = jiffies;
643 spin_lock(&tw_death_lock);
644 if (tcp_twcal_hand < 0)
647 slot = tcp_twcal_hand;
648 j = tcp_twcal_jiffie;
650 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
651 if (time_before_eq(j, now)) {
652 struct hlist_node *node, *safe;
653 struct tcp_tw_bucket *tw;
655 tw_for_each_inmate_safe(tw, node, safe,
656 &tcp_twcal_row[slot]) {
657 __tw_del_dead_node(tw);
658 tcp_timewait_kill(tw);
665 tcp_twcal_jiffie = j;
666 tcp_twcal_hand = slot;
669 if (!hlist_empty(&tcp_twcal_row[slot])) {
670 mod_timer(&tcp_twcal_timer, j);
674 j += (1<<TCP_TW_RECYCLE_TICK);
675 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
680 if ((tcp_tw_count -= killed) == 0)
681 del_timer(&tcp_tw_timer);
682 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
683 spin_unlock(&tw_death_lock);
686 /* This is not only more efficient than what we used to do, it eliminates
687 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
689 * Actually, we could save a lot of memory writes here: the tp of the listening
690 * socket already contains all the necessary default parameters.
692 struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
694 /* allocate the newsk from the same slab as the master sock; otherwise,
695 * at sk_free time we'll try to free it from the wrong
696 * slabcache (i.e. is it TCPv4 or v6?) -acme */
697 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_prot->slab);
700 struct tcp_opt *newtp;
701 struct sk_filter *filter;
703 memcpy(newsk, sk, sizeof(struct tcp_sock));
704 newsk->sk_state = TCP_SYN_RECV;
709 sk_node_init(&newsk->sk_node);
710 tcp_sk(newsk)->bind_hash = NULL;
712 /* Clone the TCP header template */
713 inet_sk(newsk)->dport = req->rmt_port;
715 sock_lock_init(newsk);
718 rwlock_init(&newsk->sk_dst_lock);
719 atomic_set(&newsk->sk_rmem_alloc, 0);
720 skb_queue_head_init(&newsk->sk_receive_queue);
721 atomic_set(&newsk->sk_wmem_alloc, 0);
722 skb_queue_head_init(&newsk->sk_write_queue);
723 atomic_set(&newsk->sk_omem_alloc, 0);
724 newsk->sk_wmem_queued = 0;
725 newsk->sk_forward_alloc = 0;
727 sock_reset_flag(newsk, SOCK_DONE);
728 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
729 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
730 newsk->sk_send_head = NULL;
731 rwlock_init(&newsk->sk_callback_lock);
732 skb_queue_head_init(&newsk->sk_error_queue);
733 newsk->sk_write_space = sk_stream_write_space;
735 if ((filter = newsk->sk_filter) != NULL)
736 sk_filter_charge(newsk, filter);
738 if (sk->sk_create_child)
739 sk->sk_create_child(sk, newsk);
741 if (unlikely(xfrm_sk_clone_policy(newsk))) {
742 /* It is still a raw copy of the parent, so invalidate
743 * the destructor and do a plain sk_free() */
744 newsk->sk_destruct = NULL;
749 /* Now setup tcp_opt */
750 newtp = tcp_sk(newsk);
751 newtp->pred_flags = 0;
752 newtp->rcv_nxt = req->rcv_isn + 1;
753 newtp->snd_nxt = req->snt_isn + 1;
754 newtp->snd_una = req->snt_isn + 1;
755 newtp->snd_sml = req->snt_isn + 1;
757 tcp_prequeue_init(newtp);
759 tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
761 newtp->retransmits = 0;
764 newtp->mdev = TCP_TIMEOUT_INIT;
765 newtp->rto = TCP_TIMEOUT_INIT;
767 tcp_set_pcount(&newtp->packets_out, 0);
768 tcp_set_pcount(&newtp->left_out, 0);
769 tcp_set_pcount(&newtp->retrans_out, 0);
770 tcp_set_pcount(&newtp->sacked_out, 0);
771 tcp_set_pcount(&newtp->fackets_out, 0);
772 newtp->snd_ssthresh = 0x7fffffff;
774 /* So many TCP implementations out there (incorrectly) count the
775 * initial SYN frame in their delayed-ACK and congestion control
776 * algorithms that we must have the following bandaid to talk
777 * efficiently to them. -DaveM
780 newtp->snd_cwnd_cnt = 0;
782 newtp->frto_counter = 0;
783 newtp->frto_highmark = 0;
785 tcp_set_ca_state(newtp, TCP_CA_Open);
786 tcp_init_xmit_timers(newsk);
787 skb_queue_head_init(&newtp->out_of_order_queue);
788 newtp->rcv_wup = req->rcv_isn + 1;
789 newtp->write_seq = req->snt_isn + 1;
790 newtp->pushed_seq = newtp->write_seq;
791 newtp->copied_seq = req->rcv_isn + 1;
793 newtp->saw_tstamp = 0;
796 newtp->eff_sacks = 0;
798 newtp->probes_out = 0;
799 newtp->num_sacks = 0;
801 newtp->listen_opt = NULL;
802 #ifdef CONFIG_ACCEPT_QUEUES
803 newtp->accept_queue = NULL;
804 memset(newtp->acceptq, 0,sizeof(newtp->acceptq));
805 newtp->class_index = 0;
808 newtp->accept_queue = newtp->accept_queue_tail = NULL;
810 /* Deinitialize syn_wait_lock to trap illegal accesses. */
811 memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
813 /* Back to base struct sock members. */
815 newsk->sk_priority = 0;
816 atomic_set(&newsk->sk_refcnt, 2);
818 set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
819 newsk->sk_xid = sk->sk_xid;
821 set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
822 newsk->sk_nid = sk->sk_nid;
823 #ifdef INET_REFCNT_DEBUG
824 atomic_inc(&inet_sock_nr);
826 atomic_inc(&tcp_sockets_allocated);
828 if (sock_flag(newsk, SOCK_KEEPOPEN))
829 tcp_reset_keepalive_timer(newsk,
830 keepalive_time_when(newtp));
831 newsk->sk_socket = NULL;
832 newsk->sk_sleep = NULL;
833 newsk->sk_owner = NULL;
835 newtp->tstamp_ok = req->tstamp_ok;
836 if((newtp->sack_ok = req->sack_ok) != 0) {
840 newtp->window_clamp = req->window_clamp;
841 newtp->rcv_ssthresh = req->rcv_wnd;
842 newtp->rcv_wnd = req->rcv_wnd;
843 newtp->wscale_ok = req->wscale_ok;
844 if (newtp->wscale_ok) {
845 newtp->snd_wscale = req->snd_wscale;
846 newtp->rcv_wscale = req->rcv_wscale;
848 newtp->snd_wscale = newtp->rcv_wscale = 0;
849 newtp->window_clamp = min(newtp->window_clamp, 65535U);
851 newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
852 newtp->max_window = newtp->snd_wnd;
854 if (newtp->tstamp_ok) {
855 newtp->ts_recent = req->ts_recent;
856 newtp->ts_recent_stamp = xtime.tv_sec;
857 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
859 newtp->ts_recent_stamp = 0;
860 newtp->tcp_header_len = sizeof(struct tcphdr);
862 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
863 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
864 newtp->mss_clamp = req->mss;
865 TCP_ECN_openreq_child(newtp, req);
866 if (newtp->ecn_flags&TCP_ECN_OK)
867 newsk->sk_no_largesend = 1;
871 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
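/* Usage sketch (assumption based on the address-family specific accept path;
 * names and details are indicative only): the af-specific syn_recv_sock()
 * implementation wraps this helper and then fills in the family-specific
 * parts, roughly:
 *
 *	newsk = tcp_create_openreq_child(sk, req, skb);
 *	if (newsk == NULL)
 *		goto exit;
 *	// copy addresses and ports from the open_request, attach the route,
 *	// recompute the mss, then hash newsk into the established table
 */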
877 * Process an incoming packet for SYN_RECV sockets represented
878 * as an open_request.
881 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
882 struct open_request *req,
883 struct open_request **prev)
885 struct tcphdr *th = skb->h.th;
886 struct tcp_opt *tp = tcp_sk(sk);
887 u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
893 if (th->doff > (sizeof(struct tcphdr)>>2)) {
894 tcp_parse_options(skb, &ttp, 0);
896 if (ttp.saw_tstamp) {
897 ttp.ts_recent = req->ts_recent;
898 /* We do not store the true stamp, but it is not required;
899 * it can be estimated (approximately)
902 ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
903 paws_reject = tcp_paws_check(&ttp, th->rst);
907 /* Check for pure retransmitted SYN. */
908 if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
909 flg == TCP_FLAG_SYN &&
912 * RFC793 draws (incorrectly! It was fixed in RFC1122)
913 * this case in figures 6 and 8, but the formal
914 * protocol description says NOTHING.
915 * To be more exact, it says that we should send ACK,
916 * because this segment (at least, if it has no data)
919 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
920 * describe the SYN-RECV state. All the description
921 * is wrong; we cannot believe it and should
922 * rely only on common sense and implementation
925 * Enforce "SYN-ACK" according to figure 8, figure 6
926 * of RFC793, fixed by RFC1122.
928 req->class->rtx_syn_ack(sk, req, NULL);
932 /* The following further reproduces the section "SEGMENT ARRIVES"
933 for the SYN-RECEIVED state of RFC793.
934 It is broken, however: it fails only
935 when SYNs are crossed.
937 You would think that SYN crossing is impossible here, since
938 we should have a SYN_SENT socket (from connect()) on our end,
939 but this is not true if the crossed SYNs were sent to both
940 ends by a malicious third party. We must defend against this,
941 and to do that we first verify the ACK (as per RFC793, page
942 36) and reset if it is invalid. Is this a true full defense?
943 To convince ourselves, let us consider a way in which the ACK
944 test can still pass in this 'malicious crossed SYNs' case.
945 Malicious sender sends identical SYNs (and thus identical sequence
946 numbers) to both A and B:
951 By our good fortune, both A and B select the same initial
952 send sequence number of seven :-)
954 A: sends SYN|ACK, seq=7, ack_seq=8
955 B: sends SYN|ACK, seq=7, ack_seq=8
957 So we are now A, eating this SYN|ACK; the ACK test passes. So
958 does the sequence test; the SYN is truncated, and thus we consider
961 If tp->defer_accept, we silently drop this bare ACK. Otherwise,
962 we create an established connection. Both ends (listening sockets)
963 accept the new incoming connection and try to talk to each other. 8-)
965 Note: this case is both harmless and rare. The possibility is about the
966 same as us discovering intelligent life on another planet tomorrow.
968 But generally, we should (the RFC lies!) accept an ACK
969 from a SYNACK both here and in tcp_rcv_state_process().
970 tcp_rcv_state_process() does not, hence we do not either.
972 Note that the case is absolutely generic:
973 we cannot optimize anything here without
974 violating protocol. All the checks must be made
975 before attempt to create socket.
978 /* RFC793 page 36: "If the connection is in any non-synchronized state ...
979 * and the incoming segment acknowledges something not yet
980 * sent (the segment carries an unacceptable ACK) ...
983 * Invalid ACK: reset will be sent by listening socket
985 if ((flg & TCP_FLAG_ACK) &&
986 (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
989 /* Also, it would not be a bad idea to check rcv_tsecr, which
990 * is essentially an ACK extension; too-early or too-late values
991 * should cause a reset in unsynchronized states.
994 /* RFC793: "first check sequence number". */
996 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
997 req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
998 /* Out of window: send ACK and drop. */
999 if (!(flg & TCP_FLAG_RST))
1000 req->class->send_ack(skb, req);
1002 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
1006 /* In sequence, PAWS is OK. */
1008 if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
1009 req->ts_recent = ttp.rcv_tsval;
1011 if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
1012 /* Truncate SYN, it is out of window starting
1013 at req->rcv_isn+1. */
1014 flg &= ~TCP_FLAG_SYN;
1017 /* RFC793: "second check the RST bit" and
1018 * "fourth, check the SYN bit"
1020 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
1021 goto embryonic_reset;
1023 /* ACK sequence verified above, just make sure ACK is
1024 * set. If ACK not set, just silently drop the packet.
1026 if (!(flg & TCP_FLAG_ACK))
1029 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
1030 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
1035 /* OK, the ACK is valid, create the big socket and
1036 * feed this segment to it. It will repeat all
1037 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
1038 * ESTABLISHED STATE. If it is dropped after the
1039 * socket is created, expect trouble.
1041 child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
1043 goto listen_overflow;
1045 sk_set_owner(child, sk->sk_owner);
1046 tcp_synq_unlink(tp, req, prev);
1047 tcp_synq_removed(sk, req);
1049 tcp_acceptq_queue(sk, req, child);
1053 if (!sysctl_tcp_abort_on_overflow) {
1059 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
1060 if (!(flg & TCP_FLAG_RST))
1061 req->class->send_reset(skb);
1063 tcp_synq_drop(sk, req, prev);
1068 * Queue the segment on the new socket if the new socket is active,
1069 * otherwise we just short-circuit this and continue with
1073 int tcp_child_process(struct sock *parent, struct sock *child,
1074 struct sk_buff *skb)
1077 int state = child->sk_state;
1079 if (!sock_owned_by_user(child)) {
1080 ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
1082 /* Wakeup parent, send SIGIO */
1083 if (state == TCP_SYN_RECV && child->sk_state != state)
1084 parent->sk_data_ready(parent, 0);
1086 /* Alas, it is possible again, because we do the lookup
1087 * in the main socket hash table and the lock on the listening
1088 * socket does not protect us any more.
1090 sk_add_backlog(child, skb);
1093 bh_unlock_sock(child);
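/* Call sketch (assumption based on the IPv4 receive path of this era; names
 * are indicative only): when a segment for a listening socket completes a
 * pending open_request, the parent hands it to the new child like this:
 *
 *	nsk = tcp_check_req(sk, skb, req, prev);
 *	if (nsk && nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			goto reset;
 *		return 0;
 *	}
 */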
1098 EXPORT_SYMBOL(tcp_check_req);
1099 EXPORT_SYMBOL(tcp_child_process);
1100 EXPORT_SYMBOL(tcp_create_openreq_child);
1101 EXPORT_SYMBOL(tcp_timewait_state_process);
1102 EXPORT_SYMBOL(tcp_tw_deschedule);