2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/sysctl.h>
27 #include <linux/workqueue.h>
28 #include <linux/vs_socket.h>
29 #include <linux/vs_context.h>
30 #include <linux/vs_network.h>
32 #include <net/inet_common.h>
36 #define SYNC_INIT 0 /* let the user enable it */
41 int sysctl_tcp_tw_recycle;
42 int sysctl_tcp_max_tw_buckets = NR_FILE*2;
44 int sysctl_tcp_syncookies = SYNC_INIT;
45 int sysctl_tcp_abort_on_overflow;
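/* A sketch of the semantics, in the spirit of RFC 793 "SEGMENT ARRIVES":
 * a segment [seq, end_seq) is acceptable when it overlaps the receive
 * window [s_win, e_win), with zero-length segments sitting exactly on a
 * window edge also accepted.
 */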
47 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
51 if (after(end_seq, s_win) && before(seq, e_win))
53 return (seq == e_win && seq == end_seq);
56 /* New-style handling of TIME_WAIT sockets. */
61 /* Must be called with locally disabled BHs. */
62 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
64 struct tcp_ehash_bucket *ehead;
65 struct tcp_bind_hashbucket *bhead;
66 struct tcp_bind_bucket *tb;
68 /* Unlink from established hashes. */
69 ehead = &tcp_ehash[tw->tw_hashent];
70 write_lock(&ehead->lock);
71 if (hlist_unhashed(&tw->tw_node)) {
72 write_unlock(&ehead->lock);
75 __hlist_del(&tw->tw_node);
76 sk_node_init(&tw->tw_node);
77 write_unlock(&ehead->lock);
79 /* Disassociate with bind bucket. */
80 bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
81 spin_lock(&bhead->lock);
83 __hlist_del(&tw->tw_bind_node);
85 tcp_bucket_destroy(tb);
86 spin_unlock(&bhead->lock);
88 #ifdef INET_REFCNT_DEBUG
89 if (atomic_read(&tw->tw_refcnt) != 1) {
90 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
91 atomic_read(&tw->tw_refcnt));
98 * * The main purpose of the TIME-WAIT state is to close the connection gracefully,
99 * when one of the ends sits in LAST-ACK or CLOSING, retransmitting its FIN
100 * (and, probably, a tail of data) while one or more of our ACKs are lost.
101 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
102 * lifetime in the internet, which leads to the wrong conclusion that
103 * it is set to catch "old duplicate segments" wandering out of their path.
104 * That is not quite correct. This timeout is calculated so that it exceeds the
105 * maximal retransmission timeout by enough to allow the loss of one (or more)
106 * segments sent by the peer, plus our ACKs. This time may be calculated from the RTO.
107 * * When a TIME-WAIT socket receives an RST, it means that the other end
108 * has finally closed and we are allowed to kill the TIME-WAIT state too.
109 * * The second purpose of TIME-WAIT is catching old duplicate segments.
110 * Well, certainly it is pure paranoia, but if we load TIME-WAIT
111 * with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
112 * * If we invented some cleverer way to catch duplicates
113 * (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
115 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
116 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
117 * from the very beginning.
119 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
120 * is _not_ stateless. That means that, strictly speaking, we must
121 * spinlock it. I do not want to! Well, the probability of misbehaviour
122 * is ridiculously low and, it seems, we could use some mb() tricks
123 * to avoid misreading sequence numbers, states etc.  --ANK
126 tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
127 struct tcphdr *th, unsigned len)
133 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
134 tcp_parse_options(skb, &tp, 0);
137 tp.ts_recent = tw->tw_ts_recent;
138 tp.ts_recent_stamp = tw->tw_ts_recent_stamp;
139 paws_reject = tcp_paws_check(&tp, th->rst);
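/* paws_reject holds the PAWS (RFC 1323) verdict: reject the segment when its
 * timestamp is older than the last one remembered for this peer, unless that
 * remembered stamp is already stale, or the segment is an RST arriving past
 * the MSL (see tcp_paws_check()).
 */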
143 if (tw->tw_substate == TCP_FIN_WAIT2) {
144 /* Just repeat all the checks of tcp_rcv_state_process() */
146 /* Out of window, send ACK */
148 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
150 tw->tw_rcv_nxt + tw->tw_rcv_wnd))
156 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
160 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
161 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
163 return TCP_TW_SUCCESS;
166 /* New data or FIN. If new data arrive after half-duplex close,
170 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
172 tcp_tw_deschedule(tw);
177 /* FIN arrived, enter true time-wait state. */
178 tw->tw_substate = TCP_TIME_WAIT;
179 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
181 tw->tw_ts_recent_stamp = xtime.tv_sec;
182 tw->tw_ts_recent = tp.rcv_tsval;
185 /* I am ashamed, but I failed to make this more elegant.
186 * Yes, it is a direct reference to IP, which is impossible
187 * to generalize to IPv6. Taking into account that IPv6
188 * does not understand recycling in any case, it is not
189 * a big problem in practice. --ANK */
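/* If the peer supports timestamps and tcp_tw_recycle is enabled, the per-peer
 * timestamp is remembered and the wait can be shortened to tw_timeout
 * (3.5*RTO, set up in tcp_time_wait()); otherwise we sit out the full
 * TCP_TIMEWAIT_LEN.
 */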
190 if (tw->tw_family == AF_INET &&
191 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
192 tcp_v4_tw_remember_stamp(tw))
193 tcp_tw_schedule(tw, tw->tw_timeout);
195 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
200 * Now real TIME-WAIT state.
203 * "When a connection is [...] on TIME-WAIT state [...]
204 * [a TCP] MAY accept a new SYN from the remote TCP to
205 * reopen the connection directly, if it:
207 * (1) assigns its initial sequence number for the new
208 * connection to be larger than the largest sequence
209 * number it used on the previous connection incarnation,
212 * (2) returns to TIME-WAIT state if the SYN turns out
213 * to be an old duplicate".
217 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
218 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
219 /* In-window segment: it may only be a reset or a bare ack. */
222 /* This is TIME_WAIT assassination, in two flavors.
223 * Oh well... nobody has a sufficient solution to this
226 if (sysctl_tcp_rfc1337 == 0) {
228 tcp_tw_deschedule(tw);
230 return TCP_TW_SUCCESS;
233 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
236 tw->tw_ts_recent = tp.rcv_tsval;
237 tw->tw_ts_recent_stamp = xtime.tv_sec;
241 return TCP_TW_SUCCESS;
244 /* Out of window segment.
246 All the segments are ACKed immediately.
248 The only exception is a new SYN. We accept it, if it is
249 not an old duplicate and we are not in danger of being killed
250 by delayed old duplicates. The RFC check, that it carries a
251 newer sequence number, only works at rates < 40 Mbit/sec.
252 However, if PAWS works, it is reliable AND, even more,
253 we may even relax the silly seq-space cutoff.
255 RED-PEN: we violate the main RFC requirement: if this SYN turns out to be an
256 old duplicate (i.e. we receive an RST in reply to our SYN-ACK),
257 we must return the socket to the time-wait state. It is not good,
261 if (th->syn && !th->rst && !th->ack && !paws_reject &&
262 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
263 (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) {
264 u32 isn = tw->tw_snd_nxt + 65535 + 2;
267 TCP_SKB_CB(skb)->when = isn;
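/* The new ISN is pushed beyond anything the old connection could have used
 * (snd_nxt plus a full 64K window), satisfying the RFC 1122 reopen rule quoted
 * above. Presumably it is stashed in TCP_SKB_CB(skb)->when so that the
 * listener's SYN processing can pick it up once we return TCP_TW_SYN.
 */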
272 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
275 /* In this case we must reset the TIMEWAIT timer.
277 * If it is an ACKless SYN it may be both an old duplicate
278 * and a new good SYN with a random sequence number < rcv_nxt.
279 * Do not reschedule in the latter case.
281 if (paws_reject || th->ack)
282 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
284 /* Send ACK. Note that we do not put the bucket;
285 * it will be released by the caller.
290 return TCP_TW_SUCCESS;
293 /* Enter the time wait state. This is called with locally disabled BH.
294 * Essentially we whip up a timewait bucket, copy the
295 * relevant info into it from the SK, and mess with hash chains
298 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
300 struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
301 struct tcp_bind_hashbucket *bhead;
303 /* Step 1: Put TW into bind hash. Original socket stays there too.
304 Note that any socket with inet_sk(sk)->num != 0 MUST be bound in
305 the binding cache, even if it is closed.
307 bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
308 spin_lock(&bhead->lock);
309 tw->tw_tb = tcp_sk(sk)->bind_hash;
310 BUG_TRAP(tcp_sk(sk)->bind_hash);
311 tw_add_bind_node(tw, &tw->tw_tb->owners);
312 spin_unlock(&bhead->lock);
314 write_lock(&ehead->lock);
316 /* Step 2: Remove SK from established hash. */
317 if (__sk_del_node_init(sk))
318 sock_prot_dec_use(sk->sk_prot);
320 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
321 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
322 atomic_inc(&tw->tw_refcnt);
324 write_unlock(&ehead->lock);
328 * Move a socket to time-wait or dead fin-wait-2 state.
330 void tcp_time_wait(struct sock *sk, int state, int timeo)
332 struct tcp_tw_bucket *tw = NULL;
333 struct tcp_opt *tp = tcp_sk(sk);
336 if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
337 recycle_ok = tp->af_specific->remember_stamp(sk);
339 if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
340 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
343 struct inet_opt *inet = inet_sk(sk);
344 int rto = (tp->rto<<2) - (tp->rto>>1);
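/* (tp->rto<<2) is 4*RTO and (tp->rto>>1) is RTO/2, so rto = 3.5*RTO: the
 * shortened TIME-WAIT interval used below when recycling is possible
 * (see the rationale in tcp_tw_schedule()).
 */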
346 /* Give us an identity. */
347 tw->tw_daddr = inet->daddr;
348 tw->tw_rcv_saddr = inet->rcv_saddr;
349 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
350 tw->tw_num = inet->num;
351 tw->tw_state = TCP_TIME_WAIT;
352 tw->tw_substate = state;
353 tw->tw_sport = inet->sport;
354 tw->tw_dport = inet->dport;
355 tw->tw_family = sk->sk_family;
356 tw->tw_reuse = sk->sk_reuse;
357 tw->tw_rcv_wscale = tp->rcv_wscale;
358 atomic_set(&tw->tw_refcnt, 1);
360 tw->tw_hashent = sk->sk_hashent;
361 tw->tw_rcv_nxt = tp->rcv_nxt;
362 tw->tw_snd_nxt = tp->snd_nxt;
363 tw->tw_rcv_wnd = tcp_receive_window(tp);
364 tw->tw_ts_recent = tp->ts_recent;
365 tw->tw_ts_recent_stamp = tp->ts_recent_stamp;
366 tw_dead_node_init(tw);
368 tw->tw_xid = sk->sk_xid;
369 tw->tw_vx_info = NULL;
370 tw->tw_nid = sk->sk_nid;
371 tw->tw_nx_info = NULL;
373 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
374 if (tw->tw_family == PF_INET6) {
375 struct ipv6_pinfo *np = inet6_sk(sk);
377 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
378 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
379 tw->tw_v6_ipv6only = np->ipv6only;
381 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
382 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
383 tw->tw_v6_ipv6only = 0;
386 /* Linkage updates. */
387 __tcp_tw_hashdance(sk, tw);
389 /* Get the TIME_WAIT timeout firing. */
394 tw->tw_timeout = rto;
396 tw->tw_timeout = TCP_TIMEWAIT_LEN;
397 if (state == TCP_TIME_WAIT)
398 timeo = TCP_TIMEWAIT_LEN;
401 tcp_tw_schedule(tw, timeo);
404 /* Sorry, if we're out of memory, just CLOSE this
405 * socket up. We've got bigger problems than
406 * non-graceful socket closings.
409 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
412 tcp_update_metrics(sk);
416 /* Kill off TIME_WAIT sockets once their lifetime has expired. */
417 static int tcp_tw_death_row_slot;
419 static void tcp_twkill(unsigned long);
421 /* TIME_WAIT reaping mechanism. */
422 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
423 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
425 #define TCP_TWKILL_QUOTA 100
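/* The death row is a small timer wheel: TCP_TWKILL_SLOTS slots, one slot
 * reaped every TCP_TWKILL_PERIOD jiffies, so a full sweep takes
 * TCP_TIMEWAIT_LEN. With the usual TCP_TIMEWAIT_LEN of 60*HZ that is one
 * slot every 7.5 seconds. TCP_TWKILL_QUOTA bounds how many buckets a single
 * timer run may kill; anything beyond the quota is deferred to the
 * twkill_work workqueue below.
 */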
427 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
428 static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
429 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
430 static void twkill_work(void *);
431 static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
432 static u32 twkill_thread_slots;
434 /* Returns non-zero if quota exceeded. */
435 static int tcp_do_twkill_work(int slot, unsigned int quota)
437 struct tcp_tw_bucket *tw;
438 struct hlist_node *node;
442 /* NOTE: compare this to the previous version, where the lock
443 * was released after detaching the chain. That was racy,
444 * because tw buckets are scheduled in a non-serialized context
445 * in 2.3 (with netfilter), and with softnet it is common, because
446 * soft irqs are not sequenced.
451 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
452 __tw_del_dead_node(tw);
453 spin_unlock(&tw_death_lock);
454 tcp_timewait_kill(tw);
457 spin_lock(&tw_death_lock);
458 if (killed > quota) {
463 /* While we dropped tw_death_lock, another cpu may have
464 * killed off the next TW bucket in the list, therefore
465 * do a fresh re-read of the hlist head node with the
466 * lock reacquired. We still use the hlist traversal
467 * macro in order to get the prefetches.
472 tcp_tw_count -= killed;
473 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
478 static void tcp_twkill(unsigned long dummy)
482 spin_lock(&tw_death_lock);
484 if (tcp_tw_count == 0)
488 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
490 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
492 schedule_work(&tcp_twkill_work);
495 /* We purged the entire slot, anything left? */
499 tcp_tw_death_row_slot =
500 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
502 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
504 spin_unlock(&tw_death_lock);
507 extern void twkill_slots_invalid(void);
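/* twkill_slots_invalid() intentionally has no definition: if the compile-time
 * check in twkill_work() is ever true, the call survives dead-code elimination
 * and the build fails at link time, an ad-hoc link-time assertion.
 */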
509 static void twkill_work(void *dummy)
513 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
514 twkill_slots_invalid();
516 while (twkill_thread_slots) {
517 spin_lock_bh(&tw_death_lock);
518 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
519 if (!(twkill_thread_slots & (1 << i)))
522 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
523 if (need_resched()) {
524 spin_unlock_bh(&tw_death_lock);
526 spin_lock_bh(&tw_death_lock);
530 twkill_thread_slots &= ~(1 << i);
532 spin_unlock_bh(&tw_death_lock);
536 /* These are always called from BH context. See callers in
537 * tcp_input.c to verify this.
540 /* This is for handling early-kills of TIME_WAIT sockets. */
541 void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
543 spin_lock(&tw_death_lock);
544 if (tw_del_dead_node(tw)) {
546 if (--tcp_tw_count == 0)
547 del_timer(&tcp_tw_timer);
549 spin_unlock(&tw_death_lock);
550 tcp_timewait_kill(tw);
553 /* Short-time timewait calendar */
555 static int tcp_twcal_hand = -1;
556 static int tcp_twcal_jiffie;
557 static void tcp_twcal_tick(unsigned long);
558 static struct timer_list tcp_twcal_timer =
559 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
560 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
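/* Recycled (short-lived) TIME-WAIT buckets go on this second, fine-grained
 * wheel: TCP_TW_RECYCLE_SLOTS slots, each covering 2^TCP_TW_RECYCLE_TICK
 * jiffies, swept by tcp_twcal_tick(). Long-lived buckets use the coarse
 * tcp_tw_death_row wheel above.
 */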
562 void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
564 struct hlist_head *list;
567 /* timeout := RTO * 3.5
569 * 3.5 = 1+2+0.5 to wait for two retransmits.
571 * RATIONALE: if a FIN arrived and we entered the TIME-WAIT state,
572 * our ACK acking that FIN can be lost. If N subsequent retransmitted
573 * FINs (or previous segments) are lost, the probability of such an event
574 * is p^(N+1), where p is the probability of losing a single packet, and
575 * the time to detect the loss is about RTO*(2^N - 1) with exponential
576 * backoff. The normal timewait length is calculated so that we have
577 * waited for at least one retransmitted FIN (the maximal RTO is 120 sec).
578 * [ BTW Linux, following BSD, violates this requirement by waiting
579 * only 60 sec; we should wait at least 240 secs.
580 * Well, 240 consumes too many resources 8)
582 * This interval is not reduced to catch old duplicates and
583 * responses to our wandering segments living for two MSLs.
584 * However, if we use PAWS to detect
585 * old duplicates, we can reduce the interval to the bounds required
586 * by the RTO, rather than the MSL. So, if the peer understands PAWS, we
587 * kill the tw bucket after 3.5*RTO (it is important that this number
588 * is greater than the TS tick!) and detect old duplicates with the help
591 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
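/* Round the timeout up to whole recycle ticks: slot = ceil(timeo / 2^TICK).
 * Small results (< TCP_TW_RECYCLE_SLOTS) go on the fine-grained twcal wheel;
 * anything larger falls through to the coarse death-row slots below.
 */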
593 spin_lock(&tw_death_lock);
595 /* Unlink it, if it was scheduled */
596 if (tw_del_dead_node(tw))
599 atomic_inc(&tw->tw_refcnt);
601 if (slot >= TCP_TW_RECYCLE_SLOTS) {
602 /* Schedule to slow timer */
603 if (timeo >= TCP_TIMEWAIT_LEN) {
604 slot = TCP_TWKILL_SLOTS-1;
606 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
607 if (slot >= TCP_TWKILL_SLOTS)
608 slot = TCP_TWKILL_SLOTS-1;
610 tw->tw_ttd = jiffies + timeo;
611 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
612 list = &tcp_tw_death_row[slot];
614 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
616 if (tcp_twcal_hand < 0) {
618 tcp_twcal_jiffie = jiffies;
619 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
620 add_timer(&tcp_twcal_timer);
622 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
623 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
624 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
626 list = &tcp_twcal_row[slot];
629 hlist_add_head(&tw->tw_death_node, list);
631 if (tcp_tw_count++ == 0)
632 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
633 spin_unlock(&tw_death_lock);
636 void tcp_twcal_tick(unsigned long dummy)
640 unsigned long now = jiffies;
644 spin_lock(&tw_death_lock);
645 if (tcp_twcal_hand < 0)
648 slot = tcp_twcal_hand;
649 j = tcp_twcal_jiffie;
651 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
652 if (time_before_eq(j, now)) {
653 struct hlist_node *node, *safe;
654 struct tcp_tw_bucket *tw;
656 tw_for_each_inmate_safe(tw, node, safe,
657 &tcp_twcal_row[slot]) {
658 __tw_del_dead_node(tw);
659 tcp_timewait_kill(tw);
666 tcp_twcal_jiffie = j;
667 tcp_twcal_hand = slot;
670 if (!hlist_empty(&tcp_twcal_row[slot])) {
671 mod_timer(&tcp_twcal_timer, j);
675 j += (1<<TCP_TW_RECYCLE_TICK);
676 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
681 if ((tcp_tw_count -= killed) == 0)
682 del_timer(&tcp_tw_timer);
683 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
684 spin_unlock(&tw_death_lock);
687 /* This is not only more efficient than what we used to do, it eliminates
688 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
690 * Actually, we could avoid a lot of memory writes here. The tp of the
691 * listening socket contains all the necessary default parameters.
693 struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
695 /* allocate the newsk from the same slab as the master sock;
696 * otherwise, at sk_free time we'll try to free it from the wrong
697 * slabcache (i.e. is it TCPv4 or v6?) -acme */
698 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_slab);
701 struct tcp_opt *newtp;
702 struct sk_filter *filter;
704 memcpy(newsk, sk, sizeof(struct tcp_sock));
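/* The child starts life as a byte-for-byte copy of the listener; everything
 * that must not be shared (hash links, locks, queues, counters, congestion
 * state) is re-initialized piece by piece below.
 */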
705 newsk->sk_state = TCP_SYN_RECV;
710 sk_node_init(&newsk->sk_node);
711 tcp_sk(newsk)->bind_hash = NULL;
713 /* Clone the TCP header template */
714 inet_sk(newsk)->dport = req->rmt_port;
716 sock_lock_init(newsk);
719 newsk->sk_dst_lock = RW_LOCK_UNLOCKED;
720 atomic_set(&newsk->sk_rmem_alloc, 0);
721 skb_queue_head_init(&newsk->sk_receive_queue);
722 atomic_set(&newsk->sk_wmem_alloc, 0);
723 skb_queue_head_init(&newsk->sk_write_queue);
724 atomic_set(&newsk->sk_omem_alloc, 0);
725 newsk->sk_wmem_queued = 0;
726 newsk->sk_forward_alloc = 0;
728 sock_reset_flag(newsk, SOCK_DONE);
729 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
730 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
731 newsk->sk_send_head = NULL;
732 newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
733 skb_queue_head_init(&newsk->sk_error_queue);
734 newsk->sk_write_space = sk_stream_write_space;
736 if ((filter = newsk->sk_filter) != NULL)
737 sk_filter_charge(newsk, filter);
739 if (sk->sk_create_child)
740 sk->sk_create_child(sk, newsk);
742 if (unlikely(xfrm_sk_clone_policy(newsk))) {
743 /* It is still raw copy of parent, so invalidate
744 * destructor and make plain sk_free() */
745 newsk->sk_destruct = NULL;
750 /* Now setup tcp_opt */
751 newtp = tcp_sk(newsk);
752 newtp->pred_flags = 0;
753 newtp->rcv_nxt = req->rcv_isn + 1;
754 newtp->snd_nxt = req->snt_isn + 1;
755 newtp->snd_una = req->snt_isn + 1;
756 newtp->snd_sml = req->snt_isn + 1;
758 tcp_prequeue_init(newtp);
760 tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
762 newtp->retransmits = 0;
765 newtp->mdev = TCP_TIMEOUT_INIT;
766 newtp->rto = TCP_TIMEOUT_INIT;
768 newtp->packets_out = 0;
770 newtp->retrans_out = 0;
771 newtp->sacked_out = 0;
772 newtp->fackets_out = 0;
773 newtp->snd_ssthresh = 0x7fffffff;
775 /* So many TCP implementations out there (incorrectly) count the
776 * initial SYN frame in their delayed-ACK and congestion control
777 * algorithms that we must have the following bandaid to talk
778 * efficiently to them. -DaveM
781 newtp->snd_cwnd_cnt = 0;
783 newtp->frto_counter = 0;
784 newtp->frto_highmark = 0;
786 tcp_set_ca_state(newtp, TCP_CA_Open);
787 tcp_init_xmit_timers(newsk);
788 skb_queue_head_init(&newtp->out_of_order_queue);
789 newtp->rcv_wup = req->rcv_isn + 1;
790 newtp->write_seq = req->snt_isn + 1;
791 newtp->pushed_seq = newtp->write_seq;
792 newtp->copied_seq = req->rcv_isn + 1;
794 newtp->saw_tstamp = 0;
797 newtp->eff_sacks = 0;
799 newtp->probes_out = 0;
800 newtp->num_sacks = 0;
802 newtp->listen_opt = NULL;
803 #ifdef CONFIG_ACCEPT_QUEUES
804 newtp->accept_queue = NULL;
805 memset(newtp->acceptq, 0,sizeof(newtp->acceptq));
806 newtp->class_index = 0;
809 newtp->accept_queue = newtp->accept_queue_tail = NULL;
811 /* Deinitialize syn_wait_lock to trap illegal accesses. */
812 memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
814 /* Back to base struct sock members. */
816 newsk->sk_priority = 0;
817 atomic_set(&newsk->sk_refcnt, 2);
819 set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
820 newsk->sk_xid = sk->sk_xid;
821 set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
822 newsk->sk_nid = sk->sk_nid;
823 #ifdef INET_REFCNT_DEBUG
824 atomic_inc(&inet_sock_nr);
826 atomic_inc(&tcp_sockets_allocated);
828 if (sock_flag(newsk, SOCK_KEEPOPEN))
829 tcp_reset_keepalive_timer(newsk,
830 keepalive_time_when(newtp));
831 newsk->sk_socket = NULL;
832 newsk->sk_sleep = NULL;
833 newsk->sk_owner = NULL;
835 newtp->tstamp_ok = req->tstamp_ok;
836 if((newtp->sack_ok = req->sack_ok) != 0) {
840 newtp->window_clamp = req->window_clamp;
841 newtp->rcv_ssthresh = req->rcv_wnd;
842 newtp->rcv_wnd = req->rcv_wnd;
843 newtp->wscale_ok = req->wscale_ok;
844 if (newtp->wscale_ok) {
845 newtp->snd_wscale = req->snd_wscale;
846 newtp->rcv_wscale = req->rcv_wscale;
848 newtp->snd_wscale = newtp->rcv_wscale = 0;
849 newtp->window_clamp = min(newtp->window_clamp, 65535U);
851 newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
852 newtp->max_window = newtp->snd_wnd;
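/* The skb here is normally the ACK that completes the handshake, so its
 * advertised window is already subject to scaling, hence the shift above;
 * that value also seeds max_window.
 */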
854 if (newtp->tstamp_ok) {
855 newtp->ts_recent = req->ts_recent;
856 newtp->ts_recent_stamp = xtime.tv_sec;
857 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
859 newtp->ts_recent_stamp = 0;
860 newtp->tcp_header_len = sizeof(struct tcphdr);
862 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
863 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
864 newtp->mss_clamp = req->mss;
865 TCP_ECN_openreq_child(newtp, req);
866 if (newtp->ecn_flags&TCP_ECN_OK)
867 newsk->sk_no_largesend = 1;
869 tcp_vegas_init(newtp);
870 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
876 * Process an incoming packet for SYN_RECV sockets represented
877 * as an open_request.
880 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
881 struct open_request *req,
882 struct open_request **prev)
884 struct tcphdr *th = skb->h.th;
885 struct tcp_opt *tp = tcp_sk(sk);
886 u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
892 if (th->doff > (sizeof(struct tcphdr)>>2)) {
893 tcp_parse_options(skb, &ttp, 0);
895 if (ttp.saw_tstamp) {
896 ttp.ts_recent = req->ts_recent;
897 /* We do not store the true stamp, but it is not required;
898 * it can be estimated (approximately)
901 ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
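/* The estimate only has to be coarse: tcp_paws_check() compares
 * ts_recent_stamp against thresholds on the order of the MSL and of 24 days,
 * so "now minus roughly how long ago the SYN arrived" (the initial SYN-ACK
 * timeout doubled per retransmission) is plenty accurate.
 */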
902 paws_reject = tcp_paws_check(&ttp, th->rst);
906 /* Check for pure retransmitted SYN. */
907 if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
908 flg == TCP_FLAG_SYN &&
911 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
912 * this case on figure 6 and figure 8, but formal
913 * protocol description says NOTHING.
914 * To be more exact, it says that we should send ACK,
915 * because this segment (at least, if it has no data)
918 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
919 * describe the SYN-RECV state. All the description
920 * is wrong; we cannot trust it and should
921 * rely only on common sense and implementation
924 * Enforce "SYN-ACK" according to figure 8, figure 6
925 * of RFC793, fixed by RFC1122.
927 req->class->rtx_syn_ack(sk, req, NULL);
931 /* Further reproduces section "SEGMENT ARRIVES"
932 for state SYN-RECEIVED of RFC793.
933 It is broken, however: it fails only
934 when SYNs are crossed.
936 You would think that SYN crossing is impossible here, since
937 we should have a SYN_SENT socket (from connect()) on our end,
938 but this is not true if the crossed SYNs were sent to both
939 ends by a malicious third party. We must defend against this,
940 and to do that we first verify the ACK (as per RFC793, page
941 36) and reset if it is invalid. Is this a true full defense?
942 To convince ourselves, let us consider a way in which the ACK
943 test can still pass in this 'malicious crossed SYNs' case.
944 Malicious sender sends identical SYNs (and thus identical sequence
945 numbers) to both A and B:
950 By our good fortune, both A and B select the same initial
951 send sequence number of seven :-)
953 A: sends SYN|ACK, seq=7, ack_seq=8
954 B: sends SYN|ACK, seq=7, ack_seq=8
956 So now we are A, eating this SYN|ACK; the ACK test passes. So
957 does the sequence test, the SYN is truncated, and thus we consider
960 If tp->defer_accept, we silently drop this bare ACK. Otherwise,
961 we create an established connection. Both ends (listening sockets)
962 accept the new incoming connection and try to talk to each other. 8-)
964 Note: This case is both harmless and rare. The possibility is about the
965 same as us discovering intelligent life on another planet tomorrow.
967 But generally, we should (the RFC lies!) accept an ACK
968 for a SYNACK both here and in tcp_rcv_state_process().
969 tcp_rcv_state_process() does not, hence we do not either.
971 Note that the case is absolutely generic:
972 we cannot optimize anything here without
973 violating the protocol. All the checks must be made
974 before the attempt to create a socket.
977 /* RFC793 page 36: "If the connection is in any non-synchronized state ...
978 * and the incoming segment acknowledges something not yet
979 * sent (the segment carries an unacceptable ACK) ...
982 * Invalid ACK: reset will be sent by listening socket
984 if ((flg & TCP_FLAG_ACK) &&
985 (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
988 /* Also, it would not be a bad idea to check rcv_tsecr, which
989 * is essentially an ACK extension; too-early or too-late values
990 * should cause a reset in unsynchronized states.
993 /* RFC793: "first check sequence number". */
995 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
996 req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
997 /* Out of window: send ACK and drop. */
998 if (!(flg & TCP_FLAG_RST))
999 req->class->send_ack(skb, req);
1001 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
1005 /* In sequence, PAWS is OK. */
1007 if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
1008 req->ts_recent = ttp.rcv_tsval;
1010 if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
1011 /* Truncate SYN, it is out of window starting
1012 at req->rcv_isn+1. */
1013 flg &= ~TCP_FLAG_SYN;
1016 /* RFC793: "second check the RST bit" and
1017 * "fourth, check the SYN bit"
1019 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
1020 goto embryonic_reset;
1022 /* ACK sequence verified above, just make sure ACK is
1023 * set. If ACK not set, just silently drop the packet.
1025 if (!(flg & TCP_FLAG_ACK))
1028 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
1029 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
1034 /* OK, the ACK is valid, create the big socket and
1035 * feed this segment to it. It will repeat all
1036 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
1037 * ESTABLISHED STATE. If it gets dropped after the
1038 * socket is created, expect trouble.
1040 child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
1042 goto listen_overflow;
1044 sk_set_owner(child, sk->sk_owner);
1045 tcp_synq_unlink(tp, req, prev);
1046 tcp_synq_removed(sk, req);
1048 tcp_acceptq_queue(sk, req, child);
1052 if (!sysctl_tcp_abort_on_overflow) {
1058 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
1059 if (!(flg & TCP_FLAG_RST))
1060 req->class->send_reset(skb);
1062 tcp_synq_drop(sk, req, prev);
1067 * Queue the segment on the new socket if the new socket is active;
1068 * otherwise we just short-circuit this and continue with
1072 int tcp_child_process(struct sock *parent, struct sock *child,
1073 struct sk_buff *skb)
1076 int state = child->sk_state;
1078 if (!sock_owned_by_user(child)) {
1079 ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
1081 /* Wakeup parent, send SIGIO */
1082 if (state == TCP_SYN_RECV && child->sk_state != state)
1083 parent->sk_data_ready(parent, 0);
1085 /* Alas, it is possible again, because we do the lookup
1086 * in the main socket hash table and the lock on the listening
1087 * socket no longer protects us.
1089 sk_add_backlog(child, skb);
1092 bh_unlock_sock(child);
1097 EXPORT_SYMBOL(tcp_check_req);
1098 EXPORT_SYMBOL(tcp_child_process);
1099 EXPORT_SYMBOL(tcp_create_openreq_child);
1100 EXPORT_SYMBOL(tcp_timewait_state_process);
1101 EXPORT_SYMBOL(tcp_tw_deschedule);
1103 #ifdef CONFIG_SYSCTL
1104 EXPORT_SYMBOL(sysctl_tcp_tw_recycle);