/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/vs_socket.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_tw_recycle;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;
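
/* Returns non-zero if the segment [seq, end_seq) overlaps the window
 * [s_win, e_win), with special cases for a segment starting exactly at
 * the left edge and for a zero-length segment sitting on the right edge
 * (cf. the RFC 793 acceptability test).
 */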
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}
/* New-style handling of TIME_WAIT sockets. */

int tcp_tw_count;
/* Must be called with locally disabled BHs. */
static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead;
	struct tcp_bind_hashbucket *bhead;
	struct tcp_bind_bucket *tb;

	/* Unlink from established hashes. */
	ehead = &tcp_ehash[tw->tw_hashent];
	write_lock(&ehead->lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(&ehead->lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	tcp_bucket_destroy(tb);
	spin_unlock(&bhead->lock);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
		       atomic_read(&tw->tw_refcnt));
	}
#endif
	tcp_tw_put(tw);
}
/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
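
/* Returns one of four verdicts acted on by the caller: TCP_TW_SUCCESS
 * (segment consumed, nothing to send), TCP_TW_ACK (re-ACK the peer),
 * TCP_TW_RST (answer with a reset) or TCP_TW_SYN (acceptable new SYN,
 * reopen the connection).
 */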
enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
			   struct tcphdr *th, unsigned len)
{
	struct tcp_opt tp;
	int paws_reject = 0;

	tp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tp, 0);

		if (tp.saw_tstamp) {
			tp.ts_recent	   = tw->tw_ts_recent;
			tp.ts_recent_stamp = tw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_check(&tp, th->rst);
		}
	}
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tw->tw_rcv_nxt,
				   tw->tw_rcv_nxt + tw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			tcp_tw_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
kill_with_rst:
			tcp_tw_deschedule(tw);
			tcp_tw_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	= TCP_TIME_WAIT;
		tw->tw_rcv_nxt	= TCP_SKB_CB(skb)->end_seq;
		if (tp.saw_tstamp) {
			tw->tw_ts_recent_stamp	= xtime.tv_sec;
			tw->tw_ts_recent	= tp.rcv_tsval;
		}

		/* I am shamed, but failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			tcp_tw_schedule(tw, tw->tw_timeout);
		else
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				tcp_tw_deschedule(tw);
				tcp_tw_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tp.saw_tstamp) {
			tw->tw_ts_recent	= tp.rcv_tsval;
			tw->tw_ts_recent_stamp	= xtime.tv_sec;
		}

		tcp_tw_put(tw);
		return TCP_TW_SUCCESS;
	}
	/*	Out of window segment.

		All the segments are ACKed immediately.

		The only exception is a new SYN. We accept it, if it is
		not an old duplicate and we are not in danger of being killed
		by delayed old duplicates. The RFC check, that it carries a
		newer sequence number, works at rates < 40 Mbit/sec.
		However, if PAWS works, it is reliable AND, even more,
		we may even relax the silly seq-space cutoff.

		RED-PEN: we violate the main RFC requirement: if this SYN
		turns out to be an old duplicate (i.e. we receive an RST in
		reply to our SYN-ACK), we must return the socket to time-wait
		state. It is not good, but not fatal yet.
	 */
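
	/* The new ISN is placed one maximal window (65535) plus 2 above
	 * tw_snd_nxt (the 2 presumably covering the SYN and FIN sequence
	 * slots), so requirement (1) above holds; it is handed to the
	 * listener via TCP_SKB_CB(skb)->when.
	 */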
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
	     (tp.saw_tstamp && (s32)(tw->tw_ts_recent - tp.rcv_tsval) < 0))) {
		u32 isn = tw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(PAWSEstabRejected);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	tcp_tw_put(tw);
	return TCP_TW_SUCCESS;
}
/* Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the
 * relevant info into it from the SK, and mess with hash chains
 * and list linkage.
 */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
	struct tcp_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
	spin_lock(&bhead->lock);
	tw->tw_tb = tcp_sk(sk)->bind_hash;
	BUG_TRAP(tcp_sk(sk)->bind_hash);
	tw_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);

	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
	tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(&ehead->lock);
}
/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct tcp_tw_bucket *tw = NULL;
	struct tcp_opt *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
		tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);

	if (tw != NULL) {
		struct inet_opt *inet = inet_sk(sk);
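		/* 3.5 * RTO: (rto << 2) - (rto >> 1) == 4*RTO - RTO/2;
		 * see the rationale above tcp_tw_schedule() below.
		 */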
		int rto = (tp->rto << 2) - (tp->rto >> 1);

		/* Give us an identity. */
		tw->tw_daddr		= inet->daddr;
		tw->tw_rcv_saddr	= inet->rcv_saddr;
		tw->tw_bound_dev_if	= sk->sk_bound_dev_if;
		tw->tw_num		= inet->num;
		tw->tw_state		= TCP_TIME_WAIT;
		tw->tw_substate		= state;
		tw->tw_sport		= inet->sport;
		tw->tw_dport		= inet->dport;
		tw->tw_family		= sk->sk_family;
		tw->tw_reuse		= sk->sk_reuse;
		tw->tw_rcv_wscale	= tp->rcv_wscale;
		atomic_set(&tw->tw_refcnt, 1);

		tw->tw_hashent		= sk->sk_hashent;
		tw->tw_rcv_nxt		= tp->rcv_nxt;
		tw->tw_snd_nxt		= tp->snd_nxt;
		tw->tw_rcv_wnd		= tcp_receive_window(tp);
		tw->tw_ts_recent	= tp->ts_recent;
		tw->tw_ts_recent_stamp	= tp->ts_recent_stamp;
		tw_dead_node_init(tw);

		tw->tw_xid		= sk->sk_xid;
		tw->tw_vx_info		= NULL;
		tw->tw_nid		= sk->sk_nid;
		tw->tw_nx_info		= NULL;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_v6_ipv6only = np->ipv6only;
		} else {
			memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
			memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
			tw->tw_v6_ipv6only = 0;
		}
#endif
		/* Linkage updates. */
		__tcp_tw_hashdance(sk, tw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		tcp_tw_schedule(tw, timeo);
		tcp_tw_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot;

static void tcp_twkill(unsigned long);

/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

#define TCP_TWKILL_QUOTA	100
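
/* With TCP_TIMEWAIT_LEN of 60 seconds and 8 slots, each slot covers
 * 7.5 seconds of expiry time. The quota bounds how many buckets a
 * single timer run reaps before handing the remainder of the slot to
 * the work queue, keeping BH latency bounded.
 */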
static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
static void twkill_work(void *);
static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
static u32 twkill_thread_slots;
/* Returns non-zero if quota exceeded.  */
static int tcp_do_twkill_work(int slot, unsigned int quota)
{
	struct tcp_tw_bucket *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
		__tw_del_dead_node(tw);
		spin_unlock(&tw_death_lock);
		tcp_timewait_kill(tw);
		tcp_tw_put(tw);
		killed++;
		spin_lock(&tw_death_lock);
		if (killed > quota) {
			ret = 1;
			break;
		}

		/* While we dropped tw_death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	tcp_tw_count -= killed;
	NET_ADD_STATS_BH(TimeWaited, killed);

	return ret;
}
static void tcp_twkill(unsigned long dummy)
{
	int need_timer, ret;

	spin_lock(&tw_death_lock);

	if (tcp_tw_count == 0)
		goto out;

	need_timer = 0;
	ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
	if (ret) {
		twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
		mb();
		schedule_work(&tcp_twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (tcp_tw_count)
			need_timer = 1;
	}
	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
out:
	spin_unlock(&tw_death_lock);
}
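
/* twkill_slots_invalid() deliberately has no definition anywhere: if the
 * constant condition checked below is ever true, the call cannot be
 * optimized away and the kernel fails to link, flagging that
 * twkill_thread_slots is too narrow to hold one bit per TCP_TWKILL_SLOTS
 * slot.
 */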
extern void twkill_slots_invalid(void);

static void twkill_work(void *dummy)
{
	int i;

	if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
		twkill_slots_invalid();

	while (twkill_thread_slots) {
		spin_lock_bh(&tw_death_lock);
		for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
			if (!(twkill_thread_slots & (1 << i)))
				continue;

			while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&tw_death_lock);
					schedule();
					spin_lock_bh(&tw_death_lock);
				}
			}

			twkill_thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&tw_death_lock);
	}
}
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
	spin_lock(&tw_death_lock);
	if (tw_del_dead_node(tw)) {
		tcp_tw_put(tw);
		if (--tcp_tw_count == 0)
			del_timer(&tcp_tw_timer);
	}
	spin_unlock(&tw_death_lock);
	tcp_timewait_kill(tw);
}
/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer =
		TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
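
/* The calendar is a wheel of TCP_TW_RECYCLE_SLOTS slots, each spanning
 * (1<<TCP_TW_RECYCLE_TICK) jiffies. tcp_twcal_hand is the slot whose
 * expiry time tcp_twcal_jiffie records; tcp_twcal_tick() advances the
 * hand and reaps any slots whose time has passed.
 */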
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff). The normal timewait length is
	 * calculated so that we wait at least for one retransmitted FIN
	 * (maximal RTO is 120sec). [ BTW Linux, following BSD, violates
	 * this requirement by waiting only 60sec; we should wait at least
	 * 240 secs. Well, 240 consumes too much of resources 8) ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
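	/* Round the timeout up to a whole number of recycle ticks: e.g.
	 * with a tick of (1<<TCP_TW_RECYCLE_TICK) jiffies, a timeo of one
	 * and a half ticks lands in slot 2.
	 */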
	slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;

	spin_lock(&tw_death_lock);

	/* Unlink it, if it was scheduled */
	if (tw_del_dead_node(tw))
		tcp_tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= TCP_TW_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = TCP_TWKILL_SLOTS-1;
		} else {
			slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
			if (slot >= TCP_TWKILL_SLOTS)
				slot = TCP_TWKILL_SLOTS-1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
		list = &tcp_tw_death_row[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

		if (tcp_twcal_hand < 0) {
			tcp_twcal_hand = 0;
			tcp_twcal_jiffie = jiffies;
			tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
			add_timer(&tcp_twcal_timer);
		} else {
			if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
				mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
			slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
		}
		list = &tcp_twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (tcp_tw_count++ == 0)
		mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
	spin_unlock(&tw_death_lock);
}
void tcp_twcal_tick(unsigned long dummy)
{
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	spin_lock(&tw_death_lock);
	if (tcp_twcal_hand < 0)
		goto out;

	slot = tcp_twcal_hand;
	j = tcp_twcal_jiffie;

	for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct tcp_tw_bucket *tw;

			tw_for_each_inmate_safe(tw, node, safe,
					   &tcp_twcal_row[slot]) {
				__tw_del_dead_node(tw);
				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				tcp_twcal_jiffie = j;
				tcp_twcal_hand = slot;
			}

			if (!hlist_empty(&tcp_twcal_row[slot])) {
				mod_timer(&tcp_twcal_timer, j);
				goto out;
			}
		}
		j += (1<<TCP_TW_RECYCLE_TICK);
		slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
	}
	tcp_twcal_hand = -1;

out:
	if ((tcp_tw_count -= killed) == 0)
		del_timer(&tcp_tw_timer);
	NET_ADD_STATS_BH(TimeWaitKilled, killed);
	spin_unlock(&tw_death_lock);
}
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
{
	/* Allocate newsk from the same slab as the master sock; otherwise,
	 * at sk_free time we would try to free it from the wrong slabcache
	 * (i.e. is it TCPv4 or v6?) -acme */
	struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_slab);

	if (newsk != NULL) {
		struct tcp_opt *newtp;
		struct sk_filter *filter;

		memcpy(newsk, sk, sizeof(struct tcp_sock));
		newsk->sk_state = TCP_SYN_RECV;

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		tcp_sk(newsk)->bind_hash = NULL;

		/* Clone the TCP header template */
		inet_sk(newsk)->dport = req->rmt_port;

		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		newsk->sk_dst_lock = RW_LOCK_UNLOCKED;
		atomic_set(&newsk->sk_rmem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		skb_queue_head_init(&newsk->sk_write_queue);
		atomic_set(&newsk->sk_omem_alloc, 0);
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;

		sock_reset_flag(newsk, SOCK_DONE);
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_send_head = NULL;
		newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
		skb_queue_head_init(&newsk->sk_error_queue);
		newsk->sk_write_space = sk_stream_write_space;

		if ((filter = newsk->sk_filter) != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			return NULL;
		}
		/* Now setup tcp_opt */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = req->rcv_isn + 1;
		newtp->snd_nxt = req->snt_isn + 1;
		newtp->snd_una = req->snt_isn + 1;
		newtp->snd_sml = req->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);

		newtp->retransmits = 0;
		newtp->backoff = 0;
		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newtp->rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;

		newtp->bictcp.cnt = 0;
		newtp->bictcp.last_max_cwnd = newtp->bictcp.last_cwnd = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		tcp_set_ca_state(newtp, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->rcv_wup = req->rcv_isn + 1;
		newtp->write_seq = req->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = req->rcv_isn + 1;

		newtp->saw_tstamp = 0;

		newtp->dsack = 0;
		newtp->eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->num_sacks = 0;
		newtp->urg_data = 0;
		newtp->listen_opt = NULL;
#ifdef CONFIG_ACCEPT_QUEUES
		newtp->accept_queue = NULL;
		memset(newtp->acceptq, 0, sizeof(newtp->acceptq));
		newtp->class_index = 0;
#else
		newtp->accept_queue = newtp->accept_queue_tail = NULL;
#endif
		/* Deinitialize syn_wait_lock to trap illegal accesses. */
		memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
		/* Back to base struct sock members. */
		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/* hmm, maybe from socket? */
		set_vx_info(&newsk->sk_vx_info, current->vx_info);
		set_nx_info(&newsk->sk_nx_info, current->nx_info);
#ifdef INET_REFCNT_DEBUG
		atomic_inc(&inet_sock_nr);
#endif
		atomic_inc(&tcp_sockets_allocated);

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			tcp_reset_keepalive_timer(newsk,
						  keepalive_time_when(newtp));
		newsk->sk_socket = NULL;
		newsk->sk_sleep = NULL;
		newsk->sk_owner = NULL;
		newtp->tstamp_ok = req->tstamp_ok;
		if ((newtp->sack_ok = req->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->wscale_ok = req->wscale_ok;
		if (newtp->wscale_ok) {
			newtp->snd_wscale = req->snd_wscale;
			newtp->rcv_wscale = req->rcv_wscale;
		} else {
			newtp->snd_wscale = newtp->rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
		newtp->max_window = newtp->snd_wnd;

		if (newtp->tstamp_ok) {
			newtp->ts_recent = req->ts_recent;
			newtp->ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newtp->ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		if (newtp->ecn_flags & TCP_ECN_OK)
			newsk->sk_no_largesend = 1;

		tcp_vegas_init(newtp);
		TCP_INC_STATS_BH(TcpPassiveOpens);
	}
	return newsk;
}
/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as an open_request. Returns the child socket on success, the
 *	listener sk itself when the ACK is invalid (the caller then
 *	answers with a reset), or NULL when the segment is to be dropped.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct open_request *req,
			   struct open_request **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_opt *tp = tcp_sk(sk);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_opt ttp;
	struct sock *child;

	ttp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &ttp, 0);

		if (ttp.saw_tstamp) {
			ttp.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately):
			 * the SYN-ACK retransmission timer doubles each time,
			 * so (TCP_TIMEOUT_INIT/HZ) << req->retrans
			 * approximates the age of the request.
			 */
			ttp.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_check(&ttp, th->rst);
		}
	}
	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->class->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If tp->defer_accept, we silently drop this bare ACK.  Otherwise,
	   we create an established connection.  Both ends (listening sockets)
	   accept the new incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC notwithstanding!) accept the ACK
	   of the SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */
	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */
	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->class->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(PAWSEstabRejected);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
		req->ts_recent = ttp.rcv_tsval;
	if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at req->rcv_isn+1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
		req->acked = 1;
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	sk_set_owner(child, sk->sk_owner);
	tcp_synq_unlink(tp, req, prev);
	tcp_synq_removed(sk, req);

	tcp_acceptq_queue(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		req->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(EmbryonicRsts);
	if (!(flg & TCP_FLAG_RST))
		req->class->send_reset(skb);

	tcp_synq_drop(sk, req, prev);
	return NULL;
}
/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(tcp_tw_deschedule);

#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_tcp_tw_recycle);
#endif