2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/sysctl.h>
27 #include <linux/workqueue.h>
28 #include <linux/vs_limit.h>
29 #include <linux/vs_socket.h>
31 #include <net/inet_common.h>
36 #include <linux/vs_context.h>
39 #define SYNC_INIT 0 /* let the user enable it */
44 int sysctl_tcp_tw_recycle;
45 int sysctl_tcp_max_tw_buckets = NR_FILE*2;
47 int sysctl_tcp_syncookies = SYNC_INIT;
48 int sysctl_tcp_abort_on_overflow;
50 static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
52 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
56 if (after(end_seq, s_win) && before(seq, e_win))
58 return (seq == e_win && seq == end_seq);
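/*
 * A quick illustration of the overlap test above, with made-up sequence
 * numbers:
 *
 *	seq = 100, end_seq = 150, s_win = 120, e_win = 220
 *	after(150, 120) && before(100, 220)	-> segment overlaps the window
 *
 *	seq = end_seq = 220 (a zero-length segment at the right edge)
 *	falls through to (seq == e_win && seq == end_seq)	-> accepted
 */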
61 /* New-style handling of TIME_WAIT sockets. */
66 /* Must be called with locally disabled BHs. */
67 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
69 struct tcp_ehash_bucket *ehead;
70 struct tcp_bind_hashbucket *bhead;
71 struct tcp_bind_bucket *tb;
73 /* Unlink from established hashes. */
74 ehead = &tcp_ehash[tw->tw_hashent];
75 write_lock(&ehead->lock);
76 if (hlist_unhashed(&tw->tw_node)) {
77 write_unlock(&ehead->lock);
80 __hlist_del(&tw->tw_node);
81 sk_node_init(&tw->tw_node);
82 write_unlock(&ehead->lock);
84 /* Disassociate with bind bucket. */
85 bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
86 spin_lock(&bhead->lock);
88 __hlist_del(&tw->tw_bind_node);
90 tcp_bucket_destroy(tb);
91 spin_unlock(&bhead->lock);
93 #ifdef INET_REFCNT_DEBUG
94 if (atomic_read(&tw->tw_refcnt) != 1) {
95 printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
96 atomic_read(&tw->tw_refcnt));
103 * * The main purpose of the TIME-WAIT state is to close the connection gracefully
104 * when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
105 * (and, probably, a tail of data) and one or more of our ACKs are lost.
106 * * What is the TIME-WAIT timeout? It is associated with the maximal packet
107 * lifetime in the internet, which leads to the wrong conclusion that
108 * it is set to catch "old duplicate segments" wandering out of their path.
109 * That is not quite correct. This timeout is calculated so that it exceeds
110 * the maximal retransmission timeout by enough to allow losing one (or more)
111 * segments sent by the peer and our ACKs. This time may be calculated from the RTO.
112 * * When a TIME-WAIT socket receives an RST, it means that the other end
113 * has finally closed and we are allowed to kill TIME-WAIT too.
114 * * The second purpose of TIME-WAIT is catching old duplicate segments.
115 * Well, certainly it is pure paranoia, but if we load TIME-WAIT
116 * with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
117 * * If we invented some cleverer way to catch duplicates
118 * (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
120 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
121 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
122 * from the very beginning.
124 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
125 * is _not_ stateless. That means that, strictly speaking, we must
126 * spinlock it. I do not want to! Well, the probability of misbehaviour
127 * is ridiculously low and, it seems, we could use some mb() tricks
128 * to avoid misreading sequence numbers, states etc. --ANK
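/*
 * For example, the recycled timeout used later by tcp_time_wait() is
 * computed as (tp->rto << 2) - (tp->rto >> 1), i.e. 4*RTO - RTO/2 = 3.5*RTO.
 * With an illustrative RTO of 200 jiffies that gives 800 - 100 = 700 jiffies,
 * which is the "several RTOs" that the PAWS-based truncation above refers to.
 */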
131 tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
132 struct tcphdr *th, unsigned len)
134 struct tcp_options_received tmp_opt;
137 tmp_opt.saw_tstamp = 0;
138 if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
139 tcp_parse_options(skb, &tmp_opt, 0);
141 if (tmp_opt.saw_tstamp) {
142 tmp_opt.ts_recent = tw->tw_ts_recent;
143 tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
144 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
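/* PAWS boils down to a signed 32-bit timestamp comparison: with
 * illustrative values tw->tw_ts_recent = 1000 and rcv_tsval = 990,
 * (s32)(990 - 1000) < 0, so the segment carries an older timestamp
 * than the one we last saw and paws_reject is set (provided
 * tw_ts_recent_stamp is recent enough for the comparison to be valid).
 */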
148 if (tw->tw_substate == TCP_FIN_WAIT2) {
149 /* Just repeat all the checks of tcp_rcv_state_process() */
151 /* Out of window, send ACK */
153 !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
155 tw->tw_rcv_nxt + tw->tw_rcv_wnd))
161 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
165 if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
166 TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
168 return TCP_TW_SUCCESS;
171 /* New data or FIN. If new data arrive after half-duplex close,
175 TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
177 tcp_tw_deschedule(tw);
182 /* FIN arrived, enter true time-wait state. */
183 tw->tw_substate = TCP_TIME_WAIT;
184 tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
185 if (tmp_opt.saw_tstamp) {
186 tw->tw_ts_recent_stamp = xtime.tv_sec;
187 tw->tw_ts_recent = tmp_opt.rcv_tsval;
190 /* I am ashamed, but I failed to make it more elegant.
191 * Yes, it is a direct reference to IP, which is impossible
192 * to generalize to IPv6. Taking into account that IPv6
193 * does not understand recycling in any case, it is not
194 * a big problem in practice. --ANK */
195 if (tw->tw_family == AF_INET &&
196 sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
197 tcp_v4_tw_remember_stamp(tw))
198 tcp_tw_schedule(tw, tw->tw_timeout);
200 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
205 * Now real TIME-WAIT state.
208 * "When a connection is [...] on TIME-WAIT state [...]
209 * [a TCP] MAY accept a new SYN from the remote TCP to
210 * reopen the connection directly, if it:
212 * (1) assigns its initial sequence number for the new
213 * connection to be larger than the largest sequence
214 * number it used on the previous connection incarnation,
217 * (2) returns to TIME-WAIT state if the SYN turns out
218 * to be an old duplicate".
222 (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
223 (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
224 /* In-window segment; it may only be a reset or a bare ack. */
227 /* This is TIME_WAIT assassination, in two flavors.
228 * Oh well... nobody has a sufficient solution to this
231 if (sysctl_tcp_rfc1337 == 0) {
233 tcp_tw_deschedule(tw);
235 return TCP_TW_SUCCESS;
238 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
240 if (tmp_opt.saw_tstamp) {
241 tw->tw_ts_recent = tmp_opt.rcv_tsval;
242 tw->tw_ts_recent_stamp = xtime.tv_sec;
246 return TCP_TW_SUCCESS;
249 /* Out of window segment.
251 All the segments are ACKed immediately.
253 The only exception is a new SYN. We accept it if it is
254 not an old duplicate and we are not in danger of being killed
255 by delayed old duplicates. The RFC check, that it carries a
256 newer sequence number, works at rates < 40 Mbit/sec.
257 However, if PAWS works, it is reliable and, even more,
258 we may even relax the silly seq space cutoff.
260 RED-PEN: we violate the main RFC requirement: if this SYN turns out to be an
261 old duplicate (i.e. we receive an RST in reply to our SYN-ACK),
262 we must return the socket to the time-wait state. That is not good,
266 if (th->syn && !th->rst && !th->ack && !paws_reject &&
267 (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
268 (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
269 u32 isn = tw->tw_snd_nxt + 65535 + 2;
272 TCP_SKB_CB(skb)->when = isn;
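/* The suggested ISN is the old connection's snd_nxt pushed past the
 * largest possible unscaled window: e.g. with an illustrative
 * tw->tw_snd_nxt of 1000000 the new ISN becomes 1000000 + 65535 + 2 =
 * 1065537, satisfying requirement (1) quoted above (the new ISN is
 * larger than any sequence number used by the previous incarnation).
 */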
277 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
280 /* In this case we must reset the TIMEWAIT timer.
282 * If it is an ACKless SYN it may be both an old duplicate
283 * and a new good SYN with a random sequence number < rcv_nxt.
284 * Do not reschedule in the latter case.
286 if (paws_reject || th->ack)
287 tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
289 /* Send ACK. Note that we do not put the bucket;
290 * it will be released by the caller.
295 return TCP_TW_SUCCESS;
298 /* Enter the time wait state. This is called with locally disabled BH.
299 * Essentially we whip up a timewait bucket, copy the
300 * relevant info into it from the SK, and mess with hash chains
303 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
305 struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
306 struct tcp_bind_hashbucket *bhead;
308 /* Step 1: Put TW into bind hash. Original socket stays there too.
309 Note that any socket with inet_sk(sk)->num != 0 MUST be bound in
310 the binding cache, even if it is closed.
312 bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
313 spin_lock(&bhead->lock);
314 tw->tw_tb = tcp_sk(sk)->bind_hash;
315 BUG_TRAP(tcp_sk(sk)->bind_hash);
316 tw_add_bind_node(tw, &tw->tw_tb->owners);
317 spin_unlock(&bhead->lock);
319 write_lock(&ehead->lock);
321 /* Step 2: Remove SK from established hash. */
322 if (__sk_del_node_init(sk))
323 sock_prot_dec_use(sk->sk_prot);
325 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
326 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
327 atomic_inc(&tw->tw_refcnt);
329 write_unlock(&ehead->lock);
333 * Move a socket to time-wait or dead fin-wait-2 state.
335 void tcp_time_wait(struct sock *sk, int state, int timeo)
337 struct tcp_tw_bucket *tw = NULL;
338 struct tcp_sock *tp = tcp_sk(sk);
341 if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
342 recycle_ok = tp->af_specific->remember_stamp(sk);
344 if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
345 tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);
348 struct inet_sock *inet = inet_sk(sk);
349 int rto = (tp->rto<<2) - (tp->rto>>1);
351 /* Give us an identity. */
352 tw->tw_daddr = inet->daddr;
353 tw->tw_rcv_saddr = inet->rcv_saddr;
354 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
355 tw->tw_num = inet->num;
356 tw->tw_state = TCP_TIME_WAIT;
357 tw->tw_substate = state;
358 tw->tw_sport = inet->sport;
359 tw->tw_dport = inet->dport;
360 tw->tw_family = sk->sk_family;
361 tw->tw_reuse = sk->sk_reuse;
362 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
363 atomic_set(&tw->tw_refcnt, 1);
365 tw->tw_hashent = sk->sk_hashent;
366 tw->tw_rcv_nxt = tp->rcv_nxt;
367 tw->tw_snd_nxt = tp->snd_nxt;
368 tw->tw_rcv_wnd = tcp_receive_window(tp);
369 tw->tw_ts_recent = tp->rx_opt.ts_recent;
370 tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
371 tw_dead_node_init(tw);
373 tw->tw_xid = sk->sk_xid;
374 tw->tw_vx_info = NULL;
375 tw->tw_nid = sk->sk_nid;
376 tw->tw_nx_info = NULL;
378 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
379 if (tw->tw_family == PF_INET6) {
380 struct ipv6_pinfo *np = inet6_sk(sk);
382 ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
383 ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
384 tw->tw_v6_ipv6only = np->ipv6only;
386 memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
387 memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
388 tw->tw_v6_ipv6only = 0;
391 /* Linkage updates. */
392 __tcp_tw_hashdance(sk, tw);
394 /* Get the TIME_WAIT timeout firing. */
399 tw->tw_timeout = rto;
401 tw->tw_timeout = TCP_TIMEWAIT_LEN;
402 if (state == TCP_TIME_WAIT)
403 timeo = TCP_TIMEWAIT_LEN;
406 tcp_tw_schedule(tw, timeo);
409 /* Sorry, if we're out of memory, just CLOSE this
410 * socket up. We've got bigger problems than
411 * non-graceful socket closings.
414 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
417 tcp_update_metrics(sk);
421 /* Kill off TIME_WAIT sockets once their lifetime has expired. */
422 static int tcp_tw_death_row_slot;
424 static void tcp_twkill(unsigned long);
426 /* TIME_WAIT reaping mechanism. */
427 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
428 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
430 #define TCP_TWKILL_QUOTA 100
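/* With TCP_TIMEWAIT_LEN of 60*HZ (60 seconds), the 8 slots give a reaping
 * period of 60/8 = 7.5 seconds: the slow timer advances one slot per
 * TCP_TWKILL_PERIOD and kills at most TCP_TWKILL_QUOTA buckets per run
 * before deferring the rest of the slot to the workqueue below.
 */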
432 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
433 static DEFINE_SPINLOCK(tw_death_lock);
434 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
435 static void twkill_work(void *);
436 static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
437 static u32 twkill_thread_slots;
439 /* Returns non-zero if quota exceeded. */
440 static int tcp_do_twkill_work(int slot, unsigned int quota)
442 struct tcp_tw_bucket *tw;
443 struct hlist_node *node;
447 /* NOTE: compare this to the previous version where the lock
448 * was released after detaching the chain. It was racy,
449 * because tw buckets are scheduled in a non-serialized context
450 * in 2.3 (with netfilter), and with softnet it is common, because
451 * soft irqs are not sequenced.
456 tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
457 __tw_del_dead_node(tw);
458 spin_unlock(&tw_death_lock);
459 tcp_timewait_kill(tw);
462 spin_lock(&tw_death_lock);
463 if (killed > quota) {
468 /* While we dropped tw_death_lock, another cpu may have
469 * killed off the next TW bucket in the list, therefore
470 * do a fresh re-read of the hlist head node with the
471 * lock reacquired. We still use the hlist traversal
472 * macro in order to get the prefetches.
477 tcp_tw_count -= killed;
478 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);
483 static void tcp_twkill(unsigned long dummy)
487 spin_lock(&tw_death_lock);
489 if (tcp_tw_count == 0)
493 ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
495 twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
497 schedule_work(&tcp_twkill_work);
500 /* We purged the entire slot, anything left? */
504 tcp_tw_death_row_slot =
505 ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
507 mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
509 spin_unlock(&tw_death_lock);
512 extern void twkill_slots_invalid(void);
514 static void twkill_work(void *dummy)
518 if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
519 twkill_slots_invalid();
521 while (twkill_thread_slots) {
522 spin_lock_bh(&tw_death_lock);
523 for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
524 if (!(twkill_thread_slots & (1 << i)))
527 while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
528 if (need_resched()) {
529 spin_unlock_bh(&tw_death_lock);
531 spin_lock_bh(&tw_death_lock);
535 twkill_thread_slots &= ~(1 << i);
537 spin_unlock_bh(&tw_death_lock);
541 /* These are always called from BH context. See callers in
542 * tcp_input.c to verify this.
545 /* This is for handling early-kills of TIME_WAIT sockets. */
546 void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
548 spin_lock(&tw_death_lock);
549 if (tw_del_dead_node(tw)) {
551 if (--tcp_tw_count == 0)
552 del_timer(&tcp_tw_timer);
554 spin_unlock(&tw_death_lock);
555 tcp_timewait_kill(tw);
558 /* Short-time timewait calendar */
560 static int tcp_twcal_hand = -1;
561 static int tcp_twcal_jiffie;
562 static void tcp_twcal_tick(unsigned long);
563 static struct timer_list tcp_twcal_timer =
564 TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
565 static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
567 static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
569 struct hlist_head *list;
572 /* timeout := RTO * 3.5
574 * 3.5 = 1+2+0.5 to wait for two retransmits.
576 * RATIONALE: if a FIN arrived and we entered TIME-WAIT state,
577 * our ACK acking that FIN can be lost. If N subsequent retransmitted
578 * FINs (or previous segments) are lost, the probability of such an event
579 * is p^(N+1), where p is the probability of losing a single packet, and the
580 * time to detect the loss is about RTO*(2^N - 1) with exponential
581 * backoff. The normal timewait length is calculated so that we
582 * wait at least for one retransmitted FIN (the maximal RTO is 120sec).
583 * [ BTW Linux, following BSD, violates this requirement, waiting
584 * only for 60sec; we should wait at least 240 secs.
585 * Well, 240 consumes too many resources 8)
587 * This interval is not reduced to catch old duplicates and
588 * responses to our wandering segments living for two MSLs.
589 * However, if we use PAWS to detect
590 * old duplicates, we can reduce the interval to the bounds required
591 * by the RTO, rather than the MSL. So, if the peer understands PAWS, we
592 * kill the tw bucket after 3.5*RTO (it is important that this number
593 * is greater than the TS tick!) and detect old duplicates with the help
596 slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
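/* The expression above rounds timeo up to a whole number of recycle
 * ticks; e.g. if TCP_TW_RECYCLE_TICK were 7 (128-jiffy granularity),
 * a timeo of 300 jiffies would map to slot (300 + 127) >> 7 = 3.
 */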
598 spin_lock(&tw_death_lock);
600 /* Unlink it, if it was scheduled */
601 if (tw_del_dead_node(tw))
604 atomic_inc(&tw->tw_refcnt);
606 if (slot >= TCP_TW_RECYCLE_SLOTS) {
607 /* Schedule to slow timer */
608 if (timeo >= TCP_TIMEWAIT_LEN) {
609 slot = TCP_TWKILL_SLOTS-1;
611 slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
612 if (slot >= TCP_TWKILL_SLOTS)
613 slot = TCP_TWKILL_SLOTS-1;
615 tw->tw_ttd = jiffies + timeo;
616 slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
617 list = &tcp_tw_death_row[slot];
619 tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);
621 if (tcp_twcal_hand < 0) {
623 tcp_twcal_jiffie = jiffies;
624 tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot<<TCP_TW_RECYCLE_TICK);
625 add_timer(&tcp_twcal_timer);
627 if (time_after(tcp_twcal_timer.expires, jiffies + (slot<<TCP_TW_RECYCLE_TICK)))
628 mod_timer(&tcp_twcal_timer, jiffies + (slot<<TCP_TW_RECYCLE_TICK));
629 slot = (tcp_twcal_hand + slot)&(TCP_TW_RECYCLE_SLOTS-1);
631 list = &tcp_twcal_row[slot];
634 hlist_add_head(&tw->tw_death_node, list);
636 if (tcp_tw_count++ == 0)
637 mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
638 spin_unlock(&tw_death_lock);
641 void tcp_twcal_tick(unsigned long dummy)
645 unsigned long now = jiffies;
649 spin_lock(&tw_death_lock);
650 if (tcp_twcal_hand < 0)
653 slot = tcp_twcal_hand;
654 j = tcp_twcal_jiffie;
656 for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
657 if (time_before_eq(j, now)) {
658 struct hlist_node *node, *safe;
659 struct tcp_tw_bucket *tw;
661 tw_for_each_inmate_safe(tw, node, safe,
662 &tcp_twcal_row[slot]) {
663 __tw_del_dead_node(tw);
664 tcp_timewait_kill(tw);
671 tcp_twcal_jiffie = j;
672 tcp_twcal_hand = slot;
675 if (!hlist_empty(&tcp_twcal_row[slot])) {
676 mod_timer(&tcp_twcal_timer, j);
680 j += (1<<TCP_TW_RECYCLE_TICK);
681 slot = (slot+1)&(TCP_TW_RECYCLE_SLOTS-1);
686 if ((tcp_tw_count -= killed) == 0)
687 del_timer(&tcp_tw_timer);
688 NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
689 spin_unlock(&tw_death_lock);
692 /* This is not only more efficient than what we used to do, it eliminates
693 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
695 * Actually, we could avoid lots of memory writes here. The tp of the listening
696 * socket contains all the necessary default parameters.
698 struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
700 /* allocate the newsk from the same slab as the master sock;
701 * if not, at sk_free time we'll try to free it from the wrong
702 * slabcache (i.e. is it TCPv4 or v6?). This is handled thru sk->sk_prot -acme */
703 struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
706 struct tcp_sock *newtp;
707 struct sk_filter *filter;
709 memcpy(newsk, sk, sizeof(struct tcp_sock));
710 newsk->sk_state = TCP_SYN_RECV;
715 sk_node_init(&newsk->sk_node);
716 tcp_sk(newsk)->bind_hash = NULL;
718 /* Clone the TCP header template */
719 inet_sk(newsk)->dport = req->rmt_port;
721 sock_lock_init(newsk);
724 rwlock_init(&newsk->sk_dst_lock);
725 atomic_set(&newsk->sk_rmem_alloc, 0);
726 skb_queue_head_init(&newsk->sk_receive_queue);
727 atomic_set(&newsk->sk_wmem_alloc, 0);
728 skb_queue_head_init(&newsk->sk_write_queue);
729 atomic_set(&newsk->sk_omem_alloc, 0);
730 newsk->sk_wmem_queued = 0;
731 newsk->sk_forward_alloc = 0;
733 sock_reset_flag(newsk, SOCK_DONE);
734 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
735 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
736 newsk->sk_send_head = NULL;
737 rwlock_init(&newsk->sk_callback_lock);
738 skb_queue_head_init(&newsk->sk_error_queue);
739 newsk->sk_write_space = sk_stream_write_space;
741 if ((filter = newsk->sk_filter) != NULL)
742 sk_filter_charge(newsk, filter);
744 if (sk->sk_create_child)
745 sk->sk_create_child(sk, newsk);
747 if (unlikely(xfrm_sk_clone_policy(newsk))) {
748 /* It is still a raw copy of the parent, so invalidate
749 * the destructor and do a plain sk_free() */
750 newsk->sk_destruct = NULL;
755 /* Now setup tcp_sock */
756 newtp = tcp_sk(newsk);
757 newtp->pred_flags = 0;
758 newtp->rcv_nxt = req->rcv_isn + 1;
759 newtp->snd_nxt = req->snt_isn + 1;
760 newtp->snd_una = req->snt_isn + 1;
761 newtp->snd_sml = req->snt_isn + 1;
763 tcp_prequeue_init(newtp);
765 tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);
767 newtp->retransmits = 0;
770 newtp->mdev = TCP_TIMEOUT_INIT;
771 newtp->rto = TCP_TIMEOUT_INIT;
773 newtp->packets_out = 0;
775 newtp->retrans_out = 0;
776 newtp->sacked_out = 0;
777 newtp->fackets_out = 0;
778 newtp->snd_ssthresh = 0x7fffffff;
780 /* So many TCP implementations out there (incorrectly) count the
781 * initial SYN frame in their delayed-ACK and congestion control
782 * algorithms that we must have the following bandaid to talk
783 * efficiently to them. -DaveM
786 newtp->snd_cwnd_cnt = 0;
788 newtp->frto_counter = 0;
789 newtp->frto_highmark = 0;
791 tcp_set_ca_state(newtp, TCP_CA_Open);
792 tcp_init_xmit_timers(newsk);
793 skb_queue_head_init(&newtp->out_of_order_queue);
794 newtp->rcv_wup = req->rcv_isn + 1;
795 newtp->write_seq = req->snt_isn + 1;
796 newtp->pushed_seq = newtp->write_seq;
797 newtp->copied_seq = req->rcv_isn + 1;
799 newtp->rx_opt.saw_tstamp = 0;
801 newtp->rx_opt.dsack = 0;
802 newtp->rx_opt.eff_sacks = 0;
804 newtp->probes_out = 0;
805 newtp->rx_opt.num_sacks = 0;
807 newtp->listen_opt = NULL;
808 newtp->accept_queue = newtp->accept_queue_tail = NULL;
809 /* Deinitialize syn_wait_lock to trap illegal accesses. */
810 memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));
812 /* Back to base struct sock members. */
814 newsk->sk_priority = 0;
815 atomic_set(&newsk->sk_refcnt, 2);
817 set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
818 newsk->sk_xid = sk->sk_xid;
820 set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
821 newsk->sk_nid = sk->sk_nid;
822 #ifdef INET_REFCNT_DEBUG
823 atomic_inc(&inet_sock_nr);
825 atomic_inc(&tcp_sockets_allocated);
827 if (sock_flag(newsk, SOCK_KEEPOPEN))
828 tcp_reset_keepalive_timer(newsk,
829 keepalive_time_when(newtp));
830 newsk->sk_socket = NULL;
831 newsk->sk_sleep = NULL;
833 newtp->rx_opt.tstamp_ok = req->tstamp_ok;
834 if((newtp->rx_opt.sack_ok = req->sack_ok) != 0) {
836 newtp->rx_opt.sack_ok |= 2;
838 newtp->window_clamp = req->window_clamp;
839 newtp->rcv_ssthresh = req->rcv_wnd;
840 newtp->rcv_wnd = req->rcv_wnd;
841 newtp->rx_opt.wscale_ok = req->wscale_ok;
842 if (newtp->rx_opt.wscale_ok) {
843 newtp->rx_opt.snd_wscale = req->snd_wscale;
844 newtp->rx_opt.rcv_wscale = req->rcv_wscale;
846 newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
847 newtp->window_clamp = min(newtp->window_clamp, 65535U);
849 newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
850 newtp->max_window = newtp->snd_wnd;
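/* For example, an advertised window field of 16384 with snd_wscale = 2
 * (illustrative values) yields an initial snd_wnd of 16384 << 2 = 65536
 * bytes, and max_window starts out tracking the same value.
 */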
852 if (newtp->rx_opt.tstamp_ok) {
853 newtp->rx_opt.ts_recent = req->ts_recent;
854 newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
855 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
857 newtp->rx_opt.ts_recent_stamp = 0;
858 newtp->tcp_header_len = sizeof(struct tcphdr);
860 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
861 newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
862 newtp->rx_opt.mss_clamp = req->mss;
863 TCP_ECN_openreq_child(newtp, req);
864 if (newtp->ecn_flags&TCP_ECN_OK)
865 sock_set_flag(newsk, SOCK_NO_LARGESEND);
869 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
875 * Process an incoming packet for SYN_RECV sockets represented
876 * as an open_request.
879 struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
880 struct open_request *req,
881 struct open_request **prev)
883 struct tcphdr *th = skb->h.th;
884 struct tcp_sock *tp = tcp_sk(sk);
885 u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
887 struct tcp_options_received tmp_opt;
890 tmp_opt.saw_tstamp = 0;
891 if (th->doff > (sizeof(struct tcphdr)>>2)) {
892 tcp_parse_options(skb, &tmp_opt, 0);
894 if (tmp_opt.saw_tstamp) {
895 tmp_opt.ts_recent = req->ts_recent;
896 /* We do not store the true stamp, but it is not required;
897 * it can be estimated (approximately)
900 tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
901 paws_reject = tcp_paws_check(&tmp_opt, th->rst);
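/* Illustration of the estimate: with TCP_TIMEOUT_INIT at its usual 3*HZ
 * and req->retrans == 2, the peer's timestamp is assumed to have been
 * seen roughly (3 << 2) = 12 seconds ago, which is accurate enough for
 * the PAWS staleness checks.
 */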
905 /* Check for pure retransmitted SYN. */
906 if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
907 flg == TCP_FLAG_SYN &&
910 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
911 * this case on figure 6 and figure 8, but the formal
912 * protocol description says NOTHING.
913 * To be more exact, it says that we should send an ACK,
914 * because this segment (at least, if it has no data)
917 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
918 * describe the SYN-RECV state. All of that description
919 * is wrong; we cannot trust it and should
920 * rely only on common sense and implementation
923 * Enforce "SYN-ACK" according to figure 8, figure 6
924 * of RFC793, fixed by RFC1122.
926 req->class->rtx_syn_ack(sk, req, NULL);
930 /* Further reproduces section "SEGMENT ARRIVES"
931 for state SYN-RECEIVED of RFC793.
932 It is broken, however: it only fails
933 when SYNs are crossed.
935 You would think that SYN crossing is impossible here, since
936 we should have a SYN_SENT socket (from connect()) on our end,
937 but this is not true if the crossed SYNs were sent to both
938 ends by a malicious third party. We must defend against this,
939 and to do that we first verify the ACK (as per RFC793, page
940 36) and reset if it is invalid. Is this a true full defense?
941 To convince ourselves, let us consider a way in which the ACK
942 test can still pass in this 'malicious crossed SYNs' case.
943 Malicious sender sends identical SYNs (and thus identical sequence
944 numbers) to both A and B:
949 By our good fortune, both A and B select the same initial
950 send sequence number of seven :-)
952 A: sends SYN|ACK, seq=7, ack_seq=8
953 B: sends SYN|ACK, seq=7, ack_seq=8
955 So we are now A, eating this SYN|ACK; the ACK test passes. So
956 does the sequence test; the SYN is truncated, and thus we consider
959 If tp->defer_accept, we silently drop this bare ACK. Otherwise,
960 we create an established connection. Both ends (listening sockets)
961 accept the new incoming connection and try to talk to each other. 8-)
963 Note: This case is both harmless and rare. The possibility is about the
964 same as us discovering intelligent life on another planet tomorrow.
966 But generally, we should (the RFC lies!) accept the ACK
967 of our SYNACK both here and in tcp_rcv_state_process().
968 tcp_rcv_state_process() does not, hence we do not either.
970 Note that the case is absolutely generic:
971 we cannot optimize anything here without
972 violating the protocol. All the checks must be made
973 before attempting to create the socket.
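/* In terms of the ISS=7 example above: req->snt_isn == 7, so the only
 * acceptable ack_seq on an incoming ACK is 8 (snt_isn + 1); anything
 * else is handed back to the listening socket, which sends the reset.
 */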
976 /* RFC793 page 36: "If the connection is in any non-synchronized state ...
977 * and the incoming segment acknowledges something not yet
978 * sent (the segment carries an unacceptable ACK) ...
981 * Invalid ACK: reset will be sent by listening socket
983 if ((flg & TCP_FLAG_ACK) &&
984 (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1))
987 /* Also, it would not be such a bad idea to check rcv_tsecr, which
988 * is essentially an ACK extension; too-early or too-late values
989 * should cause a reset in unsynchronized states.
992 /* RFC793: "first check sequence number". */
994 if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
995 req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
996 /* Out of window: send ACK and drop. */
997 if (!(flg & TCP_FLAG_RST))
998 req->class->send_ack(skb, req);
1000 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
1004 /* In sequence, PAWS is OK. */
1006 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
1007 req->ts_recent = tmp_opt.rcv_tsval;
1009 if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
1010 /* Truncate SYN, it is out of window starting
1011 at req->rcv_isn+1. */
1012 flg &= ~TCP_FLAG_SYN;
1015 /* RFC793: "second check the RST bit" and
1016 * "fourth, check the SYN bit"
1018 if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
1019 goto embryonic_reset;
1021 /* The ACK sequence was verified above; just make sure the ACK bit
1022 * is set. If it is not set, silently drop the packet.
1024 if (!(flg & TCP_FLAG_ACK))
1027 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
1028 if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
1033 /* OK, the ACK is valid; create the big socket and
1034 * feed this segment to it. It will repeat all
1035 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
1036 * ESTABLISHED STATE. If it gets dropped after the
1037 * socket is created, expect trouble.
1039 child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
1041 goto listen_overflow;
1043 tcp_synq_unlink(tp, req, prev);
1044 tcp_synq_removed(sk, req);
1046 tcp_acceptq_queue(sk, req, child);
1050 if (!sysctl_tcp_abort_on_overflow) {
1056 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
1057 if (!(flg & TCP_FLAG_RST))
1058 req->class->send_reset(skb);
1060 tcp_synq_drop(sk, req, prev);
1065 * Queue segment on the new socket if the new socket is active,
1066 * otherwise we just short-circuit this and continue with
1070 int tcp_child_process(struct sock *parent, struct sock *child,
1071 struct sk_buff *skb)
1074 int state = child->sk_state;
1076 if (!sock_owned_by_user(child)) {
1077 ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
1079 /* Wakeup parent, send SIGIO */
1080 if (state == TCP_SYN_RECV && child->sk_state != state)
1081 parent->sk_data_ready(parent, 0);
1083 /* Alas, it is possible again, because we do the lookup
1084 * in the main socket hash table and the lock on the listening
1085 * socket no longer protects us.
1087 sk_add_backlog(child, skb);
1090 bh_unlock_sock(child);
1095 EXPORT_SYMBOL(tcp_check_req);
1096 EXPORT_SYMBOL(tcp_child_process);
1097 EXPORT_SYMBOL(tcp_create_openreq_child);
1098 EXPORT_SYMBOL(tcp_timewait_state_process);
1099 EXPORT_SYMBOL(tcp_tw_deschedule);