/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H
#define FASTRETRANS_DEBUG 1

/* Cancel timers when they are not required. */
#undef TCP_CLEAR_TIMERS

#include <linux/config.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <net/checksum.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
#include <linux/seq_file.h>
/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct tcp_ehash_bucket {
	rwlock_t		lock;
	struct hlist_head	chain;
} __attribute__((__aligned__(8)));

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all;
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)  -DaveM
 */
struct tcp_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
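/*
 * Illustrative sketch, not part of the original header: how the
 * fastreuse flag described above short-circuits the bind check.
 * The helper name and the exact fastreuse test are assumptions
 * for illustration only.
 */
static inline int tcp_bind_conflict_fast(const struct tcp_bind_bucket *tb,
					 const struct sock *newsk)
{
	/* Every current owner passed the reuse test; if the new socket
	 * passes it too, the port can be shared without walking
	 * tb->owners at all.
	 */
	if (tb->fastreuse && newsk->sk_reuse &&
	    newsk->sk_state != TCP_LISTEN)
		return 0;	/* no conflict, decided in O(1) */
	return 1;		/* fall back to walking the owners list */
}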
#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)

struct tcp_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
{
	return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
}

static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
{
	return hlist_empty(&head->chain) ? NULL : __tb_head(head);
}
extern struct tcp_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * First half of the table is for sockets not in TIME_WAIT, second half
	 * is for TIME_WAIT sockets only.
	 */
	struct tcp_ehash_bucket *__tcp_ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct tcp_bind_hashbucket *__tcp_bhash;

	int __tcp_bhash_size;
	int __tcp_ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t __tcp_lhash_lock ____cacheline_aligned;
	atomic_t __tcp_lhash_users;
	wait_queue_head_t __tcp_lhash_wait;
	spinlock_t __tcp_portalloc_lock;
} tcp_hashinfo;
#define tcp_ehash	(tcp_hashinfo.__tcp_ehash)
#define tcp_bhash	(tcp_hashinfo.__tcp_bhash)
#define tcp_ehash_size	(tcp_hashinfo.__tcp_ehash_size)
#define tcp_bhash_size	(tcp_hashinfo.__tcp_bhash_size)
#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
#define tcp_lhash_lock	(tcp_hashinfo.__tcp_lhash_lock)
#define tcp_lhash_users	(tcp_hashinfo.__tcp_lhash_users)
#define tcp_lhash_wait	(tcp_hashinfo.__tcp_lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
extern kmem_cache_t *tcp_bucket_cachep;
extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
						 unsigned short snum);
extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
extern void tcp_bucket_unlock(struct sock *sk);
extern int tcp_port_rover;
/* These are AF independent. */
static __inline__ int tcp_bhashfn(__u16 lport)
{
	return (lport & (tcp_bhash_size - 1));
}
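/*
 * Worked example, not in the original header: with tcp_bhash_size ==
 * 1024 the mask is 0x3ff, so local port 8080 (0x1f90) hashes to
 * bucket 0x390 == 912.  This only works because tcp_bhash_size is a
 * power of two.
 */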
extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
			  unsigned short snum);
#if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8
#else
#define TCP_ADDRCMP_ALIGN_BYTES 4
#endif
/* This is a TIME_WAIT bucket.  It works around the memory consumption
 * problems of sockets in such a state on heavily loaded servers, but
 * without violating the protocol specification.
 */
struct tcp_tw_bucket {
	/*
	 * Now struct sock also uses sock_common, so please don't
	 * add anything before this first member (__tw_common) --acme
	 */
	struct sock_common	__tw_common;
#define tw_family		__tw_common.skc_family
#define tw_state		__tw_common.skc_state
#define tw_reuse		__tw_common.skc_reuse
#define tw_bound_dev_if		__tw_common.skc_bound_dev_if
#define tw_node			__tw_common.skc_node
#define tw_bind_node		__tw_common.skc_bind_node
#define tw_refcnt		__tw_common.skc_refcnt
#define tw_xid			__tw_common.skc_xid
#define tw_vx_info		__tw_common.skc_vx_info
#define tw_nid			__tw_common.skc_nid
#define tw_nx_info		__tw_common.skc_nx_info
	volatile unsigned char	tw_substate;
	unsigned char		tw_rcv_wscale;
	__u16			tw_sport;
	/* Socket demultiplex comparisons on incoming packets. */
	/* these five are in inet_sock */
	__u32			tw_daddr
		__attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
	__u32			tw_rcv_saddr;
	__u16			tw_dport;
	__u16			tw_num;
	/* And these are ours. */
	int			tw_hashent;
	int			tw_timeout;
	__u32			tw_rcv_nxt;
	__u32			tw_snd_nxt;
	__u32			tw_rcv_wnd;
	__u32			tw_ts_recent;
	long			tw_ts_recent_stamp;
	unsigned long		tw_ttd;
	struct tcp_bind_bucket	*tw_tb;
	struct hlist_node	tw_death_node;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct in6_addr		tw_v6_daddr;
	struct in6_addr		tw_v6_rcv_saddr;
	int			tw_v6_ipv6only;
#endif
};
static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
				   struct hlist_head *list)
{
	hlist_add_head(&tw->tw_node, list);
}

static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
					struct hlist_head *list)
{
	hlist_add_head(&tw->tw_bind_node, list);
}

static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
{
	return tw->tw_death_node.pprev != NULL;
}

static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
{
	tw->tw_death_node.pprev = NULL;
}

static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
{
	__hlist_del(&tw->tw_death_node);
	tw_dead_node_init(tw);
}

static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
{
	if (tw_dead_hashed(tw)) {
		__tw_del_dead_node(tw);
		return 1;
	}
	return 0;
}
#define tw_for_each(tw, node, head) \
	hlist_for_each_entry(tw, node, head, tw_node)

#define tw_for_each_inmate(tw, node, jail) \
	hlist_for_each_entry(tw, node, jail, tw_death_node)

#define tw_for_each_inmate_safe(tw, node, safe, jail) \
	hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)

#define tcptw_sk(__sk)	((struct tcp_tw_bucket *)(__sk))
static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
{
	return likely(sk->sk_state != TCP_TIME_WAIT) ?
		inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
{
	return likely(sk->sk_state != TCP_TIME_WAIT) ?
		&inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
}

static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
{
	return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
}

#define tcptw_sk_ipv6only(__sk)	(tcptw_sk(__sk)->tw_v6_ipv6only)

static inline int tcp_v6_ipv6only(const struct sock *sk)
{
	return likely(sk->sk_state != TCP_TIME_WAIT) ?
		ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
}
#else
# define __tcp_v6_rcv_saddr(__sk)	NULL
# define tcp_v6_rcv_saddr(__sk)		NULL
# define tcptw_sk_ipv6only(__sk)	0
# define tcp_v6_ipv6only(__sk)		0
#endif
extern kmem_cache_t *tcp_timewait_cachep;

static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt)) {
#ifdef INET_REFCNT_DEBUG
		printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
		kmem_cache_free(tcp_timewait_cachep, tw);
	}
}

extern atomic_t tcp_orphan_count;
extern int tcp_tw_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport)<<16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport)<<16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie))  &&	\
	 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports))   &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	((inet_sk(__sk)->daddr		== (__saddr))	&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))	&&	\
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	((tcptw_sk(__sk)->tw_daddr	== (__saddr))	&&	\
	 (tcptw_sk(__sk)->tw_rcv_saddr	== (__daddr))	&&	\
	 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports))   &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */

#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)	   \
	(((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&& \
	 ((__sk)->sk_family		== AF_INET6)		&& \
	 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr))	&& \
	 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr))	&& \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
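/*
 * Worked example, not in the original header: on a 64-bit arch the
 * saddr/daddr pair collapses into one __u64 compare and both ports
 * into one __u32 compare, so a full 4-tuple lookup costs two word
 * compares plus the bound-device check:
 *
 *	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
 *	ports = TCP_COMBINED_PORTS(sport, dport);
 *	if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
 *		...sk owns this segment...
 */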
/* These can have wildcards, don't try too hard. */
static __inline__ int tcp_lhashfn(unsigned short num)
{
	return num & (TCP_LHTABLE_SIZE - 1);
}

static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
{
	return tcp_lhashfn(inet_sk(sk)->num);
}
#define MAX_TCP_HEADER	(128 + MAX_HEADER)

/*
 * Never offer a window over 32767 without using window scaling.  Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS.  It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_SYNQ_HSIZE		512	/* Size of SYNACK hash table */
#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
#define TCP_TW_RECYCLE_SLOTS_LOG	5
#define TCP_TW_RECYCLE_SLOTS		(1<<TCP_TW_RECYCLE_SLOTS_LOG)

/* If time > 4sec, it is the "slow" path and no recycling is required,
 * so we select the tick to cover a range of about 4 seconds.
 */
#if HZ <= 16 || HZ > 4096
# error Unsupported: HZ <= 16 or HZ > 4096
#elif HZ <= 32
# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 64
# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 128
# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 256
# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 512
# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 1024
# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 2048
# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
#else
# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
#endif
#define BICTCP_BETA_SCALE    1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_MAX_INCREMENT 32		/*
					 * Limit on the amount of
					 * increment allowed during
					 * binary search.
					 */
#define BICTCP_FUNC_OF_MIN_INCR 11	/*
					 * log(B/Smin)/log(B/(B-1))+1,
					 * Smin:min increment
					 * B:log factor
					 */
#define BICTCP_B		4	/*
					 * In binary search,
					 * go to point (max+min)/N
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCP_TIME_RETRANS	1	/* Retransmit timer */
#define TCP_TIME_DACK		2	/* Delayed ack timer */
#define TCP_TIME_PROBE0		3	/* Zero window probe timer */
#define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
/* sysctl variables for tcp */
extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_tw_recycle;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_max_tw_buckets;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_westwood;
extern int sysctl_tcp_vegas_cong_avoid;
extern int sysctl_tcp_vegas_alpha;
extern int sysctl_tcp_vegas_beta;
extern int sysctl_tcp_vegas_gamma;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_bic;
extern int sysctl_tcp_bic_fast_convergence;
extern int sysctl_tcp_bic_low_window;
extern int sysctl_tcp_bic_beta;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
struct open_request;

struct or_calltable {
	int  family;
	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry*);
	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
	void (*destructor)	(struct open_request *req);
	void (*send_reset)	(struct sk_buff *skb);
};

struct tcp_v4_open_req {
	__u32			loc_addr;
	__u32			rmt_addr;
	struct ip_options	*opt;
};

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct tcp_v6_open_req {
	struct in6_addr		loc_addr;
	struct in6_addr		rmt_addr;
	struct sk_buff		*pktopts;
	int			iif;
};
#endif
/* this structure is too big */
struct open_request {
	struct open_request	*dl_next; /* Must be first member! */
	__u32			rcv_isn;
	__u32			snt_isn;
	__u16			rmt_port;
	__u16			mss;
	__u8			retrans;
	__u8			__pad;
	__u16	snd_wscale : 4,
		rcv_wscale : 4,
		tstamp_ok : 1,
		sack_ok : 1,
		wscale_ok : 1,
		ecn_ok : 1,
		acked : 1;
	/* The following two fields can be easily recomputed I think -AK */
	__u32			window_clamp;	/* window clamp at creation time */
	__u32			rcv_wnd;	/* rcv_wnd offered first time */
	__u32			ts_recent;
	unsigned long		expires;
	struct or_calltable	*class;
	struct sock		*sk;
	union {
		struct tcp_v4_open_req v4_req;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct tcp_v6_open_req v6_req;
#endif
	} af;
};
/* SLAB cache for open requests. */
extern kmem_cache_t *tcp_openreq_cachep;

#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)

static inline void tcp_openreq_free(struct open_request *req)
{
	req->class->destructor(req);
	tcp_openreq_fastfree(req);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define TCP_INET_FAMILY(fam) 1
#endif
/*
 *	Pointers to address related TCP functions
 *	(i.e. things that depend on the address family)
 */

struct tcp_func {
	int		(*queue_xmit)		(struct sk_buff *skb,
						 int ipfragok);

	void		(*send_check)		(struct sock *sk,
						 struct tcphdr *th,
						 int len,
						 struct sk_buff *skb);

	int		(*rebuild_header)	(struct sock *sk);

	int		(*conn_request)		(struct sock *sk,
						 struct sk_buff *skb);

	struct sock *	(*syn_recv_sock)	(struct sock *sk,
						 struct sk_buff *skb,
						 struct open_request *req,
						 struct dst_entry *dst);

	int		(*remember_stamp)	(struct sock *sk);

	__u16		net_header_len;

	int		(*setsockopt)		(struct sock *sk,
						 int level,
						 int optname,
						 char __user *optval,
						 int optlen);

	int		(*getsockopt)		(struct sock *sk,
						 int level,
						 int optname,
						 char __user *optval,
						 int __user *optlen);

	void		(*addr2sockaddr)	(struct sock *sk,
						 struct sockaddr *addr);

	int sockaddr_len;
};
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}

static inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
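/*
 * Worked example, not in the original header: sequence numbers wrap
 * modulo 2^32, and the signed-difference trick stays correct across
 * the wrap.  before(0xfffffff0, 0x00000010) is true, because
 * (__s32)(0xfffffff0 - 0x00000010) == -32 < 0, even though
 * 0xfffffff0 is the larger plain unsigned value.
 */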
extern struct proto tcp_prot;

DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)
#define TCP_DEC_STATS(field)		SNMP_DEC_STATS(tcp_statistics, field)
#define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)
extern void tcp_put_port(struct sock *sk);
extern void tcp_inherit_port(struct sock *sk, struct sock *child);

extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown(struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk,
		     int cmd,
		     unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk,
				 struct sk_buff *skb,
				 struct tcphdr *th,
				 unsigned len);

extern int tcp_rcv_established(struct sock *sk,
			       struct sk_buff *skb,
			       struct tcphdr *th,
			       unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);
enum tcp_ack_state_t {
	TCP_ACK_SCHED	= 1,
	TCP_ACK_TIMER	= 2,
	TCP_ACK_PUSHED	= 4
};

static inline void tcp_schedule_ack(struct tcp_sock *tp)
{
	tp->ack.pending |= TCP_ACK_SCHED;
}

static inline int tcp_ack_scheduled(struct tcp_sock *tp)
{
	return tp->ack.pending & TCP_ACK_SCHED;
}

static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp)
{
	if (tp->ack.quick && --tp->ack.quick == 0) {
		/* Leaving quickack mode we deflate ATO. */
		tp->ack.ato = TCP_ATO_MIN;
	}
}

extern void tcp_enter_quickack_mode(struct tcp_sock *tp);

static __inline__ void tcp_delack_init(struct tcp_sock *tp)
{
	memset(&tp->ack, 0, sizeof(tp->ack));
}

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}
enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
						     struct sk_buff *skb,
						     struct tcphdr *th,
						     unsigned len);

extern struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
				  struct open_request *req,
				  struct open_request **prev);
extern int tcp_child_process(struct sock *parent,
			     struct sock *child,
			     struct sk_buff *skb);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);

extern void tcp_close(struct sock *sk,
		      long timeout);
extern struct sock *tcp_accept(struct sock *sk, int flags, int *err);
extern unsigned int tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level,
			  int optname,
			  char __user *optval,
			  int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level,
			  int optname, char __user *optval,
			  int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg,
		       size_t len, int nonblock,
		       int flags, int *addr_len);

extern int tcp_listen_start(struct sock *sk);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      int estab);
/*
 *	TCP v4 functions exported for the inet6 API
 */

extern int tcp_v4_rebuild_header(struct sock *sk);

extern int tcp_v4_build_header(struct sock *sk,
			       struct sk_buff *skb);

extern void tcp_v4_send_check(struct sock *sk,
			      struct tcphdr *th, int len,
			      struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk,
			       struct sk_buff *skb);

extern struct sock *tcp_create_openreq_child(struct sock *sk,
					     struct open_request *req,
					     struct sk_buff *skb);

extern struct sock *tcp_v4_syn_recv_sock(struct sock *sk,
					 struct sk_buff *skb,
					 struct open_request *req,
					 struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk,
			 struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk,
			  struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff *tcp_make_synack(struct sock *sk,
				       struct dst_entry *dst,
				       struct open_request *req);

extern int tcp_disconnect(struct sock *sk, int flags);

extern void tcp_unhash(struct sock *sk);

extern int tcp_v4_hash_connecting(struct sock *sk);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
/* tcp_output.c */

extern int  tcp_write_xmit(struct sock *, int nonagle);
extern int  tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int  tcp_trim_head(struct sock *, struct sk_buff *, u32);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int  tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, int priority);
extern int  tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
extern void tcp_clear_xmit_timers(struct sock *);

extern void tcp_delete_keepalive_timer(struct sock *);
extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk, int large);
extern const char tcp_timer_bug_msg[];

/* tcp_diag.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);
static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
	struct tcp_sock *tp = tcp_sk(sk);

	switch (what) {
	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
		tp->pending = 0;

#ifdef TCP_CLEAR_TIMERS
		sk_stop_timer(sk, &tp->retransmit_timer);
#endif
		break;
	case TCP_TIME_DACK:
		tp->ack.blocked = 0;
		tp->ack.pending = 0;

#ifdef TCP_CLEAR_TIMERS
		sk_stop_timer(sk, &tp->delack_timer);
#endif
		break;
	default:
		printk(tcp_timer_bug_msg);
		return;
	}
}
/*
 *	Reset the retransmission timer
 */
static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (when > TCP_RTO_MAX) {
#ifdef TCP_DEBUG
		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
#endif
		when = TCP_RTO_MAX;
	}

	switch (what) {
	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
		tp->pending = what;
		tp->timeout = jiffies + when;
		sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
		break;

	case TCP_TIME_DACK:
		tp->ack.pending |= TCP_ACK_TIMER;
		tp->ack.timeout = jiffies + when;
		sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
		break;

	default:
		printk(tcp_timer_bug_msg);
	}
}
/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We don't have any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min(tp->advmss, tp->mss_cache_std);

	hint = min(hint, tp->rcv_wnd/2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	tp->ack.rcv_mss = hint;
}
static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
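/*
 * Worked example, not in the original header: pred_flags mirrors the
 * third 32-bit word of the expected TCP header.  With a 32-byte header
 * (timestamps on, doff = 8), tcp_header_len << 26 puts 8 into the
 * 4-bit data-offset field, TCP_FLAG_ACK sets the ACK bit, and the low
 * 16 bits hold the expected unscaled window; header prediction then
 * reduces to one compare against this word.
 */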
static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);
/* TCP timestamps are only 32 bits, and this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decidedly
 * only use the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
/* This is what the send packet queueing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid when URG flag is set.	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

#include <net/tcp_ecn.h>
/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_size;
}

static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	if (*count) {
		*count -= tcp_skb_pcount(skb);
		if ((int)*count < 0)
			*count = 0;
	}
}
static inline void tcp_packets_out_inc(struct sock *sk,
				       struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	int orig = tp->packets_out;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!orig)
		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
}

static inline void tcp_packets_out_dec(struct tcp_sock *tp,
				       const struct sk_buff *skb)
{
	tp->packets_out -= tcp_skb_pcount(skb);
}
/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return (tp->packets_out - tp->left_out + tp->retrans_out);
}
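/*
 * Worked example, not in the original header: with packets_out = 10,
 * left_out = 4 (3 SACKed + 1 marked lost) and retrans_out = 1, this
 * returns 10 - 4 + 1 = 7 segments still presumed to be in the network.
 */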
/*
 * Which congestion algorithm is in use on the connection.
 */
#define tcp_is_vegas(__tp)	((__tp)->adv_cong == TCP_VEGAS)
#define tcp_is_westwood(__tp)	((__tp)->adv_cong == TCP_WESTWOOD)
#define tcp_is_bic(__tp)	((__tp)->adv_cong == TCP_BIC)
/* Recalculate snd_ssthresh, we want to set it to:
 *
 * Reno:
 *	one half the current congestion window, but no
 *	less than two segments
 *
 * BIC:
 *	behave like Reno until low_window is reached,
 *	then increase congestion window slowly
 */
static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp)
{
	if (tcp_is_bic(tp)) {
		if (sysctl_tcp_bic_fast_convergence &&
		    tp->snd_cwnd < tp->bictcp.last_max_cwnd)
			tp->bictcp.last_max_cwnd = (tp->snd_cwnd *
						    (BICTCP_BETA_SCALE
						     + sysctl_tcp_bic_beta))
				/ (2 * BICTCP_BETA_SCALE);
		else
			tp->bictcp.last_max_cwnd = tp->snd_cwnd;

		if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
			return max((tp->snd_cwnd * sysctl_tcp_bic_beta)
				   / BICTCP_BETA_SCALE, 2U);
	}

	return max(tp->snd_cwnd >> 1U, 2U);
}
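/*
 * Worked example, not in the original header: Reno with snd_cwnd = 20
 * returns max(20 >> 1, 2) = 10.  BIC above bic_low_window with a
 * beta of 819/1024 and snd_cwnd = 100 returns
 * max(100 * 819 / 1024, 2) = 79, a much shallower reduction.
 */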
/* Stop taking Vegas samples for now. */
#define tcp_vegas_disable(__tp)	((__tp)->vegas.doing_vegas_now = 0)

static inline void tcp_vegas_enable(struct tcp_sock *tp)
{
	/* There are several situations when we must "re-start" Vegas:
	 *
	 *  o when a connection is established
	 *  o after an RTO
	 *  o after fast recovery
	 *  o when we send a packet and there is no outstanding
	 *    unacknowledged data (restarting an idle connection)
	 *
	 * In these circumstances we cannot do a Vegas calculation at the
	 * end of the first RTT, because any calculation we do is using
	 * stale info -- both the saved cwnd and congestion feedback are
	 * stale.
	 *
	 * Instead we must wait until the completion of an RTT during
	 * which we actually receive ACKs.
	 */

	/* Begin taking Vegas samples next time we send something. */
	tp->vegas.doing_vegas_now = 1;

	/* Set the beginning of the next send window. */
	tp->vegas.beg_snd_nxt = tp->snd_nxt;

	tp->vegas.cntRTT = 0;
	tp->vegas.minRTT = 0x7fffffff;
}

/* Should we be taking Vegas samples right now? */
#define tcp_vegas_enabled(__tp)	((__tp)->vegas.doing_vegas_now)
extern void tcp_ca_init(struct tcp_sock *tp);

static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
{
	if (tcp_is_vegas(tp)) {
		if (ca_state == TCP_CA_Open)
			tcp_vegas_enable(tp);
		else
			tcp_vegas_disable(tp);
	}
	tp->ca_state = ca_state;
}
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the rate halving phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)
{
	if ((1 << tp->ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
	if (tp->rx_opt.sack_ok &&
	    (tp->sacked_out >= tp->packets_out - tp->lost_out))
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;
}

extern void tcp_cwnd_application_limited(struct sock *sk);
/* Congestion window validation. (RFC 2861) */

static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
			tcp_cwnd_application_limited(sk);
	}
}
/* Set slow start threshold and cwnd, without dropping into slow start */
static inline void __tcp_enter_cwr(struct tcp_sock *tp)
{
	tp->undo_marker = 0;
	tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + 1U);
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	TCP_ECN_queue_cwr(tp);
}

static inline void tcp_enter_cwr(struct tcp_sock *tp)
{
	tp->prior_ssthresh = 0;
	if (tp->ca_state < TCP_CA_CWR) {
		__tcp_enter_cwr(tp);
		tcp_set_ca_state(tp, TCP_CA_CWR);
	}
}
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return 3;
}

static __inline__ int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
					   const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}
/* Return 0, if packet can be sent now without violating Nagle's rules:
   1. It is full sized.
   2. Or it contains FIN.
   3. Or TCP_NODELAY was set.
   4. Or TCP_CORK is not set, and all sent packets are ACKed.
      With Minshall's modification: all sent small packets are ACKed.
 */
static __inline__ int
tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
		unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}
extern void tcp_set_skb_tso_segs(struct sk_buff *, unsigned int);

/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.
 */
static __inline__ int tcp_snd_test(const struct tcp_sock *tp,
				   struct sk_buff *skb,
				   unsigned cur_mss, int nonagle)
{
	int pkts = tcp_skb_pcount(skb);

	if (!pkts) {
		tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
		pkts = tcp_skb_pcount(skb);
	}

	/*	RFC 1122 - section 4.2.3.4
	 *
	 *	We must queue if:
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) There are packets in flight and we have a small segment
	 *	   [SWS avoidance and Nagle algorithm]
	 *	   (part of SWS is done on packetization)
	 *	   Minshall version says: there are no _small_
	 *	   segments in flight. (tcp_nagle_check)
	 *	c) We have too many packets 'in flight'
	 *
	 *	Don't use the nagle rule for urgent data (or
	 *	for the final FIN -DaveM).
	 *
	 *	Also, the Nagle rule does not apply to frames which
	 *	sit in the middle of the queue (they have no chance
	 *	to get new data) or if the room at the tail of the skb
	 *	is not enough to save something seriously (<32 for now).
	 */

	/* Don't be strict about the congestion window for the
	 * final FIN frame.  -DaveM
	 */
	return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
		(((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}
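/*
 * Illustrative reading, not in the original header: the return value
 * ANDs three gates (Nagle, congestion window, receiver window).  For
 * example, a 100-byte segment with other small segments still unACKed
 * and nonagle == 0 fails the Nagle gate and queues, even when both
 * windows would allow it.
 */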
static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
	if (!tp->packets_out && !tp->pending)
		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
}

static __inline__ int tcp_skb_is_last(const struct sock *sk,
				      const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}
/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
						 struct tcp_sock *tp,
						 unsigned cur_mss,
						 int nonagle)
{
	struct sk_buff *skb = sk->sk_send_head;

	if (skb) {
		if (!tcp_skb_is_last(sk, skb))
			nonagle = TCP_NAGLE_PUSH;
		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
		    tcp_write_xmit(sk, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
	tcp_cwnd_validate(sk, tp);
}

static __inline__ void tcp_push_pending_frames(struct sock *sk,
					       struct tcp_sock *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}

static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
			     tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
}
static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

extern void tcp_destroy_sock(struct sock *sk);
/*
 * Calculate(/check) TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}
/* Prequeue for VJ style copy to user, combined with checksumming. */

static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting.  Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere.  Latency?  Burstiness?  Well, at least now we will
 * see why it failed. 8)8)	--ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!tcp_ack_scheduled(tp))
				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
		}
		return 1;
	}
	return 0;
}
#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (tcp_sk(sk)->bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			tcp_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
static __inline__ void tcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		tcp_destroy_sock(sk);
}
static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->eff_sacks = 0;
	rx_opt->num_sacks = 0;
}
static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}
/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}
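/*
 * Worked example, not in the original header: a SYN offering mss=1460,
 * timestamps, SACK and wscale=7 emits 20 bytes of options:
 *
 *	MSS (4 bytes):			02 04 05 b4
 *	SACK_PERM+TIMESTAMP (12 bytes):	04 02 08 0a TSVAL TSECR
 *	NOP+WINDOW (4 bytes):		01 03 03 07
 *
 * which is why TCPOLEN_TSTAMP_ALIGNED is 12 and TCPOLEN_WSCALE_ALIGNED
 * is 4 above.
 */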
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
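/*
 * Worked example, not in the original header: with the default
 * tcp_adv_win_scale = 2 and space = 65536 bytes of receive buffer,
 * the window offered is 65536 - 65536/4 = 49152; the reserved quarter
 * is overhead slack.  A negative scale would instead offer only
 * space >> |scale|.
 */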
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
				     struct sock *child)
{
	struct tcp_sock *tp = tcp_sk(sk);

	req->sk = child;
	sk_acceptq_added(sk);

	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
	} else {
		tp->accept_queue_tail->dl_next = req;
	}
	tp->accept_queue_tail = req;
	req->dl_next = NULL;
}
struct tcp_listen_opt {
	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	u32			hash_rnd;
	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
};
static inline void
tcp_synq_removed(struct sock *sk, struct open_request *req)
{
	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;

	if (--lopt->qlen == 0)
		tcp_delete_keepalive_timer(sk);
	if (req->retrans == 0)
		lopt->qlen_young--;
}
static inline void tcp_synq_added(struct sock *sk)
{
	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;

	if (lopt->qlen++ == 0)
		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
	lopt->qlen_young++;
}

static inline int tcp_synq_len(struct sock *sk)
{
	return tcp_sk(sk)->listen_opt->qlen;
}

static inline int tcp_synq_young(struct sock *sk)
{
	return tcp_sk(sk)->listen_opt->qlen_young;
}

static inline int tcp_synq_is_full(struct sock *sk)
{
	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
}
static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
				   struct open_request **prev)
{
	write_lock(&tp->syn_wait_lock);
	*prev = req->dl_next;
	write_unlock(&tp->syn_wait_lock);
}

static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
				 struct open_request **prev)
{
	tcp_synq_unlink(tcp_sk(sk), req, prev);
	tcp_synq_removed(sk, req);
	tcp_openreq_free(req);
}
static __inline__ void tcp_openreq_init(struct open_request *req,
					struct tcp_options_received *rx_opt,
					struct sk_buff *skb)
{
	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	req->tstamp_ok = rx_opt->tstamp_ok;
	req->sack_ok = rx_opt->sack_ok;
	req->snd_wscale = rx_opt->snd_wscale;
	req->wscale_ok = rx_opt->wscale_ok;
	req->acked = 0;
	req->ecn_ok = 0;
	req->rmt_port = skb->h.th->source;
}
extern void tcp_enter_memory_pressure(void);

extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */
static inline void tcp_listen_lock(void)
{
	/* read_lock synchronizes with candidate writers */
	read_lock(&tcp_lhash_lock);
	atomic_inc(&tcp_lhash_users);
	read_unlock(&tcp_lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
	if (atomic_dec_and_test(&tcp_lhash_users))
		wake_up(&tcp_lhash_wait);
}
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(const struct tcp_sock *tp)
{
	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

	if (fin_timeout < (tp->rto << 2) - (tp->rto >> 1))
		fin_timeout = (tp->rto << 2) - (tp->rto >> 1);

	return fin_timeout;
}
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
	if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake.  It is necessary to understand the
	   reasons for this constraint before relaxing it: if the peer
	   reboots, its clock may go out-of-sync and half-open connections
	   will not be reset.  Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clocks
	   across reboots.  Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		if (sk->sk_no_largesend || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}

#define TCP_CHECK_TIMER(sk) do { } while (0)
static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && sk->sk_send_head &&
		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}
static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}
/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	struct module		*owner;
	char			*name;
	sa_family_t		family;
	int			(*seq_show) (struct seq_file *m, void *v);
	struct file_operations	*seq_fops;
};

struct tcp_iter_state {
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
	struct seq_operations	seq_ops;
};

extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
/* TCP Westwood functions and constants */

#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */

static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq)
{
	if (tcp_is_westwood(tp))
		tp->westwood.rtt = rtt_seq;
}

static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
	return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
		   (__u32) (tp->mss_cache_std),
		   2U);
}

static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
	return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
}
static inline int tcp_westwood_ssthresh(struct tcp_sock *tp)
{
	__u32 ssthresh = 0;

	if (tcp_is_westwood(tp)) {
		ssthresh = __tcp_westwood_bw_rttmin(tp);
		if (ssthresh)
			tp->snd_ssthresh = ssthresh;
	}

	return (ssthresh != 0);
}

static inline int tcp_westwood_cwnd(struct tcp_sock *tp)
{
	__u32 cwnd = 0;

	if (tcp_is_westwood(tp)) {
		cwnd = __tcp_westwood_bw_rttmin(tp);
		if (cwnd)
			tp->snd_cwnd = cwnd;
	}

	return (cwnd != 0);
}