2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Definitions for the TCP module.
8 * Version: @(#)tcp.h 1.0.5 05/23/93
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
22 #define FASTRETRANS_DEBUG 1
24 /* Cancel timers, when they are not required. */
25 #undef TCP_CLEAR_TIMERS
27 #include <linux/config.h>
28 #include <linux/list.h>
29 #include <linux/tcp.h>
30 #include <linux/slab.h>
31 #include <linux/cache.h>
32 #include <linux/percpu.h>
33 #include <net/checksum.h>
37 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
38 #include <linux/ipv6.h>
40 #include <linux/seq_file.h>
42 /* This is for all connections with a full identity, no wildcards.
43 * New scheme, half the table is for TIME_WAIT, the other half is
44 * for the rest. I'll experiment with dynamic table growth later.
46 struct tcp_ehash_bucket {
48 struct hlist_head chain;
49 } __attribute__((__aligned__(8)));
51 /* This is for listening sockets, thus all sockets which possess wildcards. */
52 #define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
54 /* There are a few simple rules, which allow for local port reuse by
55 * an application. In essence:
57 * 1) Sockets bound to different interfaces may share a local port.
58 * Failing that, goto test 2.
59 * 2) If all sockets have sk->sk_reuse set, and none of them are in
60 * TCP_LISTEN state, the port may be shared.
61 * Failing that, goto test 3.
62 * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
63 * address, and none of them are the same, the port may be shared.
65 * Failing this, the port cannot be shared.
67 * The interesting point is test #2. This is what an FTP server does
68 * all day. To optimize this case we use a specific flag bit defined
69 * below. As we add sockets to a bind bucket list, we perform a
70 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
71 * As long as all sockets added to a bind bucket pass this test,
72 * the flag bit will be set.
73 * The resulting situation is that tcp_v[46]_verify_bind() can just check
74 * for this flag bit, if it is set and the socket trying to bind has
75 * sk->sk_reuse set, we don't even have to walk the owners list at all,
76 * we return that it is ok to bind this socket to the requested local port.
78 * Sounds like a lot of work, but it is worth it. In a more naive
79 * implementation (ie. current FreeBSD etc.) the entire list of ports
80 * must be walked for each data port opened by an ftp server. Needless
81 * to say, this does not scale at all. With a couple thousand FTP
82 * users logged onto your box, isn't it nice to know that new data
83 * ports are created in O(1) time? I thought so. ;-) -DaveM
85 struct tcp_bind_bucket {
87 signed short fastreuse;
88 struct hlist_node node;
89 struct hlist_head owners;
92 #define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
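/* Illustrative sketch (an assumption, not part of this header) of the check
 * described above as it would be applied by a bind-time helper such as
 * tcp_v[46]_get_port(); the surrounding bucket lookup and locking are omitted:
 *
 *	if (tb->fastreuse > 0 &&
 *	    sk->sk_reuse && sk->sk_state != TCP_LISTEN)
 *		goto success;		// no need to walk tb->owners at all
 *
 *	// when the first owner is added, the flag is initialized:
 *	if (hlist_empty(&tb->owners))
 *		tb->fastreuse = (sk->sk_reuse &&
 *				 sk->sk_state != TCP_LISTEN) ? 1 : 0;
 *	else if (tb->fastreuse &&
 *		 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
 *		tb->fastreuse = 0;
 */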
94 struct tcp_bind_hashbucket {
96 struct hlist_head chain;
99 static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
101 return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
104 static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
106 return hlist_empty(&head->chain) ? NULL : __tb_head(head);
109 extern struct tcp_hashinfo {
110 /* This is for sockets with full identity only. Sockets here will
111 * always be without wildcards and will have the following invariant:
113 * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
115 * First half of the table is for sockets not in TIME_WAIT, second half
116 * is for TIME_WAIT sockets only.
118 struct tcp_ehash_bucket *__tcp_ehash;
120 /* Ok, let's try this, I give up, we do need a local binding
121 * TCP hash as well as the others for fast bind/connect.
123 struct tcp_bind_hashbucket *__tcp_bhash;
125 int __tcp_bhash_size;
126 int __tcp_ehash_size;
128 /* All sockets in TCP_LISTEN state will be in here. This is the only
129 * table where wildcard'd TCP sockets can exist. Hash function here
130 * is just local port number.
132 struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
134 /* All the above members are written once at bootup and
135 * never written again _or_ are predominantly read-access.
137 * Now align to a new cache line as all the following members
140 rwlock_t __tcp_lhash_lock ____cacheline_aligned;
141 atomic_t __tcp_lhash_users;
142 wait_queue_head_t __tcp_lhash_wait;
143 spinlock_t __tcp_portalloc_lock;
146 #define tcp_ehash (tcp_hashinfo.__tcp_ehash)
147 #define tcp_bhash (tcp_hashinfo.__tcp_bhash)
148 #define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
149 #define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
150 #define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
151 #define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
152 #define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
153 #define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
154 #define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
156 extern kmem_cache_t *tcp_bucket_cachep;
157 extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
158 unsigned short snum);
159 extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
160 extern void tcp_bucket_unlock(struct sock *sk);
161 extern int tcp_port_rover;
163 /* These are AF independent. */
164 static __inline__ int tcp_bhashfn(__u16 lport)
166 return (lport & (tcp_bhash_size - 1));
169 extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
170 unsigned short snum);
172 #if (BITS_PER_LONG == 64)
173 #define TCP_ADDRCMP_ALIGN_BYTES 8
175 #define TCP_ADDRCMP_ALIGN_BYTES 4
178 /* This is a TIME_WAIT bucket. It works around the memory consumption
179 * problems of sockets in such a state on heavily loaded servers, but
180 * without violating the protocol specification.
182 struct tcp_tw_bucket {
184 * Now struct sock also uses sock_common, so please just
185 * don't add anything before this first member (__tw_common) --acme
187 struct sock_common __tw_common;
188 #define tw_family __tw_common.skc_family
189 #define tw_state __tw_common.skc_state
190 #define tw_reuse __tw_common.skc_reuse
191 #define tw_bound_dev_if __tw_common.skc_bound_dev_if
192 #define tw_node __tw_common.skc_node
193 #define tw_bind_node __tw_common.skc_bind_node
194 #define tw_refcnt __tw_common.skc_refcnt
195 #define tw_xid __tw_common.skc_xid
196 #define tw_vx_info __tw_common.skc_vx_info
197 #define tw_nid __tw_common.skc_nid
198 #define tw_nx_info __tw_common.skc_nx_info
199 volatile unsigned char tw_substate;
200 unsigned char tw_rcv_wscale;
202 /* Socket demultiplex comparisons on incoming packets. */
203 /* these five are in inet_opt */
205 __attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
209 /* And these are ours. */
216 long tw_ts_recent_stamp;
217 unsigned long tw_ttd;
218 struct tcp_bind_bucket *tw_tb;
219 struct hlist_node tw_death_node;
220 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
221 struct in6_addr tw_v6_daddr;
222 struct in6_addr tw_v6_rcv_saddr;
227 static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
228 struct hlist_head *list)
230 hlist_add_head(&tw->tw_node, list);
233 static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
234 struct hlist_head *list)
236 hlist_add_head(&tw->tw_bind_node, list);
239 static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
241 return tw->tw_death_node.pprev != NULL;
244 static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
246 tw->tw_death_node.pprev = NULL;
249 static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
251 __hlist_del(&tw->tw_death_node);
252 tw_dead_node_init(tw);
255 static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
257 if (tw_dead_hashed(tw)) {
258 __tw_del_dead_node(tw);
264 #define tw_for_each(tw, node, head) \
265 hlist_for_each_entry(tw, node, head, tw_node)
267 #define tw_for_each_inmate(tw, node, jail) \
268 hlist_for_each_entry(tw, node, jail, tw_death_node)
270 #define tw_for_each_inmate_safe(tw, node, safe, jail) \
271 hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
273 #define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
275 static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
277 return likely(sk->sk_state != TCP_TIME_WAIT) ?
278 inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
281 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
282 static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
284 return likely(sk->sk_state != TCP_TIME_WAIT) ?
285 &inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
288 static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
290 return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
293 #define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only)
295 static inline int tcp_v6_ipv6only(const struct sock *sk)
297 return likely(sk->sk_state != TCP_TIME_WAIT) ?
298 ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
301 # define __tcp_v6_rcv_saddr(__sk) NULL
302 # define tcp_v6_rcv_saddr(__sk) NULL
303 # define tcptw_sk_ipv6only(__sk) 0
304 # define tcp_v6_ipv6only(__sk) 0
307 extern kmem_cache_t *tcp_timewait_cachep;
309 static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
311 if (atomic_dec_and_test(&tw->tw_refcnt)) {
312 #ifdef INET_REFCNT_DEBUG
313 printk(KERN_DEBUG "tw_bucket %p released\n", tw);
315 kmem_cache_free(tcp_timewait_cachep, tw);
319 extern atomic_t tcp_orphan_count;
320 extern int tcp_tw_count;
321 extern void tcp_time_wait(struct sock *sk, int state, int timeo);
322 extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
323 extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
326 /* Socket demux engine toys. */
328 #define TCP_COMBINED_PORTS(__sport, __dport) \
329 (((__u32)(__sport)<<16) | (__u32)(__dport))
330 #else /* __LITTLE_ENDIAN */
331 #define TCP_COMBINED_PORTS(__sport, __dport) \
332 (((__u32)(__dport)<<16) | (__u32)(__sport))
335 #if (BITS_PER_LONG == 64)
337 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
338 __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
339 #else /* __LITTLE_ENDIAN */
340 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
341 __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
342 #endif /* __BIG_ENDIAN */
343 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
344 (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
345 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
346 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
347 #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
348 (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
349 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
350 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
351 #else /* 32-bit arch */
352 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
353 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
354 ((inet_sk(__sk)->daddr == (__saddr)) && \
355 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
356 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
357 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
358 #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
359 ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
360 (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
361 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
362 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
363 #endif /* 64-bit arch */
365 #define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
366 (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
367 ((__sk)->sk_family == AF_INET6) && \
368 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
369 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
370 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
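/* Illustrative sketch of how the cookie/ports macros above are used by an
 * established-hash lookup, loosely modelled on __tcp_v4_lookup_established();
 * the variable names (sport, hnum, saddr, daddr, dif, head) are assumptions:
 *
 *	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
 *	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
 *
 *	sk_for_each(sk, node, &head->chain)
 *		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
 *			goto hit;
 */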
372 /* These can have wildcards, don't try too hard. */
373 static __inline__ int tcp_lhashfn(unsigned short num)
375 return num & (TCP_LHTABLE_SIZE - 1);
378 static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
380 return tcp_lhashfn(inet_sk(sk)->num);
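/* Example: a listener bound to local port 8080 lands in bucket
 * tcp_lhashfn(8080) == 8080 & 31 == 16 of __tcp_listening_hash[].
 */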
383 #define MAX_TCP_HEADER (128 + MAX_HEADER)
386 * Never offer a window over 32767 without using window scaling. Some
387 * poor stacks do signed 16bit maths!
389 #define MAX_TCP_WINDOW 32767U
391 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
392 #define TCP_MIN_MSS 88U
394 /* Minimal RCV_MSS. */
395 #define TCP_MIN_RCVMSS 536U
397 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
398 #define TCP_FASTRETRANS_THRESH 3
400 /* Maximal reordering. */
401 #define TCP_MAX_REORDERING 127
403 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
404 #define TCP_MAX_QUICKACKS 16U
406 /* urg_data states */
407 #define TCP_URG_VALID 0x0100
408 #define TCP_URG_NOTYET 0x0200
409 #define TCP_URG_READ 0x0400
411 #define TCP_RETR1 3 /*
412 * This is how many retries it does before it
413 * tries to figure out if the gateway is
414 * down. Minimal RFC value is 3; it corresponds
415 * to ~3sec-8min depending on RTO.
418 #define TCP_RETR2 15 /*
419 * This should take at least
420 * 90 minutes to time out.
421 * RFC1122 says that the limit is 100 sec.
422 * 15 is ~13-30min depending on RTO.
425 #define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
426 * connection: ~180sec is RFC minimum */
428 #define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
429 * connection: ~180sec is RFC minimum */
432 #define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
433 * socket. 7 is ~50sec-16min.
437 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
438 * state, about 60 seconds */
439 #define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
440 /* BSD style FIN_WAIT2 deadlock breaker.
441 * It used to be 3min, new value is 60sec,
442 * to combine FIN-WAIT-2 timeout with TIME-WAIT timer.
446 #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
448 #define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
449 #define TCP_ATO_MIN ((unsigned)(HZ/25))
451 #define TCP_DELACK_MIN 4U
452 #define TCP_ATO_MIN 4U
454 #define TCP_RTO_MAX ((unsigned)(120*HZ))
455 #define TCP_RTO_MIN ((unsigned)(HZ/5))
456 #define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */
458 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
459 * for local resources.
462 #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
463 #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
464 #define TCP_KEEPALIVE_INTVL (75*HZ)
466 #define MAX_TCP_KEEPIDLE 32767
467 #define MAX_TCP_KEEPINTVL 32767
468 #define MAX_TCP_KEEPCNT 127
469 #define MAX_TCP_SYNCNT 127
471 #define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
472 #define TCP_SYNQ_HSIZE 512 /* Size of SYNACK hash table */
474 #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
475 #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
476 * after this time. It should be equal
477 * (or greater than) TCP_TIMEWAIT_LEN
478 * to provide reliability equal to one
479 * provided by timewait state.
481 #define TCP_PAWS_WINDOW 1 /* Replay window for per-host
482 * timestamps. It must be less than
483 * minimal timewait lifetime.
486 #define TCP_TW_RECYCLE_SLOTS_LOG 5
487 #define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
489 /* If time > 4sec, it is the "slow" path and no recycling is required,
490 so we select the tick so that the slots cover a range of about 4 seconds.
493 #if HZ <= 16 || HZ > 4096
494 # error Unsupported: HZ <= 16 or HZ > 4096
496 # define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
498 # define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
500 # define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
502 # define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
504 # define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
506 # define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
508 # define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
510 # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
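/* Worked example: with HZ == 1000 (the HZ <= 1024 case in the full ladder
 * above), TCP_TW_RECYCLE_TICK == 10 + 2 - 5 == 7, so each recycle slot covers
 * 2^7 == 128 jiffies (~128 ms), and all TCP_TW_RECYCLE_SLOTS == 32 slots
 * together span 32 * 128 == 4096 jiffies, i.e. roughly the 4 seconds
 * mentioned above.
 */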
513 #define BICTCP_1_OVER_BETA 8 /*
515 * multiplicative decrease factor
517 #define BICTCP_MAX_INCREMENT 32 /*
518 * Limit on the amount of
519 * increment allowed during
522 #define BICTCP_FUNC_OF_MIN_INCR 11 /*
523 * log(B/Smin)/log(B/(B-1))+1,
527 #define BICTCP_B 4 /*
529 * go to point (max+min)/N
536 #define TCPOPT_NOP 1 /* Padding */
537 #define TCPOPT_EOL 0 /* End of options */
538 #define TCPOPT_MSS 2 /* Segment size negotiating */
539 #define TCPOPT_WINDOW 3 /* Window scaling */
540 #define TCPOPT_SACK_PERM 4 /* SACK Permitted */
541 #define TCPOPT_SACK 5 /* SACK Block */
542 #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
548 #define TCPOLEN_MSS 4
549 #define TCPOLEN_WINDOW 3
550 #define TCPOLEN_SACK_PERM 2
551 #define TCPOLEN_TIMESTAMP 10
553 /* But this is what stacks really send out. */
554 #define TCPOLEN_TSTAMP_ALIGNED 12
555 #define TCPOLEN_WSCALE_ALIGNED 4
556 #define TCPOLEN_SACKPERM_ALIGNED 4
557 #define TCPOLEN_SACK_BASE 2
558 #define TCPOLEN_SACK_BASE_ALIGNED 4
559 #define TCPOLEN_SACK_PERBLOCK 8
561 #define TCP_TIME_RETRANS 1 /* Retransmit timer */
562 #define TCP_TIME_DACK 2 /* Delayed ack timer */
563 #define TCP_TIME_PROBE0 3 /* Zero window probe timer */
564 #define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
566 /* Flags in tp->nonagle */
567 #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
568 #define TCP_NAGLE_CORK 2 /* Socket is corked */
569 #define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
571 /* sysctl variables for tcp */
572 extern int sysctl_max_syn_backlog;
573 extern int sysctl_tcp_timestamps;
574 extern int sysctl_tcp_window_scaling;
575 extern int sysctl_tcp_sack;
576 extern int sysctl_tcp_fin_timeout;
577 extern int sysctl_tcp_tw_recycle;
578 extern int sysctl_tcp_keepalive_time;
579 extern int sysctl_tcp_keepalive_probes;
580 extern int sysctl_tcp_keepalive_intvl;
581 extern int sysctl_tcp_syn_retries;
582 extern int sysctl_tcp_synack_retries;
583 extern int sysctl_tcp_retries1;
584 extern int sysctl_tcp_retries2;
585 extern int sysctl_tcp_orphan_retries;
586 extern int sysctl_tcp_syncookies;
587 extern int sysctl_tcp_retrans_collapse;
588 extern int sysctl_tcp_stdurg;
589 extern int sysctl_tcp_rfc1337;
590 extern int sysctl_tcp_abort_on_overflow;
591 extern int sysctl_tcp_max_orphans;
592 extern int sysctl_tcp_max_tw_buckets;
593 extern int sysctl_tcp_fack;
594 extern int sysctl_tcp_reordering;
595 extern int sysctl_tcp_ecn;
596 extern int sysctl_tcp_dsack;
597 extern int sysctl_tcp_mem[3];
598 extern int sysctl_tcp_wmem[3];
599 extern int sysctl_tcp_rmem[3];
600 extern int sysctl_tcp_app_win;
601 extern int sysctl_tcp_adv_win_scale;
602 extern int sysctl_tcp_tw_reuse;
603 extern int sysctl_tcp_frto;
604 extern int sysctl_tcp_low_latency;
605 extern int sysctl_tcp_westwood;
606 extern int sysctl_tcp_vegas_cong_avoid;
607 extern int sysctl_tcp_vegas_alpha;
608 extern int sysctl_tcp_vegas_beta;
609 extern int sysctl_tcp_vegas_gamma;
610 extern int sysctl_tcp_nometrics_save;
611 extern int sysctl_tcp_bic;
612 extern int sysctl_tcp_bic_fast_convergence;
613 extern int sysctl_tcp_bic_low_window;
614 extern int sysctl_tcp_moderate_rcvbuf;
615 extern int sysctl_tcp_tso_win_divisor;
617 extern atomic_t tcp_memory_allocated;
618 extern atomic_t tcp_sockets_allocated;
619 extern int tcp_memory_pressure;
623 struct or_calltable {
625 int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
626 void (*send_ack) (struct sk_buff *skb, struct open_request *req);
627 void (*destructor) (struct open_request *req);
628 void (*send_reset) (struct sk_buff *skb);
631 struct tcp_v4_open_req {
634 struct ip_options *opt;
637 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
638 struct tcp_v6_open_req {
639 struct in6_addr loc_addr;
640 struct in6_addr rmt_addr;
641 struct sk_buff *pktopts;
646 /* this structure is too big */
647 struct open_request {
648 struct open_request *dl_next; /* Must be first member! */
655 __u16 snd_wscale : 4,
662 /* The following two fields can be easily recomputed I think -AK */
663 __u32 window_clamp; /* window clamp at creation time */
664 __u32 rcv_wnd; /* rcv_wnd offered first time */
666 unsigned long expires;
667 struct or_calltable *class;
670 struct tcp_v4_open_req v4_req;
671 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
672 struct tcp_v6_open_req v6_req;
675 #ifdef CONFIG_ACCEPT_QUEUES
676 unsigned long acceptq_time_stamp;
681 /* SLAB cache for open requests. */
682 extern kmem_cache_t *tcp_openreq_cachep;
684 #define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
685 #define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
687 static inline void tcp_openreq_free(struct open_request *req)
689 req->class->destructor(req);
690 tcp_openreq_fastfree(req);
693 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
694 #define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
696 #define TCP_INET_FAMILY(fam) 1
700 * Pointers to address related TCP functions
701 * (i.e. things that depend on the address family)
705 int (*queue_xmit) (struct sk_buff *skb,
708 void (*send_check) (struct sock *sk,
711 struct sk_buff *skb);
713 int (*rebuild_header) (struct sock *sk);
715 int (*conn_request) (struct sock *sk,
716 struct sk_buff *skb);
718 struct sock * (*syn_recv_sock) (struct sock *sk,
720 struct open_request *req,
721 struct dst_entry *dst);
723 int (*remember_stamp) (struct sock *sk);
725 __u16 net_header_len;
727 int (*setsockopt) (struct sock *sk,
733 int (*getsockopt) (struct sock *sk,
740 void (*addr2sockaddr) (struct sock *sk,
747 * The next routines deal with comparing 32 bit unsigned ints
748 * and worry about wraparound (automatic with unsigned arithmetic).
751 static inline int before(__u32 seq1, __u32 seq2)
753 return (__s32)(seq1-seq2) < 0;
756 static inline int after(__u32 seq1, __u32 seq2)
758 return (__s32)(seq2-seq1) < 0;
762 /* is s2<=s1<=s3 ? */
763 static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
765 return seq3 - seq2 >= seq1 - seq2;
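/* Example of why the signed-difference trick handles wraparound:
 * before(0xfffffff0, 0x10) is true, since (__s32)(0xfffffff0 - 0x10)
 * == (__s32)0xffffffe0 < 0, even though 0xfffffff0 > 0x10 as an unsigned
 * value; the 32-bit sequence space is treated as circular.
 */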
769 extern struct proto tcp_prot;
771 DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
772 #define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
773 #define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
774 #define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
775 #define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
776 #define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
777 #define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
779 extern void tcp_put_port(struct sock *sk);
780 extern void tcp_inherit_port(struct sock *sk, struct sock *child);
782 extern void tcp_v4_err(struct sk_buff *skb, u32);
784 extern void tcp_shutdown (struct sock *sk, int how);
786 extern int tcp_v4_rcv(struct sk_buff *skb);
788 extern struct sock * tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif);
790 extern int tcp_v4_remember_stamp(struct sock *sk);
792 extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
794 extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
795 struct msghdr *msg, size_t size);
796 extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
798 extern int tcp_ioctl(struct sock *sk,
802 extern int tcp_rcv_state_process(struct sock *sk,
807 extern int tcp_rcv_established(struct sock *sk,
812 extern void tcp_rcv_space_adjust(struct sock *sk);
821 static inline void tcp_schedule_ack(struct tcp_opt *tp)
823 tp->ack.pending |= TCP_ACK_SCHED;
826 static inline int tcp_ack_scheduled(struct tcp_opt *tp)
828 return tp->ack.pending&TCP_ACK_SCHED;
831 static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
833 if (tp->ack.quick && --tp->ack.quick == 0) {
834 /* Leaving quickack mode we deflate ATO. */
835 tp->ack.ato = TCP_ATO_MIN;
839 extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
841 static __inline__ void tcp_delack_init(struct tcp_opt *tp)
843 memset(&tp->ack, 0, sizeof(tp->ack));
846 static inline void tcp_clear_options(struct tcp_opt *tp)
848 tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
860 extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
865 extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
866 struct open_request *req,
867 struct open_request **prev);
868 extern int tcp_child_process(struct sock *parent,
870 struct sk_buff *skb);
871 extern void tcp_enter_frto(struct sock *sk);
872 extern void tcp_enter_loss(struct sock *sk, int how);
873 extern void tcp_clear_retrans(struct tcp_opt *tp);
874 extern void tcp_update_metrics(struct sock *sk);
876 extern void tcp_close(struct sock *sk,
878 extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
879 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
881 extern int tcp_getsockopt(struct sock *sk, int level,
885 extern int tcp_setsockopt(struct sock *sk, int level,
886 int optname, char __user *optval,
888 extern void tcp_set_keepalive(struct sock *sk, int val);
889 extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
891 size_t len, int nonblock,
892 int flags, int *addr_len);
894 extern int tcp_listen_start(struct sock *sk);
896 extern void tcp_parse_options(struct sk_buff *skb,
901 * TCP v4 functions exported for the inet6 API
904 extern int tcp_v4_rebuild_header(struct sock *sk);
906 extern int tcp_v4_build_header(struct sock *sk,
907 struct sk_buff *skb);
909 extern void tcp_v4_send_check(struct sock *sk,
910 struct tcphdr *th, int len,
911 struct sk_buff *skb);
913 extern int tcp_v4_conn_request(struct sock *sk,
914 struct sk_buff *skb);
916 extern struct sock * tcp_create_openreq_child(struct sock *sk,
917 struct open_request *req,
918 struct sk_buff *skb);
920 extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
922 struct open_request *req,
923 struct dst_entry *dst);
925 extern int tcp_v4_do_rcv(struct sock *sk,
926 struct sk_buff *skb);
928 extern int tcp_v4_connect(struct sock *sk,
929 struct sockaddr *uaddr,
932 extern int tcp_connect(struct sock *sk);
934 extern struct sk_buff * tcp_make_synack(struct sock *sk,
935 struct dst_entry *dst,
936 struct open_request *req);
938 extern int tcp_disconnect(struct sock *sk, int flags);
940 extern void tcp_unhash(struct sock *sk);
942 extern int tcp_v4_hash_connecting(struct sock *sk);
945 /* From syncookies.c */
946 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
947 struct ip_options *opt);
948 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
953 extern int tcp_write_xmit(struct sock *, int nonagle);
954 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
955 extern void tcp_xmit_retransmit_queue(struct sock *);
956 extern void tcp_simple_retransmit(struct sock *);
957 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
959 extern void tcp_send_probe0(struct sock *);
960 extern void tcp_send_partial(struct sock *);
961 extern int tcp_write_wakeup(struct sock *);
962 extern void tcp_send_fin(struct sock *sk);
963 extern void tcp_send_active_reset(struct sock *sk, int priority);
964 extern int tcp_send_synack(struct sock *);
965 extern void tcp_push_one(struct sock *, unsigned mss_now);
966 extern void tcp_send_ack(struct sock *sk);
967 extern void tcp_send_delayed_ack(struct sock *sk);
968 extern void cleanup_rbuf(struct sock *sk, int copied);
971 extern void tcp_init_xmit_timers(struct sock *);
972 extern void tcp_clear_xmit_timers(struct sock *);
974 extern void tcp_delete_keepalive_timer(struct sock *);
975 extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
976 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
977 extern unsigned int tcp_current_mss(struct sock *sk, int large);
980 extern const char tcp_timer_bug_msg[];
984 extern void tcp_get_info(struct sock *, struct tcp_info *);
986 /* Read 'sendfile()'-style from a TCP socket */
987 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
988 unsigned int, size_t);
989 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
990 sk_read_actor_t recv_actor);
992 static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
994 struct tcp_opt *tp = tcp_sk(sk);
997 case TCP_TIME_RETRANS:
998 case TCP_TIME_PROBE0:
1001 #ifdef TCP_CLEAR_TIMERS
1002 sk_stop_timer(sk, &tp->retransmit_timer);
1006 tp->ack.blocked = 0;
1007 tp->ack.pending = 0;
1009 #ifdef TCP_CLEAR_TIMERS
1010 sk_stop_timer(sk, &tp->delack_timer);
1015 printk(tcp_timer_bug_msg);
1023 * Reset the retransmission timer
1025 static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
1027 struct tcp_opt *tp = tcp_sk(sk);
1029 if (when > TCP_RTO_MAX) {
1031 printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
1037 case TCP_TIME_RETRANS:
1038 case TCP_TIME_PROBE0:
1040 tp->timeout = jiffies+when;
1041 sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
1045 tp->ack.pending |= TCP_ACK_TIMER;
1046 tp->ack.timeout = jiffies+when;
1047 sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
1052 printk(tcp_timer_bug_msg);
1057 /* Initialize RCV_MSS value.
1058 * RCV_MSS is our guess about the MSS used by the peer.
1059 * We have no direct information about the MSS.
1060 * It's better to underestimate the RCV_MSS rather than overestimate it.
1061 * Overestimating it makes us ACK less frequently than needed.
1062 * Underestimation is easier to detect and fix by tcp_measure_rcv_mss().
1065 static inline void tcp_initialize_rcv_mss(struct sock *sk)
1067 struct tcp_opt *tp = tcp_sk(sk);
1068 unsigned int hint = min(tp->advmss, tp->mss_cache_std);
1070 hint = min(hint, tp->rcv_wnd/2);
1071 hint = min(hint, TCP_MIN_RCVMSS);
1072 hint = max(hint, TCP_MIN_MSS);
1074 tp->ack.rcv_mss = hint;
1077 static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
1079 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1080 ntohl(TCP_FLAG_ACK) |
1084 static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
1086 __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
1089 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
1091 if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
1093 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
1095 tcp_fast_path_on(tp);
1098 /* Compute the actual receive window we are currently advertising.
1099 * Rcv_nxt can be after the window if our peer pushes more data
1100 * than the offered window.
1102 static __inline__ u32 tcp_receive_window(const struct tcp_opt *tp)
1104 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
1111 /* Choose a new window, without checks for shrinking, and without
1112 * scaling applied to the result. The caller does these things
1113 * if necessary. This is a "raw" window selection.
1115 extern u32 __tcp_select_window(struct sock *sk);
1117 /* TCP timestamps are only 32 bits; this causes a slight
1118 * complication on 64-bit systems since we store a snapshot
1119 * of jiffies in the buffer control blocks below. We decidedly
1120 * use only the low 32 bits of jiffies and hide the ugly
1121 * casts with the following macro.
1123 #define tcp_time_stamp ((__u32)(jiffies))
1125 /* This is what the send packet queueing engine uses to pass
1126 * TCP per-packet control information to the transmission
1127 * code. We also store the host-order sequence numbers in
1128 * here too. This is 36 bytes on 32-bit architectures,
1129 * 40 bytes on 64-bit machines; if this grows, please adjust
1130 * skbuff.h:skbuff->cb[xxx] size appropriately.
1134 struct inet_skb_parm h4;
1135 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
1136 struct inet6_skb_parm h6;
1138 } header; /* For incoming frames */
1139 __u32 seq; /* Starting sequence number */
1140 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
1141 __u32 when; /* used to compute rtt's */
1142 __u8 flags; /* TCP header flags. */
1144 /* NOTE: These must match up to the flags byte in a
1147 #define TCPCB_FLAG_FIN 0x01
1148 #define TCPCB_FLAG_SYN 0x02
1149 #define TCPCB_FLAG_RST 0x04
1150 #define TCPCB_FLAG_PSH 0x08
1151 #define TCPCB_FLAG_ACK 0x10
1152 #define TCPCB_FLAG_URG 0x20
1153 #define TCPCB_FLAG_ECE 0x40
1154 #define TCPCB_FLAG_CWR 0x80
1156 __u8 sacked; /* State flags for SACK/FACK. */
1157 #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
1158 #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
1159 #define TCPCB_LOST 0x04 /* SKB is lost */
1160 #define TCPCB_TAGBITS 0x07 /* All tag bits */
1162 #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
1163 #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
1165 #define TCPCB_URG 0x20 /* Urgent pointer advanced here */
1167 #define TCPCB_AT_TAIL (TCPCB_URG)
1169 __u16 urg_ptr; /* Valid if the URG flag is set. */
1170 __u32 ack_seq; /* Sequence number ACK'd */
1173 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
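/* Illustrative sketch (an assumption, not copied from the send path) of how
 * the control block is typically filled in when a data segment is queued for
 * transmission; `copy' stands for the number of payload bytes added:
 *
 *	TCP_SKB_CB(skb)->seq     = tp->write_seq;
 *	TCP_SKB_CB(skb)->end_seq = tp->write_seq + copy;
 *	TCP_SKB_CB(skb)->flags   = TCPCB_FLAG_ACK;
 *	TCP_SKB_CB(skb)->sacked  = 0;
 */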
1175 #include <net/tcp_ecn.h>
1177 /* Due to TSO, an SKB can be composed of multiple actual
1178 * packets. To keep these tracked properly, we use this.
1180 static inline int tcp_skb_pcount(const struct sk_buff *skb)
1182 return skb_shinfo(skb)->tso_segs;
1185 /* This is valid iff tcp_skb_pcount() > 1. */
1186 static inline int tcp_skb_mss(const struct sk_buff *skb)
1188 return skb_shinfo(skb)->tso_size;
1191 static inline void tcp_inc_pcount(tcp_pcount_t *count,
1192 const struct sk_buff *skb)
1194 count->val += tcp_skb_pcount(skb);
1197 static inline void tcp_inc_pcount_explicit(tcp_pcount_t *count, int amt)
1202 static inline void tcp_dec_pcount_explicit(tcp_pcount_t *count, int amt)
1207 static inline void tcp_dec_pcount(tcp_pcount_t *count,
1208 const struct sk_buff *skb)
1210 count->val -= tcp_skb_pcount(skb);
1213 static inline void tcp_dec_pcount_approx(tcp_pcount_t *count,
1214 const struct sk_buff *skb)
1217 count->val -= tcp_skb_pcount(skb);
1218 if ((int)count->val < 0)
1223 static inline __u32 tcp_get_pcount(const tcp_pcount_t *count)
1228 static inline void tcp_set_pcount(tcp_pcount_t *count, __u32 val)
1233 static inline void tcp_packets_out_inc(struct sock *sk,
1235 const struct sk_buff *skb)
1237 int orig = tcp_get_pcount(&tp->packets_out);
1239 tcp_inc_pcount(&tp->packets_out, skb);
1241 tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
1244 static inline void tcp_packets_out_dec(struct tcp_opt *tp,
1245 const struct sk_buff *skb)
1247 tcp_dec_pcount(&tp->packets_out, skb);
1250 /* This determines how many packets are "in the network" to the best
1251 * of our knowledge. In many cases it is conservative, but where
1252 * detailed information is available from the receiver (via SACK
1253 * blocks etc.) we can make more aggressive calculations.
1255 * Use this for decisions involving congestion control, use just
1256 * tp->packets_out to determine if the send queue is empty or not.
1258 * Read this equation as:
1260 * "Packets sent once on transmission queue" MINUS
1261 * "Packets left network, but not honestly ACKed yet" PLUS
1262 * "Packets fast retransmitted"
1264 static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_opt *tp)
1266 return (tcp_get_pcount(&tp->packets_out) -
1267 tcp_get_pcount(&tp->left_out) +
1268 tcp_get_pcount(&tp->retrans_out));
1272 * Which congestion algorithm is in use on the connection.
1274 #define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS)
1275 #define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD)
1276 #define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC)
1278 /* Recalculate snd_ssthresh, we want to set it to:
1281 * one half the current congestion window, but no
1282 * less than two segments
1285 * behave like Reno until low_window is reached,
1286 * then increase congestion window slowly
1288 static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
1290 if (tcp_is_bic(tp)) {
1291 if (sysctl_tcp_bic_fast_convergence &&
1292 tp->snd_cwnd < tp->bictcp.last_max_cwnd)
1293 tp->bictcp.last_max_cwnd
1294 = (tp->snd_cwnd * (2*BICTCP_1_OVER_BETA-1))
1295 / (BICTCP_1_OVER_BETA/2);
1297 tp->bictcp.last_max_cwnd = tp->snd_cwnd;
1299 if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
1300 return max(tp->snd_cwnd - (tp->snd_cwnd/BICTCP_1_OVER_BETA),
1304 return max(tp->snd_cwnd >> 1U, 2U);
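/* With BICTCP_1_OVER_BETA == 8, the BIC branch above (once snd_cwnd exceeds
 * sysctl_tcp_bic_low_window) backs off to snd_cwnd - snd_cwnd/8, i.e. a 7/8
 * multiplicative decrease, while the default Reno-style path halves the
 * window (snd_cwnd >> 1); both results are clamped to at least 2 segments.
 */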
1307 /* Stop taking Vegas samples for now. */
1308 #define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
1310 static inline void tcp_vegas_enable(struct tcp_opt *tp)
1312 /* There are several situations when we must "re-start" Vegas:
1314 * o when a connection is established
1316 * o after fast recovery
1317 * o when we send a packet and there is no outstanding
1318 * unacknowledged data (restarting an idle connection)
1320 * In these circumstances we cannot do a Vegas calculation at the
1321 * end of the first RTT, because any calculation we do is using
1322 * stale info -- both the saved cwnd and congestion feedback are
1325 * Instead we must wait until the completion of an RTT during
1326 * which we actually receive ACKs.
1329 /* Begin taking Vegas samples next time we send something. */
1330 tp->vegas.doing_vegas_now = 1;
1332 /* Set the beginning of the next send window. */
1333 tp->vegas.beg_snd_nxt = tp->snd_nxt;
1335 tp->vegas.cntRTT = 0;
1336 tp->vegas.minRTT = 0x7fffffff;
1339 /* Should we be taking Vegas samples right now? */
1340 #define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
1342 extern void tcp_ca_init(struct tcp_opt *tp);
1344 static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
1346 if (tcp_is_vegas(tp)) {
1347 if (ca_state == TCP_CA_Open)
1348 tcp_vegas_enable(tp);
1350 tcp_vegas_disable(tp);
1352 tp->ca_state = ca_state;
1355 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1356 * The exception is rate halving phase, when cwnd is decreasing towards
1359 static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
1361 if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
1362 return tp->snd_ssthresh;
1364 return max(tp->snd_ssthresh,
1365 ((tp->snd_cwnd >> 1) +
1366 (tp->snd_cwnd >> 2)));
1369 static inline void tcp_sync_left_out(struct tcp_opt *tp)
1372 (tcp_get_pcount(&tp->sacked_out) >=
1373 tcp_get_pcount(&tp->packets_out) - tcp_get_pcount(&tp->lost_out)))
1374 tcp_set_pcount(&tp->sacked_out,
1375 (tcp_get_pcount(&tp->packets_out) -
1376 tcp_get_pcount(&tp->lost_out)));
1377 tcp_set_pcount(&tp->left_out,
1378 (tcp_get_pcount(&tp->sacked_out) +
1379 tcp_get_pcount(&tp->lost_out)));
1382 extern void tcp_cwnd_application_limited(struct sock *sk);
1384 /* Congestion window validation. (RFC2861) */
1386 static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
1388 __u32 packets_out = tcp_get_pcount(&tp->packets_out);
1390 if (packets_out >= tp->snd_cwnd) {
1391 /* Network is fully fed. */
1392 tp->snd_cwnd_used = 0;
1393 tp->snd_cwnd_stamp = tcp_time_stamp;
1395 /* Network starves. */
1396 if (tcp_get_pcount(&tp->packets_out) > tp->snd_cwnd_used)
1397 tp->snd_cwnd_used = tcp_get_pcount(&tp->packets_out);
1399 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
1400 tcp_cwnd_application_limited(sk);
1404 /* Set the slow start threshold and cwnd without falling back to slow start */
1405 static inline void __tcp_enter_cwr(struct tcp_opt *tp)
1407 tp->undo_marker = 0;
1408 tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
1409 tp->snd_cwnd = min(tp->snd_cwnd,
1410 tcp_packets_in_flight(tp) + 1U);
1411 tp->snd_cwnd_cnt = 0;
1412 tp->high_seq = tp->snd_nxt;
1413 tp->snd_cwnd_stamp = tcp_time_stamp;
1414 TCP_ECN_queue_cwr(tp);
1417 static inline void tcp_enter_cwr(struct tcp_opt *tp)
1419 tp->prior_ssthresh = 0;
1420 if (tp->ca_state < TCP_CA_CWR) {
1421 __tcp_enter_cwr(tp);
1422 tcp_set_ca_state(tp, TCP_CA_CWR);
1426 extern __u32 tcp_init_cwnd(struct tcp_opt *tp, struct dst_entry *dst);
1428 /* Slow start with delack produces 3 packets of burst, so that
1429 * it is safe "de facto".
1431 static __inline__ __u32 tcp_max_burst(const struct tcp_opt *tp)
1436 static __inline__ int tcp_minshall_check(const struct tcp_opt *tp)
1438 return after(tp->snd_sml,tp->snd_una) &&
1439 !after(tp->snd_sml, tp->snd_nxt);
1442 static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss,
1443 const struct sk_buff *skb)
1446 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1449 /* Return 0 if the packet can be sent now without violating Nagle's rules:
1450 1. It is full sized.
1451 2. Or it contains FIN.
1452 3. Or higher layers meant to force a packet boundary, hence the PSH bit.
1453 4. Or TCP_NODELAY was set.
1454 5. Or TCP_CORK is not set, and all sent packets are ACKed.
1455 With Minshall's modification: all sent small packets are ACKed.
1458 static __inline__ int
1459 tcp_nagle_check(const struct tcp_opt *tp, const struct sk_buff *skb,
1460 unsigned mss_now, int nonagle)
1462 return (skb->len < mss_now &&
1463 !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1464 ((nonagle&TCP_NAGLE_CORK) ||
1466 tcp_get_pcount(&tp->packets_out) &&
1467 tcp_minshall_check(tp))));
1470 extern void tcp_set_skb_tso_segs(struct sk_buff *, unsigned int);
1472 /* This checks if the data bearing packet SKB (usually sk->sk_send_head)
1473 * should be put on the wire right now.
1475 static __inline__ int tcp_snd_test(const struct tcp_opt *tp,
1476 struct sk_buff *skb,
1477 unsigned cur_mss, int nonagle)
1479 int pkts = tcp_skb_pcount(skb);
1482 tcp_set_skb_tso_segs(skb, tp->mss_cache_std);
1483 pkts = tcp_skb_pcount(skb);
1486 /* RFC 1122 - section 4.2.3.4
1490 * a) The right edge of this frame exceeds the window
1491 * b) There are packets in flight and we have a small segment
1492 * [SWS avoidance and Nagle algorithm]
1493 * (part of SWS is done on packetization)
1494 * The Minshall version reads: there are no _small_
1495 * segments in flight. (tcp_nagle_check)
1496 * c) We have too many packets 'in flight'
1498 * Don't use the nagle rule for urgent data (or
1499 * for the final FIN -DaveM).
1501 * Also, the Nagle rule does not apply to frames which
1502 * sit in the middle of the queue (they have no chance
1503 * to get new data), or when the room at the tail of the skb is
1504 * not enough to save anything significant (<32 for now).
1507 /* Don't be strict about the congestion window for the
1508 * final FIN frame. -DaveM
1510 return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
1511 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
1512 (((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) ||
1513 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
1514 !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
1517 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
1519 if (!tcp_get_pcount(&tp->packets_out) && !tp->pending)
1520 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
1523 static __inline__ int tcp_skb_is_last(const struct sock *sk,
1524 const struct sk_buff *skb)
1526 return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1529 /* Push out any pending frames which were held back due to
1530 * TCP_CORK or attempt at coalescing tiny packets.
1531 * The socket must be locked by the caller.
1533 static __inline__ void __tcp_push_pending_frames(struct sock *sk,
1538 struct sk_buff *skb = sk->sk_send_head;
1541 if (!tcp_skb_is_last(sk, skb))
1542 nonagle = TCP_NAGLE_PUSH;
1543 if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
1544 tcp_write_xmit(sk, nonagle))
1545 tcp_check_probe_timer(sk, tp);
1547 tcp_cwnd_validate(sk, tp);
1550 static __inline__ void tcp_push_pending_frames(struct sock *sk,
1553 __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
1556 static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
1558 struct sk_buff *skb = sk->sk_send_head;
1561 tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
1562 tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
1565 static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1570 static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1575 extern void tcp_destroy_sock(struct sock *sk);
1579 * Calculate(/check) TCP checksum
1581 static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
1582 unsigned long saddr, unsigned long daddr,
1585 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1588 static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
1590 return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
1593 static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
1595 return skb->ip_summed != CHECKSUM_UNNECESSARY &&
1596 __tcp_checksum_complete(skb);
1599 /* Prequeue for VJ style copy to user, combined with checksumming. */
1601 static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
1603 tp->ucopy.task = NULL;
1605 tp->ucopy.memory = 0;
1606 skb_queue_head_init(&tp->ucopy.prequeue);
1609 /* Packet is added to VJ-style prequeue for processing in process
1610 * context, if a reader task is waiting. Apparently, this exciting
1611 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1612 * failed somewhere. Latency? Burstiness? Well, at least now we will
1613 * see why it failed. 8)8) --ANK
1615 * NOTE: is this not too big to inline?
1617 static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1619 struct tcp_opt *tp = tcp_sk(sk);
1621 if (!sysctl_tcp_low_latency && tp->ucopy.task) {
1622 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1623 tp->ucopy.memory += skb->truesize;
1624 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1625 struct sk_buff *skb1;
1627 BUG_ON(sock_owned_by_user(sk));
1629 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1630 sk->sk_backlog_rcv(sk, skb1);
1631 NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
1634 tp->ucopy.memory = 0;
1635 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1636 wake_up_interruptible(sk->sk_sleep);
1637 if (!tcp_ack_scheduled(tp))
1638 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
1649 static const char *statename[]={
1650 "Unused","Established","Syn Sent","Syn Recv",
1651 "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1652 "Close Wait","Last ACK","Listen","Closing"
1656 static __inline__ void tcp_set_state(struct sock *sk, int state)
1658 int oldstate = sk->sk_state;
1661 case TCP_ESTABLISHED:
1662 if (oldstate != TCP_ESTABLISHED)
1663 TCP_INC_STATS(TCP_MIB_CURRESTAB);
1667 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1668 TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1670 sk->sk_prot->unhash(sk);
1671 if (tcp_sk(sk)->bind_hash &&
1672 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1676 if (oldstate==TCP_ESTABLISHED)
1677 TCP_DEC_STATS(TCP_MIB_CURRESTAB);
1680 /* Change state AFTER socket is unhashed to avoid closed
1681 * socket sitting in hash tables.
1683 sk->sk_state = state;
1686 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
1690 static __inline__ void tcp_done(struct sock *sk)
1692 tcp_set_state(sk, TCP_CLOSE);
1693 tcp_clear_xmit_timers(sk);
1695 sk->sk_shutdown = SHUTDOWN_MASK;
1697 if (!sock_flag(sk, SOCK_DEAD))
1698 sk->sk_state_change(sk);
1700 tcp_destroy_sock(sk);
1703 static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
1710 static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
1712 if (tp->tstamp_ok) {
1713 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1714 (TCPOPT_NOP << 16) |
1715 (TCPOPT_TIMESTAMP << 8) |
1717 *ptr++ = htonl(tstamp);
1718 *ptr++ = htonl(tp->ts_recent);
1720 if (tp->eff_sacks) {
1721 struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
1724 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1725 (TCPOPT_NOP << 16) |
1726 (TCPOPT_SACK << 8) |
1727 (TCPOLEN_SACK_BASE +
1728 (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
1729 for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
1730 *ptr++ = htonl(sp[this_sack].start_seq);
1731 *ptr++ = htonl(sp[this_sack].end_seq);
1740 /* Construct a tcp options header for a SYN or SYN_ACK packet.
1741 * If this is ever changed, make sure to change the definition of
1742 * MAX_SYN_SIZE to match the new maximum number of options that you can store.
1745 static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
1746 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
1748 /* We always get an MSS option.
1749 * The option bytes which will be seen in normal data
1750 * packets should timestamps be used, must be in the MSS
1751 * advertised. But we subtract them from tp->mss_cache so
1752 * that calculations in tcp_sendmsg are simpler etc.
1753 * So account for this fact here if necessary. If we
1754 * don't do this correctly, as a receiver we won't
1755 * recognize data packets as being full sized when we
1756 * should, and thus we won't abide by the delayed ACK rules correctly.
1758 * SACKs don't matter, we never delay an ACK when we
1759 * have any of those going out.
1761 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
1764 *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
1765 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1767 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1768 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1769 *ptr++ = htonl(tstamp); /* TSVAL */
1770 *ptr++ = htonl(ts_recent); /* TSECR */
1772 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1773 (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
1775 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
1778 /* Determine a window scaling and initial window to offer. */
1779 extern void tcp_select_initial_window(int __space, __u32 mss,
1780 __u32 *rcv_wnd, __u32 *window_clamp,
1781 int wscale_ok, __u8 *rcv_wscale);
1783 static inline int tcp_win_from_space(int space)
1785 return sysctl_tcp_adv_win_scale<=0 ?
1786 (space>>(-sysctl_tcp_adv_win_scale)) :
1787 space - (space>>sysctl_tcp_adv_win_scale);
1790 /* Note: caller must be prepared to deal with negative returns */
1791 static inline int tcp_space(const struct sock *sk)
1793 return tcp_win_from_space(sk->sk_rcvbuf -
1794 atomic_read(&sk->sk_rmem_alloc));
1797 static inline int tcp_full_space(const struct sock *sk)
1799 return tcp_win_from_space(sk->sk_rcvbuf);
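/* Worked example: with sysctl_tcp_adv_win_scale == 2 and an sk_rcvbuf of
 * 65536 bytes, tcp_win_from_space() yields 65536 - 65536/4 == 49152, i.e.
 * three quarters of the buffer is offered as window and one quarter is
 * reserved as application/overhead slack.
 */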
1802 struct tcp_listen_opt
1804 u8 max_qlen_log; /* log_2 of maximal queued SYNs */
1806 #ifdef CONFIG_ACCEPT_QUEUES
1807 int qlen_young[NUM_ACCEPT_QUEUES];
1813 struct open_request *syn_table[TCP_SYNQ_HSIZE];
1816 #ifdef CONFIG_ACCEPT_QUEUES
1817 static inline void sk_acceptq_removed(struct sock *sk, int class)
1819 tcp_sk(sk)->acceptq[class].aq_backlog--;
1822 static inline void sk_acceptq_added(struct sock *sk, int class)
1824 tcp_sk(sk)->acceptq[class].aq_backlog++;
1827 static inline int sk_acceptq_is_full(struct sock *sk, int class)
1829 return tcp_sk(sk)->acceptq[class].aq_backlog >
1830 sk->sk_max_ack_backlog;
1833 static inline void tcp_set_acceptq(struct tcp_opt *tp, struct open_request *req)
1835 int class = req->acceptq_class;
1838 if (!tp->acceptq[class].aq_ratio) {
1839 req->acceptq_class = 0;
1843 tp->acceptq[class].aq_qcount++;
1844 req->acceptq_time_stamp = jiffies;
1846 if (tp->acceptq[class].aq_tail) {
1847 req->dl_next = tp->acceptq[class].aq_tail->dl_next;
1848 tp->acceptq[class].aq_tail->dl_next = req;
1849 tp->acceptq[class].aq_tail = req;
1850 } else { /* if first request in the class */
1851 tp->acceptq[class].aq_head = req;
1852 tp->acceptq[class].aq_tail = req;
1854 prev_class = class - 1;
1855 while (prev_class >= 0) {
1856 if (tp->acceptq[prev_class].aq_tail)
1860 if (prev_class < 0) {
1861 req->dl_next = tp->accept_queue;
1862 tp->accept_queue = req;
1865 req->dl_next = tp->acceptq[prev_class].aq_tail->dl_next;
1866 tp->acceptq[prev_class].aq_tail->dl_next = req;
1870 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
1873 tcp_set_acceptq(tcp_sk(sk),req);
1875 sk_acceptq_added(sk,req->acceptq_class);
1879 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
1882 struct tcp_opt *tp = tcp_sk(sk);
1885 sk_acceptq_added(sk);
1887 if (!tp->accept_queue_tail) {
1888 tp->accept_queue = req;
1890 tp->accept_queue_tail->dl_next = req;
1892 tp->accept_queue_tail = req;
1893 req->dl_next = NULL;
1899 #ifdef CONFIG_ACCEPT_QUEUES
1901 tcp_synq_removed(struct sock *sk, struct open_request *req)
1903 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1905 if (--lopt->qlen == 0)
1906 tcp_delete_keepalive_timer(sk);
1907 if (req->retrans == 0)
1908 lopt->qlen_young[req->acceptq_class]--;
1911 static inline void tcp_synq_added(struct sock *sk, struct open_request *req)
1913 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1915 if (lopt->qlen++ == 0)
1916 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1917 lopt->qlen_young[req->acceptq_class]++;
1920 static inline int tcp_synq_len(struct sock *sk)
1922 return tcp_sk(sk)->listen_opt->qlen;
1925 static inline int tcp_synq_young(struct sock *sk, int class)
1927 return tcp_sk(sk)->listen_opt->qlen_young[class];
1933 tcp_synq_removed(struct sock *sk, struct open_request *req)
1935 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1937 if (--lopt->qlen == 0)
1938 tcp_delete_keepalive_timer(sk);
1939 if (req->retrans == 0)
1943 static inline void tcp_synq_added(struct sock *sk)
1945 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1947 if (lopt->qlen++ == 0)
1948 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1952 static inline int tcp_synq_len(struct sock *sk)
1954 return tcp_sk(sk)->listen_opt->qlen;
1957 static inline int tcp_synq_young(struct sock *sk)
1959 return tcp_sk(sk)->listen_opt->qlen_young;
1963 static inline int tcp_synq_is_full(struct sock *sk)
1965 return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
1968 static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
1969 struct open_request **prev)
1971 write_lock(&tp->syn_wait_lock);
1972 *prev = req->dl_next;
1973 write_unlock(&tp->syn_wait_lock);
1976 static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
1977 struct open_request **prev)
1979 tcp_synq_unlink(tcp_sk(sk), req, prev);
1980 tcp_synq_removed(sk, req);
1981 tcp_openreq_free(req);
1984 static __inline__ void tcp_openreq_init(struct open_request *req,
1986 struct sk_buff *skb)
1988 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
1989 req->rcv_isn = TCP_SKB_CB(skb)->seq;
1990 req->mss = tp->mss_clamp;
1991 req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
1992 req->tstamp_ok = tp->tstamp_ok;
1993 req->sack_ok = tp->sack_ok;
1994 req->snd_wscale = tp->snd_wscale;
1995 req->wscale_ok = tp->wscale_ok;
1998 req->rmt_port = skb->h.th->source;
2001 extern void tcp_enter_memory_pressure(void);
2003 extern void tcp_listen_wlock(void);
2005 /* - We may sleep inside this lock.
2006 * - If sleeping is not required (or called from BH),
2007 * use plain read_(un)lock(&tcp_lhash_lock).
2010 static inline void tcp_listen_lock(void)
2012 /* read_lock synchronizes candidate readers with writers */
2013 read_lock(&tcp_lhash_lock);
2014 atomic_inc(&tcp_lhash_users);
2015 read_unlock(&tcp_lhash_lock);
2018 static inline void tcp_listen_unlock(void)
2020 if (atomic_dec_and_test(&tcp_lhash_users))
2021 wake_up(&tcp_lhash_wait);
2024 static inline int keepalive_intvl_when(const struct tcp_opt *tp)
2026 return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
2029 static inline int keepalive_time_when(const struct tcp_opt *tp)
2031 return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
2034 static inline int tcp_fin_time(const struct tcp_opt *tp)
2036 int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
2038 if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
2039 fin_timeout = (tp->rto<<2) - (tp->rto>>1);
2044 static inline int tcp_paws_check(const struct tcp_opt *tp, int rst)
2046 if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
2048 if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
2051 /* RST segments are not recommended to carry timestamps,
2052 and, if they do, it is recommended to ignore PAWS because
2053 "their cleanup function should take precedence over timestamps."
2054 Certainly, this is a mistake. It is necessary to understand the reasons
2055 for this constraint before relaxing it: if the peer reboots, its clock may go
2056 out of sync and half-open connections will not be reset.
2057 Actually, the problem would not exist if all
2058 the implementations followed the draft about maintaining clocks
2059 across reboots. Linux-2.2 DOES NOT!
2061 However, we can relax the time bounds for RST segments to MSL.
2063 if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
2068 static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
2070 sk->sk_route_caps = dst->dev->features;
2071 if (sk->sk_route_caps & NETIF_F_TSO) {
2072 if (sk->sk_no_largesend || dst->header_len)
2073 sk->sk_route_caps &= ~NETIF_F_TSO;
2077 #define TCP_CHECK_TIMER(sk) do { } while (0)
2079 static inline int tcp_use_frto(const struct sock *sk)
2081 const struct tcp_opt *tp = tcp_sk(sk);
2083 /* F-RTO must be activated in sysctl and there must be some
2084 * unsent new data, and the advertised window should allow sending it.
2087 return (sysctl_tcp_frto && sk->sk_send_head &&
2088 !after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
2089 tp->snd_una + tp->snd_wnd));
2092 static inline void tcp_mib_init(void)
2095 TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
2096 TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
2097 TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
2098 TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
2102 enum tcp_seq_states {
2103 TCP_SEQ_STATE_LISTENING,
2104 TCP_SEQ_STATE_OPENREQ,
2105 TCP_SEQ_STATE_ESTABLISHED,
2106 TCP_SEQ_STATE_TIME_WAIT,
2109 struct tcp_seq_afinfo {
2110 struct module *owner;
2113 int (*seq_show) (struct seq_file *m, void *v);
2114 struct file_operations *seq_fops;
2117 struct tcp_iter_state {
2119 enum tcp_seq_states state;
2120 struct sock *syn_wait_sk;
2121 int bucket, sbucket, num, uid;
2122 struct seq_operations seq_ops;
2125 extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
2126 extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
2128 /* TCP Westwood functions and constants */
2130 #define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
2131 #define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
2133 static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
2135 if (tcp_is_westwood(tp))
2136 tp->westwood.rtt = rtt_seq;
2139 void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
2140 void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
2142 static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
2144 if (tcp_is_westwood(tcp_sk(sk)))
2145 __tcp_westwood_fast_bw(sk, skb);
2148 static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
2150 if (tcp_is_westwood(tcp_sk(sk)))
2151 __tcp_westwood_slow_bw(sk, skb);
2154 static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
2156 return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
2157 (__u32) (tp->mss_cache_std),
2161 static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
2163 return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
2166 static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
2170 if (tcp_is_westwood(tp)) {
2171 ssthresh = __tcp_westwood_bw_rttmin(tp);
2173 tp->snd_ssthresh = ssthresh;
2176 return (ssthresh != 0);
2179 static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
2183 if (tcp_is_westwood(tp)) {
2184 cwnd = __tcp_westwood_bw_rttmin(tp);
2186 tp->snd_cwnd = cwnd;