2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Definitions for the TCP module.
8 * Version: @(#)tcp.h 1.0.5 05/23/93
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
22 #define FASTRETRANS_DEBUG 1
24 /* Cancel timers, when they are not required. */
25 #undef TCP_CLEAR_TIMERS
27 #include <linux/config.h>
28 #include <linux/list.h>
29 #include <linux/tcp.h>
30 #include <linux/slab.h>
31 #include <linux/cache.h>
32 #include <linux/percpu.h>
33 #include <net/checksum.h>
36 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
37 #include <linux/ipv6.h>
39 #include <linux/seq_file.h>
41 /* This is for all connections with a full identity, no wildcards.
42 * New scheme, half the table is for TIME_WAIT, the other half is
43 * for the rest. I'll experiment with dynamic table growth later.
45 struct tcp_ehash_bucket {
47 struct hlist_head chain;
48 } __attribute__((__aligned__(8)));
50 /* This is for listening sockets, thus all sockets which possess wildcards. */
51 #define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
53 /* There are a few simple rules, which allow for local port reuse by
54 * an application. In essence:
56 * 1) Sockets bound to different interfaces may share a local port.
57 * Failing that, goto test 2.
58 * 2) If all sockets have sk->sk_reuse set, and none of them are in
59 * TCP_LISTEN state, the port may be shared.
60 * Failing that, goto test 3.
61 * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
62 * address, and none of them are the same, the port may be shared.
64 * Failing this, the port cannot be shared.
66 * The interesting point is test #2. This is what an FTP server does
67 * all day. To optimize this case we use a specific flag bit defined
68 * below. As we add sockets to a bind bucket list, we perform a
69 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
70 * As long as all sockets added to a bind bucket pass this test,
71 * the flag bit will be set.
72 * The resulting situation is that tcp_v[46]_verify_bind() can just check
73 * for this flag bit, if it is set and the socket trying to bind has
74 * sk->sk_reuse set, we don't even have to walk the owners list at all,
75 * we return that it is ok to bind this socket to the requested local port.
77 * Sounds like a lot of work, but it is worth it. In a more naive
78 * implementation (i.e. current FreeBSD etc.) the entire list of ports
79 * must be walked for each data port opened by an ftp server. Needless
80 * to say, this does not scale at all. With a couple thousand FTP
81 * users logged onto your box, isn't it nice to know that new data
82 * ports are created in O(1) time? I thought so. ;-) -DaveM
84 struct tcp_bind_bucket {
86 signed short fastreuse;
87 struct hlist_node node;
88 struct hlist_head owners;
91 #define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
93 struct tcp_bind_hashbucket {
95 struct hlist_head chain;
98 static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
100 return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
103 static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
105 return hlist_empty(&head->chain) ? NULL : __tb_head(head);
108 extern struct tcp_hashinfo {
109 /* This is for sockets with full identity only. Sockets here will
110 * always be without wildcards and will have the following invariant:
112 * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
114 * First half of the table is for sockets not in TIME_WAIT, second half
115 * is for TIME_WAIT sockets only.
117 struct tcp_ehash_bucket *__tcp_ehash;
119 /* Ok, let's try this, I give up, we do need a local binding
120 * TCP hash as well as the others for fast bind/connect.
122 struct tcp_bind_hashbucket *__tcp_bhash;
124 int __tcp_bhash_size;
125 int __tcp_ehash_size;
127 /* All sockets in TCP_LISTEN state will be in here. This is the only
128 * table where wildcard'd TCP sockets can exist. Hash function here
129 * is just local port number.
131 struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
133 /* All the above members are written once at bootup and
134 * never written again _or_ are predominantly read-access.
136 * Now align to a new cache line as all the following members are often dirty.
139 rwlock_t __tcp_lhash_lock ____cacheline_aligned;
140 atomic_t __tcp_lhash_users;
141 wait_queue_head_t __tcp_lhash_wait;
142 spinlock_t __tcp_portalloc_lock;
145 #define tcp_ehash (tcp_hashinfo.__tcp_ehash)
146 #define tcp_bhash (tcp_hashinfo.__tcp_bhash)
147 #define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
148 #define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
149 #define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
150 #define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
151 #define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
152 #define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
153 #define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
155 /* SLAB cache for TCP socks */
156 extern kmem_cache_t *tcp_sk_cachep;
158 extern kmem_cache_t *tcp_bucket_cachep;
159 extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
160 unsigned short snum);
161 extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
162 extern void tcp_bucket_unlock(struct sock *sk);
163 extern int tcp_port_rover;
164 extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
166 /* These are AF independent. */
167 static __inline__ int tcp_bhashfn(__u16 lport)
169 return (lport & (tcp_bhash_size - 1));
172 extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
173 unsigned short snum);
175 #if (BITS_PER_LONG == 64)
176 #define TCP_ADDRCMP_ALIGN_BYTES 8
178 #define TCP_ADDRCMP_ALIGN_BYTES 4
181 /* This is a TIME_WAIT bucket. It works around the memory consumption
182 * problems of sockets in such a state on heavily loaded servers, but
183 * without violating the protocol specification.
185 struct tcp_tw_bucket {
187 * Now struct sock also uses sock_common, so please just
188 * don't add anything before this first member (__tw_common) --acme
190 struct sock_common __tw_common;
191 #define tw_family __tw_common.skc_family
192 #define tw_state __tw_common.skc_state
193 #define tw_reuse __tw_common.skc_reuse
194 #define tw_bound_dev_if __tw_common.skc_bound_dev_if
195 #define tw_node __tw_common.skc_node
196 #define tw_bind_node __tw_common.skc_bind_node
197 #define tw_refcnt __tw_common.skc_refcnt
198 #define tw_xid __tw_common.skc_xid
199 #define tw_vx_info __tw_common.skc_vx_info
200 #define tw_nid __tw_common.skc_nid
201 #define tw_nx_info __tw_common.skc_nx_info
202 volatile unsigned char tw_substate;
203 unsigned char tw_rcv_wscale;
205 /* Socket demultiplex comparisons on incoming packets. */
206 /* these five are in inet_opt */
208 __attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
212 /* And these are ours. */
219 long tw_ts_recent_stamp;
220 unsigned long tw_ttd;
221 struct tcp_bind_bucket *tw_tb;
222 struct hlist_node tw_death_node;
223 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
224 struct in6_addr tw_v6_daddr;
225 struct in6_addr tw_v6_rcv_saddr;
230 static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
231 struct hlist_head *list)
233 hlist_add_head(&tw->tw_node, list);
236 static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
237 struct hlist_head *list)
239 hlist_add_head(&tw->tw_bind_node, list);
242 static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
244 return tw->tw_death_node.pprev != NULL;
247 static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
249 tw->tw_death_node.pprev = NULL;
252 static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
254 __hlist_del(&tw->tw_death_node);
255 tw_dead_node_init(tw);
258 static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
260 if (tw_dead_hashed(tw)) {
261 __tw_del_dead_node(tw);
267 #define tw_for_each(tw, node, head) \
268 hlist_for_each_entry(tw, node, head, tw_node)
270 #define tw_for_each_inmate(tw, node, jail) \
271 hlist_for_each_entry(tw, node, jail, tw_death_node)
273 #define tw_for_each_inmate_safe(tw, node, safe, jail) \
274 hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
276 #define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
278 static inline const u32 tcp_v4_rcv_saddr(const struct sock *sk)
280 return likely(sk->sk_state != TCP_TIME_WAIT) ?
281 inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
284 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
285 static inline const struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
287 return likely(sk->sk_state != TCP_TIME_WAIT) ?
288 &inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
291 static inline const struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
293 return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
296 #define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only)
298 static inline int tcp_v6_ipv6only(const struct sock *sk)
300 return likely(sk->sk_state != TCP_TIME_WAIT) ?
301 ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
304 # define __tcp_v6_rcv_saddr(__sk) NULL
305 # define tcp_v6_rcv_saddr(__sk) NULL
306 # define tcptw_sk_ipv6only(__sk) 0
307 # define tcp_v6_ipv6only(__sk) 0
310 extern kmem_cache_t *tcp_timewait_cachep;
312 static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
314 if (atomic_dec_and_test(&tw->tw_refcnt)) {
315 #ifdef INET_REFCNT_DEBUG
316 printk(KERN_DEBUG "tw_bucket %p released\n", tw);
318 kmem_cache_free(tcp_timewait_cachep, tw);
322 extern atomic_t tcp_orphan_count;
323 extern int tcp_tw_count;
324 extern void tcp_time_wait(struct sock *sk, int state, int timeo);
325 extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
326 extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
329 /* Socket demux engine toys. */
331 #define TCP_COMBINED_PORTS(__sport, __dport) \
332 (((__u32)(__sport)<<16) | (__u32)(__dport))
333 #else /* __LITTLE_ENDIAN */
334 #define TCP_COMBINED_PORTS(__sport, __dport) \
335 (((__u32)(__dport)<<16) | (__u32)(__sport))
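/* Illustrative example (not in the original header): with __sport = 0x1234
 * and __dport = 0x0050 the little-endian variant above packs the pair into
 * the single word 0x00501234, so the demux code can compare both ports of an
 * incoming segment with one 32-bit load (see TCP_IPV4_MATCH below).
 */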
338 #if (BITS_PER_LONG == 64)
340 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
341 __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
342 #else /* __LITTLE_ENDIAN */
343 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
344 __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
345 #endif /* __BIG_ENDIAN */
346 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
347 (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
348 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
349 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
350 #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
351 (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
352 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
353 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
354 #else /* 32-bit arch */
355 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
356 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
357 ((inet_sk(__sk)->daddr == (__saddr)) && \
358 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
359 ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
360 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
361 #define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
362 ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
363 (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
364 ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
365 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
366 #endif /* 64-bit arch */
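/* Illustrative note: the 64-bit TCP_IPV4_MATCH above reads inet_sk(sk)->daddr
 * through a __u64 pointer, which relies on rcv_saddr being laid out directly
 * after daddr (and num directly after dport) in the socket; the 32-bit-arch
 * variants fall back to comparing the two addresses separately.
 */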
368 #define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
369 (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
370 ((__sk)->sk_family == AF_INET6) && \
371 !ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr)) && \
372 !ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
373 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
375 /* These can have wildcards, don't try too hard. */
376 static __inline__ int tcp_lhashfn(unsigned short num)
378 return num & (TCP_LHTABLE_SIZE - 1);
381 static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
383 return tcp_lhashfn(inet_sk(sk)->num);
386 #define MAX_TCP_HEADER (128 + MAX_HEADER)
389 * Never offer a window over 32767 without using window scaling. Some
390 * poor stacks do signed 16-bit maths!
392 #define MAX_TCP_WINDOW 32767U
394 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
395 #define TCP_MIN_MSS 88U
397 /* Minimal RCV_MSS. */
398 #define TCP_MIN_RCVMSS 536U
400 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
401 #define TCP_FASTRETRANS_THRESH 3
403 /* Maximal reordering. */
404 #define TCP_MAX_REORDERING 127
406 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
407 #define TCP_MAX_QUICKACKS 16U
409 /* urg_data states */
410 #define TCP_URG_VALID 0x0100
411 #define TCP_URG_NOTYET 0x0200
412 #define TCP_URG_READ 0x0400
414 #define TCP_RETR1 3 /*
415 * This is how many retries it does before it
416 * tries to figure out if the gateway is
417 * down. Minimal RFC value is 3; it corresponds
418 * to ~3sec-8min depending on RTO.
421 #define TCP_RETR2 15 /*
422 * This should take at least
423 * 90 minutes to time out.
424 * RFC1122 says that the limit is 100 sec.
425 * 15 is ~13-30min depending on RTO.
428 #define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
429 * connection: ~180sec is RFC minimum */
431 #define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
432 * connection: ~180sec is RFC minimum */
435 #define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
436 * socket. 7 is ~50sec-16min.
440 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
441 * state, about 60 seconds */
442 #define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
443 /* BSD style FIN_WAIT2 deadlock breaker.
444 * It used to be 3 min; the new value is 60 sec,
445 * to combine the FIN-WAIT-2 timeout with the TIME-WAIT timeout.
449 #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
451 #define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
452 #define TCP_ATO_MIN ((unsigned)(HZ/25))
454 #define TCP_DELACK_MIN 4U
455 #define TCP_ATO_MIN 4U
457 #define TCP_RTO_MAX ((unsigned)(120*HZ))
458 #define TCP_RTO_MIN ((unsigned)(HZ/5))
459 #define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */
461 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
462 * for local resources.
465 #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
466 #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
467 #define TCP_KEEPALIVE_INTVL (75*HZ)
469 #define MAX_TCP_KEEPIDLE 32767
470 #define MAX_TCP_KEEPINTVL 32767
471 #define MAX_TCP_KEEPCNT 127
472 #define MAX_TCP_SYNCNT 127
474 #define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
475 #define TCP_SYNQ_HSIZE 512 /* Size of SYNACK hash table */
477 #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
478 #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
479 * after this time. It should be equal to
480 * (or greater than) TCP_TIMEWAIT_LEN
481 * to provide reliability equal to that
482 * provided by the TIME-WAIT state.
484 #define TCP_PAWS_WINDOW 1 /* Replay window for per-host
485 * timestamps. It must be less than
486 * minimal timewait lifetime.
489 #define TCP_TW_RECYCLE_SLOTS_LOG 5
490 #define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
492 /* If time > 4 sec, it is the "slow" path and no recycling is required,
493 so we select the tick to get a range of about 4 seconds.
496 #if HZ <= 16 || HZ > 4096
497 # error Unsupported: HZ <= 16 or HZ > 4096
499 # define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
501 # define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
503 # define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
505 # define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
507 # define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
509 # define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
511 # define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
513 # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
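/* Worked example (illustrative): with HZ=1000 the HZ <= 1024 branch applies,
 * so TCP_TW_RECYCLE_TICK = 10+2-TCP_TW_RECYCLE_SLOTS_LOG = 7. One recycle slot
 * then spans 2^7 = 128 jiffies (128 ms), and 32 slots cover ~4.1 seconds,
 * matching the "about 4 seconds" range mentioned above.
 */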
516 #define BICTCP_1_OVER_BETA 8 /*
518 * multiplicative decrease factor
520 #define BICTCP_MAX_INCREMENT 32 /*
521 * Limit on the amount of
522 * increment allowed during
525 #define BICTCP_FUNC_OF_MIN_INCR 11 /*
526 * log(B/Smin)/log(B/(B-1))+1,
530 #define BICTCP_B 4 /*
532 * go to point (max+min)/N
539 #define TCPOPT_NOP 1 /* Padding */
540 #define TCPOPT_EOL 0 /* End of options */
541 #define TCPOPT_MSS 2 /* Segment size negotiating */
542 #define TCPOPT_WINDOW 3 /* Window scaling */
543 #define TCPOPT_SACK_PERM 4 /* SACK Permitted */
544 #define TCPOPT_SACK 5 /* SACK Block */
545 #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
551 #define TCPOLEN_MSS 4
552 #define TCPOLEN_WINDOW 3
553 #define TCPOLEN_SACK_PERM 2
554 #define TCPOLEN_TIMESTAMP 10
556 /* But this is what stacks really send out. */
557 #define TCPOLEN_TSTAMP_ALIGNED 12
558 #define TCPOLEN_WSCALE_ALIGNED 4
559 #define TCPOLEN_SACKPERM_ALIGNED 4
560 #define TCPOLEN_SACK_BASE 2
561 #define TCPOLEN_SACK_BASE_ALIGNED 4
562 #define TCPOLEN_SACK_PERBLOCK 8
564 #define TCP_TIME_RETRANS 1 /* Retransmit timer */
565 #define TCP_TIME_DACK 2 /* Delayed ack timer */
566 #define TCP_TIME_PROBE0 3 /* Zero window probe timer */
567 #define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
569 /* Flags in tp->nonagle */
570 #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
571 #define TCP_NAGLE_CORK 2 /* Socket is corked */
572 #define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
574 /* sysctl variables for tcp */
575 extern int sysctl_max_syn_backlog;
576 extern int sysctl_tcp_timestamps;
577 extern int sysctl_tcp_window_scaling;
578 extern int sysctl_tcp_sack;
579 extern int sysctl_tcp_fin_timeout;
580 extern int sysctl_tcp_tw_recycle;
581 extern int sysctl_tcp_keepalive_time;
582 extern int sysctl_tcp_keepalive_probes;
583 extern int sysctl_tcp_keepalive_intvl;
584 extern int sysctl_tcp_syn_retries;
585 extern int sysctl_tcp_synack_retries;
586 extern int sysctl_tcp_retries1;
587 extern int sysctl_tcp_retries2;
588 extern int sysctl_tcp_orphan_retries;
589 extern int sysctl_tcp_syncookies;
590 extern int sysctl_tcp_retrans_collapse;
591 extern int sysctl_tcp_stdurg;
592 extern int sysctl_tcp_rfc1337;
593 extern int sysctl_tcp_abort_on_overflow;
594 extern int sysctl_tcp_max_orphans;
595 extern int sysctl_tcp_max_tw_buckets;
596 extern int sysctl_tcp_fack;
597 extern int sysctl_tcp_reordering;
598 extern int sysctl_tcp_ecn;
599 extern int sysctl_tcp_dsack;
600 extern int sysctl_tcp_mem[3];
601 extern int sysctl_tcp_wmem[3];
602 extern int sysctl_tcp_rmem[3];
603 extern int sysctl_tcp_app_win;
604 extern int sysctl_tcp_adv_win_scale;
605 extern int sysctl_tcp_tw_reuse;
606 extern int sysctl_tcp_frto;
607 extern int sysctl_tcp_low_latency;
608 extern int sysctl_tcp_westwood;
609 extern int sysctl_tcp_vegas_cong_avoid;
610 extern int sysctl_tcp_vegas_alpha;
611 extern int sysctl_tcp_vegas_beta;
612 extern int sysctl_tcp_vegas_gamma;
613 extern int sysctl_tcp_nometrics_save;
614 extern int sysctl_tcp_bic;
615 extern int sysctl_tcp_bic_fast_convergence;
616 extern int sysctl_tcp_bic_low_window;
617 extern int sysctl_tcp_default_win_scale;
618 extern int sysctl_tcp_moderate_rcvbuf;
620 extern atomic_t tcp_memory_allocated;
621 extern atomic_t tcp_sockets_allocated;
622 extern int tcp_memory_pressure;
626 struct or_calltable {
628 int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
629 void (*send_ack) (struct sk_buff *skb, struct open_request *req);
630 void (*destructor) (struct open_request *req);
631 void (*send_reset) (struct sk_buff *skb);
634 struct tcp_v4_open_req {
637 struct ip_options *opt;
640 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
641 struct tcp_v6_open_req {
642 struct in6_addr loc_addr;
643 struct in6_addr rmt_addr;
644 struct sk_buff *pktopts;
649 /* this structure is too big */
650 struct open_request {
651 struct open_request *dl_next; /* Must be first member! */
658 __u16 snd_wscale : 4,
665 /* The following two fields can be easily recomputed I think -AK */
666 __u32 window_clamp; /* window clamp at creation time */
667 __u32 rcv_wnd; /* rcv_wnd offered first time */
669 unsigned long expires;
670 struct or_calltable *class;
673 struct tcp_v4_open_req v4_req;
674 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
675 struct tcp_v6_open_req v6_req;
678 #ifdef CONFIG_ACCEPT_QUEUES
679 unsigned long acceptq_time_stamp;
684 /* SLAB cache for open requests. */
685 extern kmem_cache_t *tcp_openreq_cachep;
687 #define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
688 #define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
690 static inline void tcp_openreq_free(struct open_request *req)
692 req->class->destructor(req);
693 tcp_openreq_fastfree(req);
696 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
697 #define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
699 #define TCP_INET_FAMILY(fam) 1
703 * Pointers to address related TCP functions
704 * (i.e. things that depend on the address family)
708 int (*queue_xmit) (struct sk_buff *skb,
711 void (*send_check) (struct sock *sk,
714 struct sk_buff *skb);
716 int (*rebuild_header) (struct sock *sk);
718 int (*conn_request) (struct sock *sk,
719 struct sk_buff *skb);
721 struct sock * (*syn_recv_sock) (struct sock *sk,
723 struct open_request *req,
724 struct dst_entry *dst);
726 int (*remember_stamp) (struct sock *sk);
728 __u16 net_header_len;
730 int (*setsockopt) (struct sock *sk,
736 int (*getsockopt) (struct sock *sk,
743 void (*addr2sockaddr) (struct sock *sk,
750 * The next routines deal with comparing 32 bit unsigned ints
751 * and worry about wraparound (automatic with unsigned arithmetic).
754 static inline int before(__u32 seq1, __u32 seq2)
756 return (__s32)(seq1-seq2) < 0;
759 static inline int after(__u32 seq1, __u32 seq2)
761 return (__s32)(seq2-seq1) < 0;
765 /* is s2<=s1<=s3 ? */
766 static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
768 return seq3 - seq2 >= seq1 - seq2;
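/* Worked example (illustrative): these comparisons are wraparound safe
 * because the difference is taken modulo 2^32 and interpreted as signed, so
 * before(0xfffffff0U, 0x00000010U), after(0x00000010U, 0xfffffff0U) and
 * between(0x00000005U, 0xfffffff0U, 0x00000010U) are all true even though the
 * sequence space has just wrapped.
 */

/* Usage sketch (hypothetical helper, not part of the original header):
 * a wraparound-safe check that an ACK falls inside the current send window,
 * i.e. snd_una < ack <= snd_nxt modulo 2^32.
 */
static inline int tcp_ack_in_window_example(__u32 ack, __u32 snd_una, __u32 snd_nxt)
{
	return after(ack, snd_una) && !after(ack, snd_nxt);
}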
772 extern struct proto tcp_prot;
774 DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
775 #define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
776 #define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
777 #define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
778 #define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
779 #define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
780 #define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
782 extern void tcp_put_port(struct sock *sk);
783 extern void tcp_inherit_port(struct sock *sk, struct sock *child);
785 extern void tcp_v4_err(struct sk_buff *skb, u32);
787 extern void tcp_shutdown (struct sock *sk, int how);
789 extern int tcp_v4_rcv(struct sk_buff *skb);
791 extern int tcp_v4_remember_stamp(struct sock *sk);
793 extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
795 extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
796 struct msghdr *msg, size_t size);
797 extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
799 extern int tcp_ioctl(struct sock *sk,
803 extern int tcp_rcv_state_process(struct sock *sk,
808 extern int tcp_rcv_established(struct sock *sk,
813 extern void tcp_rcv_space_adjust(struct sock *sk);
822 static inline void tcp_schedule_ack(struct tcp_opt *tp)
824 tp->ack.pending |= TCP_ACK_SCHED;
827 static inline int tcp_ack_scheduled(struct tcp_opt *tp)
829 return tp->ack.pending&TCP_ACK_SCHED;
832 static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
834 if (tp->ack.quick && --tp->ack.quick == 0) {
835 /* Leaving quickack mode we deflate ATO. */
836 tp->ack.ato = TCP_ATO_MIN;
840 extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
842 static __inline__ void tcp_delack_init(struct tcp_opt *tp)
844 memset(&tp->ack, 0, sizeof(tp->ack));
847 static inline void tcp_clear_options(struct tcp_opt *tp)
849 tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
861 extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
866 extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
867 struct open_request *req,
868 struct open_request **prev);
869 extern int tcp_child_process(struct sock *parent,
871 struct sk_buff *skb);
872 extern void tcp_enter_frto(struct sock *sk);
873 extern void tcp_enter_loss(struct sock *sk, int how);
874 extern void tcp_clear_retrans(struct tcp_opt *tp);
875 extern void tcp_update_metrics(struct sock *sk);
877 extern void tcp_close(struct sock *sk,
879 extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
880 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
881 extern void tcp_write_space(struct sock *sk);
883 extern int tcp_getsockopt(struct sock *sk, int level,
887 extern int tcp_setsockopt(struct sock *sk, int level,
888 int optname, char __user *optval,
890 extern void tcp_set_keepalive(struct sock *sk, int val);
891 extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
893 size_t len, int nonblock,
894 int flags, int *addr_len);
896 extern int tcp_listen_start(struct sock *sk);
898 extern void tcp_parse_options(struct sk_buff *skb,
903 * TCP v4 functions exported for the inet6 API
906 extern int tcp_v4_rebuild_header(struct sock *sk);
908 extern int tcp_v4_build_header(struct sock *sk,
909 struct sk_buff *skb);
911 extern void tcp_v4_send_check(struct sock *sk,
912 struct tcphdr *th, int len,
913 struct sk_buff *skb);
915 extern int tcp_v4_conn_request(struct sock *sk,
916 struct sk_buff *skb);
918 extern struct sock * tcp_create_openreq_child(struct sock *sk,
919 struct open_request *req,
920 struct sk_buff *skb);
922 extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
924 struct open_request *req,
925 struct dst_entry *dst);
927 extern int tcp_v4_do_rcv(struct sock *sk,
928 struct sk_buff *skb);
930 extern int tcp_v4_connect(struct sock *sk,
931 struct sockaddr *uaddr,
934 extern int tcp_connect(struct sock *sk);
936 extern struct sk_buff * tcp_make_synack(struct sock *sk,
937 struct dst_entry *dst,
938 struct open_request *req);
940 extern int tcp_disconnect(struct sock *sk, int flags);
942 extern void tcp_unhash(struct sock *sk);
944 extern int tcp_v4_hash_connecting(struct sock *sk);
947 /* From syncookies.c */
948 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
949 struct ip_options *opt);
950 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
955 extern int tcp_write_xmit(struct sock *, int nonagle);
956 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
957 extern void tcp_xmit_retransmit_queue(struct sock *);
958 extern void tcp_simple_retransmit(struct sock *);
960 extern void tcp_send_probe0(struct sock *);
961 extern void tcp_send_partial(struct sock *);
962 extern int tcp_write_wakeup(struct sock *);
963 extern void tcp_send_fin(struct sock *sk);
964 extern void tcp_send_active_reset(struct sock *sk, int priority);
965 extern int tcp_send_synack(struct sock *);
966 extern int tcp_transmit_skb(struct sock *, struct sk_buff *);
967 extern void tcp_push_one(struct sock *, unsigned mss_now);
968 extern void tcp_send_ack(struct sock *sk);
969 extern void tcp_send_delayed_ack(struct sock *sk);
970 extern void cleanup_rbuf(struct sock *sk, int copied);
973 extern void tcp_init_xmit_timers(struct sock *);
974 extern void tcp_clear_xmit_timers(struct sock *);
976 extern void tcp_delete_keepalive_timer (struct sock *);
977 extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
978 extern int tcp_sync_mss(struct sock *sk, u32 pmtu);
980 extern const char timer_bug_msg[];
982 /* Read 'sendfile()'-style from a TCP socket */
983 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
984 unsigned int, size_t);
985 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
986 sk_read_actor_t recv_actor);
988 static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
990 struct tcp_opt *tp = tcp_sk(sk);
993 case TCP_TIME_RETRANS:
994 case TCP_TIME_PROBE0:
997 #ifdef TCP_CLEAR_TIMERS
998 if (timer_pending(&tp->retransmit_timer) &&
999 del_timer(&tp->retransmit_timer))
1004 tp->ack.blocked = 0;
1005 tp->ack.pending = 0;
1007 #ifdef TCP_CLEAR_TIMERS
1008 if (timer_pending(&tp->delack_timer) &&
1009 del_timer(&tp->delack_timer))
1014 printk(timer_bug_msg);
1021 * Reset the retransmission timer
1023 static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
1025 struct tcp_opt *tp = tcp_sk(sk);
1027 if (when > TCP_RTO_MAX) {
1029 printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
1035 case TCP_TIME_RETRANS:
1036 case TCP_TIME_PROBE0:
1038 tp->timeout = jiffies+when;
1039 if (!mod_timer(&tp->retransmit_timer, tp->timeout))
1044 tp->ack.pending |= TCP_ACK_TIMER;
1045 tp->ack.timeout = jiffies+when;
1046 if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
1051 printk(KERN_DEBUG "bug: unknown timer value\n");
1055 /* Compute the current effective MSS, taking SACKs and IP options,
1056 * and even PMTU discovery events into account.
1058 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
1059 * cannot be large. However, taking into account rare use of URG, this
1060 * is not a big flaw.
1063 static __inline__ unsigned int tcp_current_mss(struct sock *sk, int large)
1065 struct tcp_opt *tp = tcp_sk(sk);
1066 struct dst_entry *dst = __sk_dst_get(sk);
1067 int mss_now = large && (sk->sk_route_caps & NETIF_F_TSO) &&
1069 tp->mss_cache : tp->mss_cache_std;
1072 u32 mtu = dst_pmtu(dst);
1073 if (mtu != tp->pmtu_cookie ||
1074 tp->ext2_header_len != dst->header_len)
1075 mss_now = tcp_sync_mss(sk, mtu);
1078 mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
1079 (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
1083 /* Initialize RCV_MSS value.
1084 * RCV_MSS is our guess about the MSS used by the peer.
1085 * We don't have any direct information about the MSS.
1086 * It's better to underestimate the RCV_MSS rather than overestimate it.
1087 * Overestimations make us ACK less frequently than needed.
1088 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
1091 static inline void tcp_initialize_rcv_mss(struct sock *sk)
1093 struct tcp_opt *tp = tcp_sk(sk);
1094 unsigned int hint = min(tp->advmss, tp->mss_cache_std);
1096 hint = min(hint, tp->rcv_wnd/2);
1097 hint = min(hint, TCP_MIN_RCVMSS);
1098 hint = max(hint, TCP_MIN_MSS);
1100 tp->ack.rcv_mss = hint;
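/* Worked example (illustrative): with advmss = mss_cache_std = 1460 and
 * rcv_wnd = 65535 the hint becomes min(1460, 32767) = 1460, then
 * min(1460, TCP_MIN_RCVMSS) = 536, then max(536, TCP_MIN_MSS) = 536, i.e. we
 * deliberately start from a conservative 536-byte guess of the peer's MSS.
 */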
1103 static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
1105 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1106 ntohl(TCP_FLAG_ACK) |
1110 static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
1112 __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
1115 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
1117 if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
1119 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
1121 tcp_fast_path_on(tp);
1124 /* Compute the actual receive window we are currently advertising.
1125 * Rcv_nxt can be after the window if our peer pushes more data
1126 * than the offered window.
1128 static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
1130 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
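/* Worked example (illustrative): if we last advertised rcv_wnd = 65535 when
 * rcv_wup was 1000 and have since received up to rcv_nxt = 41000, the window
 * still open to the peer is 1000 + 65535 - 41000 = 25535 bytes (clamped to 0
 * if the peer overran the offer).
 */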
1137 /* Choose a new window, without checks for shrinking, and without
1138 * scaling applied to the result. The caller does these things
1139 * if necessary. This is a "raw" window selection.
1141 extern u32 __tcp_select_window(struct sock *sk);
1143 /* TCP timestamps are only 32 bits; this causes a slight
1144 * complication on 64-bit systems since we store a snapshot
1145 * of jiffies in the buffer control blocks below. We decidedly
1146 * use only the low 32 bits of jiffies and hide the ugly
1147 * casts with the following macro.
1149 #define tcp_time_stamp ((__u32)(jiffies))
1151 /* This is what the send packet queueing engine uses to pass
1152 * TCP per-packet control information to the transmission
1153 * code. We also store the host-order sequence numbers
1154 * here. This is 36 bytes on 32-bit architectures and
1155 * 40 bytes on 64-bit machines; if this grows, please adjust
1156 * skbuff.h:skbuff->cb[xxx] size appropriately.
1160 struct inet_skb_parm h4;
1161 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
1162 struct inet6_skb_parm h6;
1164 } header; /* For incoming frames */
1165 __u32 seq; /* Starting sequence number */
1166 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
1167 __u32 when; /* used to compute rtt's */
1168 __u8 flags; /* TCP header flags. */
1170 /* NOTE: These must match up to the flags byte in a
1173 #define TCPCB_FLAG_FIN 0x01
1174 #define TCPCB_FLAG_SYN 0x02
1175 #define TCPCB_FLAG_RST 0x04
1176 #define TCPCB_FLAG_PSH 0x08
1177 #define TCPCB_FLAG_ACK 0x10
1178 #define TCPCB_FLAG_URG 0x20
1179 #define TCPCB_FLAG_ECE 0x40
1180 #define TCPCB_FLAG_CWR 0x80
1182 __u8 sacked; /* State flags for SACK/FACK. */
1183 #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
1184 #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
1185 #define TCPCB_LOST 0x04 /* SKB is lost */
1186 #define TCPCB_TAGBITS 0x07 /* All tag bits */
1188 #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
1189 #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
1191 #define TCPCB_URG 0x20 /* Urgent pointer advanced here */
1193 #define TCPCB_AT_TAIL (TCPCB_URG)
1195 __u16 urg_ptr; /* Valid when the URG flag is set. */
1196 __u32 ack_seq; /* Sequence number ACK'd */
1199 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
1201 #define for_retrans_queue(skb, sk, tp) \
1202 for (skb = (sk)->sk_write_queue.next; \
1203 (skb != (tp)->send_head) && \
1204 (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
1208 #include <net/tcp_ecn.h>
1212 * Compute minimal free write space needed to queue new packets.
1214 static inline int tcp_min_write_space(struct sock *sk)
1216 return sk->sk_wmem_queued / 2;
1219 static inline int tcp_wspace(struct sock *sk)
1221 return sk->sk_sndbuf - sk->sk_wmem_queued;
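/* Illustrative sketch (hypothetical helper, not part of the original header):
 * the "enough room to write" test used, in spirit, by the poll and write-space
 * wakeup paths, built from the two helpers above.
 */
static inline int tcp_writeable_example(struct sock *sk)
{
	return tcp_wspace(sk) >= tcp_min_write_space(sk);
}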
1225 /* This determines how many packets are "in the network" to the best
1226 * of our knowledge. In many cases it is conservative, but where
1227 * detailed information is available from the receiver (via SACK
1228 * blocks etc.) we can make more aggressive calculations.
1230 * Use this for decisions involving congestion control, use just
1231 * tp->packets_out to determine if the send queue is empty or not.
1233 * Read this equation as:
1235 * "Packets sent once on transmission queue" MINUS
1236 * "Packets left network, but not honestly ACKed yet" PLUS
1237 * "Packets fast retransmitted"
1239 static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
1241 return tp->packets_out - tp->left_out + tp->retrans_out;
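/* Worked example (illustrative): with packets_out = 10, sacked_out = 3,
 * lost_out = 2 and retrans_out = 1, tcp_sync_left_out() below gives
 * left_out = 5, so tcp_packets_in_flight() = 10 - 5 + 1 = 6 segments are
 * still assumed to be in the network.
 */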
1244 /* Recalculate snd_ssthresh, we want to set it to:
1245 *
1246 * Reno:
1247 * one half the current congestion window, but no
1248 * less than two segments
1249 *
1250 * BIC:
1251 * behave like Reno until low_window is reached,
1252 * then increase the congestion window slowly
1254 static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
1256 if (sysctl_tcp_bic) {
1257 if (sysctl_tcp_bic_fast_convergence &&
1258 tp->snd_cwnd < tp->bictcp.last_max_cwnd)
1259 tp->bictcp.last_max_cwnd
1260 = (tp->snd_cwnd * (2*BICTCP_1_OVER_BETA-1))
1261 / (BICTCP_1_OVER_BETA/2);
1263 tp->bictcp.last_max_cwnd = tp->snd_cwnd;
1265 if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
1266 return max(tp->snd_cwnd - (tp->snd_cwnd/BICTCP_1_OVER_BETA),
1270 return max(tp->snd_cwnd >> 1U, 2U);
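/* Worked example (illustrative): in the plain Reno case with snd_cwnd = 20
 * the new ssthresh is max(20 >> 1, 2) = 10 segments; the floor of two
 * segments only matters for very small windows.
 */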
1273 /* Stop taking Vegas samples for now. */
1274 #define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
1276 /* Is this TCP connection using Vegas (regardless of whether it is taking
1277 * Vegas measurements at the current time)?
1279 #define tcp_is_vegas(__tp) ((__tp)->vegas.do_vegas)
1281 static inline void tcp_vegas_enable(struct tcp_opt *tp)
1283 /* There are several situations when we must "re-start" Vegas:
1285 * o when a connection is established
1287 * o after fast recovery
1288 * o when we send a packet and there is no outstanding
1289 * unacknowledged data (restarting an idle connection)
1291 * In these circumstances we cannot do a Vegas calculation at the
1292 * end of the first RTT, because any calculation we do is using
1293 * stale info -- both the saved cwnd and congestion feedback are stale.
1296 * Instead we must wait until the completion of an RTT during
1297 * which we actually receive ACKs.
1300 /* Begin taking Vegas samples next time we send something. */
1301 tp->vegas.doing_vegas_now = 1;
1303 /* Set the beginning of the next send window. */
1304 tp->vegas.beg_snd_nxt = tp->snd_nxt;
1306 tp->vegas.cntRTT = 0;
1307 tp->vegas.minRTT = 0x7fffffff;
1310 /* Should we be taking Vegas samples right now? */
1311 #define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
1313 extern void tcp_vegas_init(struct tcp_opt *tp);
1315 static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
1317 if (tcp_is_vegas(tp)) {
1318 if (ca_state == TCP_CA_Open)
1319 tcp_vegas_enable(tp);
1321 tcp_vegas_disable(tp);
1323 tp->ca_state = ca_state;
1326 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1327 * The exception is the rate-halving phase, when cwnd is decreasing towards ssthresh.
1330 static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
1332 if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
1333 return tp->snd_ssthresh;
1335 return max(tp->snd_ssthresh,
1336 ((tp->snd_cwnd >> 1) +
1337 (tp->snd_cwnd >> 2)));
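/* Worked example (illustrative): outside CWR/Recovery with snd_cwnd = 16
 * this returns max(snd_ssthresh, 8 + 4) = max(snd_ssthresh, 12), i.e.
 * ssthresh may be raised to three quarters of the current cwnd.
 */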
1340 static inline void tcp_sync_left_out(struct tcp_opt *tp)
1342 if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
1343 tp->sacked_out = tp->packets_out - tp->lost_out;
1344 tp->left_out = tp->sacked_out + tp->lost_out;
1347 extern void tcp_cwnd_application_limited(struct sock *sk);
1349 /* Congestion window validation. (RFC2861) */
1351 static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
1353 if (tp->packets_out >= tp->snd_cwnd) {
1354 /* Network is fed fully. */
1355 tp->snd_cwnd_used = 0;
1356 tp->snd_cwnd_stamp = tcp_time_stamp;
1358 /* Network starves. */
1359 if (tp->packets_out > tp->snd_cwnd_used)
1360 tp->snd_cwnd_used = tp->packets_out;
1362 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
1363 tcp_cwnd_application_limited(sk);
1367 /* Set the slow start threshold and cwnd without falling back to slow start */
1368 static inline void __tcp_enter_cwr(struct tcp_opt *tp)
1370 tp->undo_marker = 0;
1371 tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
1372 tp->snd_cwnd = min(tp->snd_cwnd,
1373 tcp_packets_in_flight(tp) + 1U);
1374 tp->snd_cwnd_cnt = 0;
1375 tp->high_seq = tp->snd_nxt;
1376 tp->snd_cwnd_stamp = tcp_time_stamp;
1377 TCP_ECN_queue_cwr(tp);
1380 static inline void tcp_enter_cwr(struct tcp_opt *tp)
1382 tp->prior_ssthresh = 0;
1383 if (tp->ca_state < TCP_CA_CWR) {
1384 __tcp_enter_cwr(tp);
1385 tcp_set_ca_state(tp, TCP_CA_CWR);
1389 extern __u32 tcp_init_cwnd(struct tcp_opt *tp, struct dst_entry *dst);
1391 /* Slow start with delack produces 3 packets of burst, so that
1392 * it is safe "de facto".
1394 static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
1399 static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
1401 return after(tp->snd_sml,tp->snd_una) &&
1402 !after(tp->snd_sml, tp->snd_nxt);
1405 static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
1408 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1411 /* Return 0 if the packet can be sent now without violating Nagle's rules:
1412 1. It is full sized.
1413 2. Or it contains FIN.
1414 3. Or higher layers meant to force a packet boundary, hence the PSH bit.
1415 4. Or TCP_NODELAY was set.
1416 5. Or TCP_CORK is not set, and all sent packets are ACKed.
1417 With Minshall's modification: all sent small packets are ACKed.
1420 static __inline__ int
1421 tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
1423 return (skb->len < mss_now &&
1424 !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1425 ((nonagle&TCP_NAGLE_CORK) ||
1428 tcp_minshall_check(tp))));
1431 /* This checks if the data bearing packet SKB (usually tp->send_head)
1432 * should be put on the wire right now.
1434 static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
1435 unsigned cur_mss, int nonagle)
1437 /* RFC 1122 - section 4.2.3.4
1438 *
1439 * We must queue if:
1440 *
1441 * a) The right edge of this frame exceeds the window
1442 * b) There are packets in flight and we have a small segment
1443 * [SWS avoidance and Nagle algorithm]
1444 * (part of SWS is done on packetization)
1445 * The Minshall version reads: there are no _small_
1446 * segments in flight. (tcp_nagle_check)
1447 * c) We have too many packets 'in flight'
1449 * Don't use the nagle rule for urgent data (or
1450 * for the final FIN -DaveM).
1452 * Also, the Nagle rule does not apply to frames which
1453 * sit in the middle of the queue (they have no chance
1454 * to get new data) or when the room at the tail of the skb is
1455 * not enough to save anything substantial (<32 bytes for now).
1458 /* Don't be strict about the congestion window for the
1459 * final FIN frame. -DaveM
1461 return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
1462 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
1463 ((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
1464 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
1465 !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
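/* Example (illustrative): a sub-MSS segment without FIN, sent with TCP_CORK
 * set (or with Nagle enabled while earlier small segments are still
 * unacknowledged), makes tcp_nagle_check() non-zero; absent urgent data or a
 * forced push, tcp_snd_test() then returns 0 and the segment stays queued
 * even if the window and cwnd would otherwise permit sending it.
 */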
1468 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
1470 if (!tp->packets_out && !tp->pending)
1471 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
1474 static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
1476 return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1479 /* Push out any pending frames which were held back due to
1480 * TCP_CORK or attempt at coalescing tiny packets.
1481 * The socket must be locked by the caller.
1483 static __inline__ void __tcp_push_pending_frames(struct sock *sk,
1488 struct sk_buff *skb = tp->send_head;
1491 if (!tcp_skb_is_last(sk, skb))
1492 nonagle = TCP_NAGLE_PUSH;
1493 if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
1494 tcp_write_xmit(sk, nonagle))
1495 tcp_check_probe_timer(sk, tp);
1497 tcp_cwnd_validate(sk, tp);
1500 static __inline__ void tcp_push_pending_frames(struct sock *sk,
1503 __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
1506 static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
1508 struct sk_buff *skb = tp->send_head;
1511 tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
1512 tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
1515 static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1520 static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1525 extern void tcp_destroy_sock(struct sock *sk);
1529 * Calculate(/check) TCP checksum
1531 static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
1532 unsigned long saddr, unsigned long daddr,
1535 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1538 static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
1540 return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
1543 static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
1545 return skb->ip_summed != CHECKSUM_UNNECESSARY &&
1546 __tcp_checksum_complete(skb);
1549 /* Prequeue for VJ style copy to user, combined with checksumming. */
1551 static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
1553 tp->ucopy.task = NULL;
1555 tp->ucopy.memory = 0;
1556 skb_queue_head_init(&tp->ucopy.prequeue);
1559 /* Packet is added to VJ-style prequeue for processing in process
1560 * context, if a reader task is waiting. Apparently, this exciting
1561 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1562 * failed somewhere. Latency? Burstiness? Well, at least now we will
1563 * see why it failed. 8)8) --ANK
1565 * NOTE: is this not too big to inline?
1567 static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1569 struct tcp_opt *tp = tcp_sk(sk);
1571 if (!sysctl_tcp_low_latency && tp->ucopy.task) {
1572 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1573 tp->ucopy.memory += skb->truesize;
1574 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1575 struct sk_buff *skb1;
1577 BUG_ON(sock_owned_by_user(sk));
1579 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1580 sk->sk_backlog_rcv(sk, skb1);
1581 NET_INC_STATS_BH(TCPPrequeueDropped);
1584 tp->ucopy.memory = 0;
1585 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1586 wake_up_interruptible(sk->sk_sleep);
1587 if (!tcp_ack_scheduled(tp))
1588 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
1599 static char *statename[]={
1600 "Unused","Established","Syn Sent","Syn Recv",
1601 "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1602 "Close Wait","Last ACK","Listen","Closing"
1606 static __inline__ void tcp_set_state(struct sock *sk, int state)
1608 int oldstate = sk->sk_state;
1611 case TCP_ESTABLISHED:
1612 if (oldstate != TCP_ESTABLISHED)
1613 TCP_INC_STATS(TcpCurrEstab);
1617 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1618 TCP_INC_STATS(TcpEstabResets);
1620 sk->sk_prot->unhash(sk);
1621 if (tcp_sk(sk)->bind_hash &&
1622 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1626 if (oldstate==TCP_ESTABLISHED)
1627 TCP_DEC_STATS(TcpCurrEstab);
1630 /* Change state AFTER socket is unhashed to avoid a closed
1631 * socket sitting in the hash tables.
1633 sk->sk_state = state;
1636 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
1640 static __inline__ void tcp_done(struct sock *sk)
1642 tcp_set_state(sk, TCP_CLOSE);
1643 tcp_clear_xmit_timers(sk);
1645 sk->sk_shutdown = SHUTDOWN_MASK;
1647 if (!sock_flag(sk, SOCK_DEAD))
1648 sk->sk_state_change(sk);
1650 tcp_destroy_sock(sk);
1653 static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
1660 static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
1662 if (tp->tstamp_ok) {
1663 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1664 (TCPOPT_NOP << 16) |
1665 (TCPOPT_TIMESTAMP << 8) |
1667 *ptr++ = htonl(tstamp);
1668 *ptr++ = htonl(tp->ts_recent);
1670 if (tp->eff_sacks) {
1671 struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
1674 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1675 (TCPOPT_NOP << 16) |
1676 (TCPOPT_SACK << 8) |
1677 (TCPOLEN_SACK_BASE +
1678 (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
1679 for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
1680 *ptr++ = htonl(sp[this_sack].start_seq);
1681 *ptr++ = htonl(sp[this_sack].end_seq);
1690 /* Construct a tcp options header for a SYN or SYN_ACK packet.
1691 * If this is ever changed, make sure to change the definition of
1692 * MAX_SYN_SIZE to match the new maximum number of options that you
1693 * can generate.
1695 static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
1696 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
1698 /* We always get an MSS option.
1699 * The option bytes which will be seen in normal data
1700 * packets, should timestamps be used, must be in the MSS
1701 * advertised. But we subtract them from tp->mss_cache so
1702 * that calculations in tcp_sendmsg are simpler etc.
1703 * So account for this fact here if necessary. If we
1704 * don't do this correctly, as a receiver we won't
1705 * recognize data packets as being full sized when we
1706 * should, and thus we won't abide by the delayed ACK rules correctly.
1708 * SACKs don't matter, we never delay an ACK when we
1709 * have any of those going out.
1711 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
1714 *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
1715 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1717 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1718 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1719 *ptr++ = htonl(tstamp); /* TSVAL */
1720 *ptr++ = htonl(ts_recent); /* TSECR */
1722 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1723 (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
1725 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
1728 /* Determine a window scaling and initial window to offer.
1729 * Based on the assumption that the given amount of space
1730 * will be offered. Store the results in the tp structure.
1731 * NOTE: for smooth operation initial space offering should
1732 * be a multiple of mss if possible. We assume here that mss >= 1.
1733 * This MUST be enforced by all callers.
1735 static inline void tcp_select_initial_window(int __space, __u32 mss,
1737 __u32 *window_clamp,
1741 unsigned int space = (__space < 0 ? 0 : __space);
1743 /* If no clamp is set, set the clamp to the max possible scaled window */
1744 if (*window_clamp == 0)
1745 (*window_clamp) = (65535 << 14);
1746 space = min(*window_clamp, space);
1748 /* Quantize space offering to a multiple of mss if possible. */
1750 space = (space / mss) * mss;
1752 /* NOTE: offering an initial window larger than 32767
1753 * will break some buggy TCP stacks. We try to be nice.
1754 * If we are not window scaling, then this truncates
1755 * our initial window offering to 32k. There should also
1756 * be a sysctl option to stop being nice.
1758 (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
1761 /* See RFC1323 for an explanation of the limit to 14 */
1762 while (space > 65535 && (*rcv_wscale) < 14) {
1766 if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
1767 space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
1770 *rcv_wscale = max((__u8)sysctl_tcp_default_win_scale,
1774 /* Set the initial window to a value large enough for senders
1775 * following RFC 2414. Senders not following this RFC
1776 * will be satisfied with 2.
1778 if (mss > (1<<*rcv_wscale)) {
1782 else if (mss > 1460)
1784 if (*rcv_wnd > init_cwnd*mss)
1785 *rcv_wnd = init_cwnd*mss;
1787 /* Set the clamp no higher than max representable value */
1788 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
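/* Worked example (illustrative): with space = 256 KiB the loop above halves
 * space until it fits in 16 bits (262144 -> 131072 -> 65536 -> 32768), giving
 * rcv_wscale = 3, and the clamp becomes min(65535 << 3, *window_clamp).
 */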
1791 static inline int tcp_win_from_space(int space)
1793 return sysctl_tcp_adv_win_scale<=0 ?
1794 (space>>(-sysctl_tcp_adv_win_scale)) :
1795 space - (space>>sysctl_tcp_adv_win_scale);
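/* Worked example (illustrative): with the default sysctl_tcp_adv_win_scale
 * of 2, tcp_win_from_space() reserves a quarter of the space as overhead,
 * e.g. tcp_win_from_space(65536) = 65536 - 16384 = 49152. A negative value
 * flips the ratio: with -2 only space/4 is advertised.
 */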
1798 /* Note: caller must be prepared to deal with negative returns */
1799 static inline int tcp_space(struct sock *sk)
1801 return tcp_win_from_space(sk->sk_rcvbuf -
1802 atomic_read(&sk->sk_rmem_alloc));
1805 static inline int tcp_full_space( struct sock *sk)
1807 return tcp_win_from_space(sk->sk_rcvbuf);
1810 #ifdef CONFIG_ACCEPT_QUEUES
1811 static inline void tcp_acceptq_removed(struct sock *sk, int class)
1813 tcp_sk(sk)->acceptq[class].aq_backlog--;
1816 static inline void tcp_acceptq_added(struct sock *sk, int class)
1818 tcp_sk(sk)->acceptq[class].aq_backlog++;
1821 static inline int tcp_acceptq_is_full(struct sock *sk, int class)
1823 return tcp_sk(sk)->acceptq[class].aq_backlog >
1824 sk->sk_max_ack_backlog;
1827 static inline void tcp_set_acceptq(struct tcp_opt *tp, struct open_request *req)
1829 int class = req->acceptq_class;
1832 if (!tp->acceptq[class].aq_ratio) {
1833 req->acceptq_class = 0;
1837 tp->acceptq[class].aq_qcount++;
1838 req->acceptq_time_stamp = jiffies;
1840 if (tp->acceptq[class].aq_tail) {
1841 req->dl_next = tp->acceptq[class].aq_tail->dl_next;
1842 tp->acceptq[class].aq_tail->dl_next = req;
1843 tp->acceptq[class].aq_tail = req;
1844 } else { /* if first request in the class */
1845 tp->acceptq[class].aq_head = req;
1846 tp->acceptq[class].aq_tail = req;
1848 prev_class = class - 1;
1849 while (prev_class >= 0) {
1850 if (tp->acceptq[prev_class].aq_tail)
1854 if (prev_class < 0) {
1855 req->dl_next = tp->accept_queue;
1856 tp->accept_queue = req;
1859 req->dl_next = tp->acceptq[prev_class].aq_tail->dl_next;
1860 tp->acceptq[prev_class].aq_tail->dl_next = req;
1864 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
1867 tcp_set_acceptq(tcp_sk(sk),req);
1869 tcp_acceptq_added(sk,req->acceptq_class);
1873 static inline void tcp_acceptq_removed(struct sock *sk)
1875 sk->sk_ack_backlog--;
1878 static inline void tcp_acceptq_added(struct sock *sk)
1880 sk->sk_ack_backlog++;
1883 static inline int tcp_acceptq_is_full(struct sock *sk)
1885 return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
1888 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
1891 struct tcp_opt *tp = tcp_sk(sk);
1894 tcp_acceptq_added(sk);
1896 if (!tp->accept_queue_tail) {
1897 tp->accept_queue = req;
1899 tp->accept_queue_tail->dl_next = req;
1901 tp->accept_queue_tail = req;
1902 req->dl_next = NULL;
1907 struct tcp_listen_opt
1909 u8 max_qlen_log; /* log_2 of maximal queued SYNs */
1911 #ifdef CONFIG_ACCEPT_QUEUES
1912 int qlen_young[NUM_ACCEPT_QUEUES];
1918 struct open_request *syn_table[TCP_SYNQ_HSIZE];
1921 #ifdef CONFIG_ACCEPT_QUEUES
1923 tcp_synq_removed(struct sock *sk, struct open_request *req)
1925 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1927 if (--lopt->qlen == 0)
1928 tcp_delete_keepalive_timer(sk);
1929 if (req->retrans == 0)
1930 lopt->qlen_young[req->acceptq_class]--;
1933 static inline void tcp_synq_added(struct sock *sk, struct open_request *req)
1935 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1937 if (lopt->qlen++ == 0)
1938 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1939 lopt->qlen_young[req->acceptq_class]++;
1942 static inline int tcp_synq_len(struct sock *sk)
1944 return tcp_sk(sk)->listen_opt->qlen;
1947 static inline int tcp_synq_young(struct sock *sk, int class)
1949 return tcp_sk(sk)->listen_opt->qlen_young[class];
1955 tcp_synq_removed(struct sock *sk, struct open_request *req)
1957 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1959 if (--lopt->qlen == 0)
1960 tcp_delete_keepalive_timer(sk);
1961 if (req->retrans == 0)
1965 static inline void tcp_synq_added(struct sock *sk)
1967 struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
1969 if (lopt->qlen++ == 0)
1970 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1974 static inline int tcp_synq_len(struct sock *sk)
1976 return tcp_sk(sk)->listen_opt->qlen;
1979 static inline int tcp_synq_young(struct sock *sk)
1981 return tcp_sk(sk)->listen_opt->qlen_young;
1985 static inline int tcp_synq_is_full(struct sock *sk)
1987 return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
1990 static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
1991 struct open_request **prev)
1993 write_lock(&tp->syn_wait_lock);
1994 *prev = req->dl_next;
1995 write_unlock(&tp->syn_wait_lock);
1998 static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
1999 struct open_request **prev)
2001 tcp_synq_unlink(tcp_sk(sk), req, prev);
2002 tcp_synq_removed(sk, req);
2003 tcp_openreq_free(req);
2006 static __inline__ void tcp_openreq_init(struct open_request *req,
2008 struct sk_buff *skb)
2010 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
2011 req->rcv_isn = TCP_SKB_CB(skb)->seq;
2012 req->mss = tp->mss_clamp;
2013 req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
2014 req->tstamp_ok = tp->tstamp_ok;
2015 req->sack_ok = tp->sack_ok;
2016 req->snd_wscale = tp->snd_wscale;
2017 req->wscale_ok = tp->wscale_ok;
2020 req->rmt_port = skb->h.th->source;
2023 #define TCP_MEM_QUANTUM ((int)PAGE_SIZE)
2025 static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
2027 tcp_sk(sk)->queue_shrunk = 1;
2028 sk->sk_wmem_queued -= skb->truesize;
2029 sk->sk_forward_alloc += skb->truesize;
2033 static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
2035 sk->sk_wmem_queued += skb->truesize;
2036 sk->sk_forward_alloc -= skb->truesize;
2039 extern void __tcp_mem_reclaim(struct sock *sk);
2040 extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
2042 static inline void tcp_mem_reclaim(struct sock *sk)
2044 if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM)
2045 __tcp_mem_reclaim(sk);
2048 static inline void tcp_enter_memory_pressure(void)
2050 if (!tcp_memory_pressure) {
2051 NET_INC_STATS(TCPMemoryPressures);
2052 tcp_memory_pressure = 1;
2056 static inline void tcp_moderate_sndbuf(struct sock *sk)
2058 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
2059 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
2060 sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
2064 static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
2066 struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);
2069 skb->truesize += mem;
2070 if (sk->sk_forward_alloc >= (int)skb->truesize ||
2071 tcp_mem_schedule(sk, skb->truesize, 0)) {
2072 skb_reserve(skb, MAX_TCP_HEADER);
2077 tcp_enter_memory_pressure();
2078 tcp_moderate_sndbuf(sk);
2083 static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
2085 return tcp_alloc_pskb(sk, size, 0, gfp);
2088 static inline struct page * tcp_alloc_page(struct sock *sk)
2090 if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
2091 tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
2092 struct page *page = alloc_pages(sk->sk_allocation, 0);
2096 tcp_enter_memory_pressure();
2097 tcp_moderate_sndbuf(sk);
2101 static inline void tcp_writequeue_purge(struct sock *sk)
2103 struct sk_buff *skb;
2105 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
2106 tcp_free_skb(sk, skb);
2107 tcp_mem_reclaim(sk);
2110 extern void tcp_rfree(struct sk_buff *skb);
2112 static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
2115 skb->destructor = tcp_rfree;
2116 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2117 sk->sk_forward_alloc -= skb->truesize;
2120 extern void tcp_listen_wlock(void);
2122 /* - We may sleep inside this lock.
2123 * - If sleeping is not required (or called from BH),
2124 * use plain read_(un)lock(&tcp_lhash_lock).
2127 static inline void tcp_listen_lock(void)
2129 /* read_lock synchronizes us with candidate writers */
2130 read_lock(&tcp_lhash_lock);
2131 atomic_inc(&tcp_lhash_users);
2132 read_unlock(&tcp_lhash_lock);
2135 static inline void tcp_listen_unlock(void)
2137 if (atomic_dec_and_test(&tcp_lhash_users))
2138 wake_up(&tcp_lhash_wait);
2141 static inline int keepalive_intvl_when(struct tcp_opt *tp)
2143 return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
2146 static inline int keepalive_time_when(struct tcp_opt *tp)
2148 return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
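/* Note (illustrative): "x ? : y" is the GCC omitted-middle-operand extension,
 * equivalent to "x ? x : y" with x evaluated only once, so the per-socket
 * keepalive settings override the sysctl defaults only when they are non-zero.
 */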
2151 static inline int tcp_fin_time(struct tcp_opt *tp)
2153 int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
2155 if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
2156 fin_timeout = (tp->rto<<2) - (tp->rto>>1);
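/* Worked example (illustrative): (tp->rto << 2) - (tp->rto >> 1) is 3.5*RTO,
 * so with HZ=1000 and an RTO of 200 ms the FIN-WAIT-2 timeout is never
 * allowed to drop below 700 ms, however small linger2 or the sysctl are.
 */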
2161 static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
2163 if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
2165 if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
2168 /* RST segments are not recommended to carry timestamps,
2169 and, if they do, it is recommended to ignore PAWS because
2170 "their cleanup function should take precedence over timestamps."
2171 Certainly, this is a mistake. It is necessary to understand the reasons
2172 for this constraint in order to relax it: if the peer reboots, its clock may go
2173 out of sync and half-open connections will not be reset.
2174 Actually, the problem would not exist if all
2175 implementations followed the draft about maintaining clocks
2176 across reboots. Linux-2.2 DOES NOT!
2178 However, we can relax time bounds for RST segments to MSL.
2180 if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
2185 static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
2187 sk->sk_route_caps = dst->dev->features;
2188 if (sk->sk_route_caps & NETIF_F_TSO) {
2189 if (sk->sk_no_largesend || dst->header_len)
2190 sk->sk_route_caps &= ~NETIF_F_TSO;
2194 #define TCP_CHECK_TIMER(sk) do { } while (0)
2196 static inline int tcp_use_frto(const struct sock *sk)
2198 const struct tcp_opt *tp = tcp_sk(sk);
2200 /* F-RTO must be activated in sysctl and there must be some
2201 * unsent new data, and the advertised window should allow sending it.
2204 return (sysctl_tcp_frto && tp->send_head &&
2205 !after(TCP_SKB_CB(tp->send_head)->end_seq,
2206 tp->snd_una + tp->snd_wnd));
2209 static inline void tcp_mib_init(void)
2212 TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
2213 TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
2214 TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
2215 TCP_ADD_STATS_USER(TcpMaxConn, -1);
2219 enum tcp_seq_states {
2220 TCP_SEQ_STATE_LISTENING,
2221 TCP_SEQ_STATE_OPENREQ,
2222 TCP_SEQ_STATE_ESTABLISHED,
2223 TCP_SEQ_STATE_TIME_WAIT,
2226 struct tcp_seq_afinfo {
2227 struct module *owner;
2230 int (*seq_show) (struct seq_file *m, void *v);
2231 struct file_operations *seq_fops;
2234 struct tcp_iter_state {
2236 enum tcp_seq_states state;
2237 struct sock *syn_wait_sk;
2238 int bucket, sbucket, num, uid;
2239 struct seq_operations seq_ops;
2242 extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
2243 extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
2245 /* TCP Westwood functions and constants */
2247 #define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
2248 #define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
2250 static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
2252 if (sysctl_tcp_westwood)
2253 tp->westwood.rtt = rtt_seq;
2256 void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
2257 void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
2259 static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
2261 if (sysctl_tcp_westwood)
2262 __tcp_westwood_fast_bw(sk, skb);
2265 static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
2267 if (sysctl_tcp_westwood)
2268 __tcp_westwood_slow_bw(sk, skb);
2271 static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
2273 return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
2274 (__u32) (tp->mss_cache),
2278 static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
2280 return sysctl_tcp_westwood ? __tcp_westwood_bw_rttmin(tp) : 0;
2283 static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
2287 if (sysctl_tcp_westwood) {
2288 ssthresh = __tcp_westwood_bw_rttmin(tp);
2290 tp->snd_ssthresh = ssthresh;
2293 return (ssthresh != 0);
2296 static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
2300 if (sysctl_tcp_westwood) {
2301 cwnd = __tcp_westwood_bw_rttmin(tp);
2303 tp->snd_cwnd = cwnd;