*
* Version: @(#)tcp.h 1.0.5 05/23/93
*
- * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* This program is free software; you can redistribute it and/or
#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1
-/* Cancel timers, when they are not required. */
-#undef TCP_CLEAR_TIMERS
-
#include <linux/config.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
+#include <linux/skbuff.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/inet_timewait_sock.h>
+#include <net/inet_hashtables.h>
#include <net/checksum.h>
+#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-#include <linux/ipv6.h>
-#endif
-#include <linux/seq_file.h>
-
-/* This is for all connections with a full identity, no wildcards.
- * New scheme: half the table is for TIME_WAIT, the other half is
- * for the rest. I'll experiment with dynamic table growth later.
- */
-struct tcp_ehash_bucket {
- rwlock_t lock;
- struct hlist_head chain;
-} __attribute__((__aligned__(8)));
-
-/* This is for listening sockets, thus all sockets which possess wildcards. */
-#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
-
-/* There are a few simple rules, which allow for local port reuse by
- * an application. In essence:
- *
- * 1) Sockets bound to different interfaces may share a local port.
- * Failing that, goto test 2.
- * 2) If all sockets have sk->sk_reuse set, and none of them are in
- * TCP_LISTEN state, the port may be shared.
- * Failing that, goto test 3.
- * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
- * address, and none of them are the same, the port may be
- * shared.
- * Failing this, the port cannot be shared.
- *
- * The interesting point is test #2. This is what an FTP server does
- * all day. To optimize this case we use a specific flag bit defined
- * below. As we add sockets to a bind bucket list, we perform a
- * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
- * As long as all sockets added to a bind bucket pass this test,
- * the flag bit will be set.
- * The resulting situation is that tcp_v[46]_verify_bind() can just check
- * for this flag bit, if it is set and the socket trying to bind has
- * sk->sk_reuse set, we don't even have to walk the owners list at all,
- * we return that it is ok to bind this socket to the requested local port.
- *
- * Sounds like a lot of work, but it is worth it. In a more naive
- * implementation (i.e. current FreeBSD etc.) the entire list of ports
- * must be walked for each data port opened by an ftp server. Needless
- * to say, this does not scale at all. With a couple thousand FTP
- * users logged onto your box, isn't it nice to know that new data
- * ports are created in O(1) time? I thought so. ;-) -DaveM
- */
-struct tcp_bind_bucket {
- unsigned short port;
- signed short fastreuse;
- struct hlist_node node;
- struct hlist_head owners;
-};
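For reference, a minimal sketch of how the fastreuse invariant described above can be maintained as sockets join a bind bucket; this is illustrative only, not code from the tree, and the helper name tb_update_fastreuse is hypothetical:

static inline void tb_update_fastreuse(struct tcp_bind_bucket *tb,
				       struct sock *sk)
{
	int reusable = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->owners))
		/* The first owner decides whether the port is shareable. */
		tb->fastreuse = reusable;
	else if (tb->fastreuse && !reusable)
		/* One non-reusable owner forces the slow path for good. */
		tb->fastreuse = 0;
}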
-
-#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
-
-struct tcp_bind_hashbucket {
- spinlock_t lock;
- struct hlist_head chain;
-};
-
-static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
-{
- return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
-}
-
-static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
-{
- return hlist_empty(&head->chain) ? NULL : __tb_head(head);
-}
-
-extern struct tcp_hashinfo {
- /* This is for sockets with full identity only. Sockets here will
- * always be without wildcards and will have the following invariant:
- *
- * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
- *
- * First half of the table is for sockets not in TIME_WAIT, second half
- * is for TIME_WAIT sockets only.
- */
- struct tcp_ehash_bucket *__tcp_ehash;
-
- /* Ok, let's try this, I give up, we do need a local binding
- * TCP hash as well as the others for fast bind/connect.
- */
- struct tcp_bind_hashbucket *__tcp_bhash;
-
- int __tcp_bhash_size;
- int __tcp_ehash_size;
+#include <net/tcp_states.h>
- /* All sockets in TCP_LISTEN state will be in here. This is the only
- * table where wildcard'd TCP sockets can exist. Hash function here
- * is just local port number.
- */
- struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
-
- /* All the above members are written once at bootup and
- * never written again _or_ are predominantly read-access.
- *
- * Now align to a new cache line as all the following members
- * are often dirty.
- */
- rwlock_t __tcp_lhash_lock ____cacheline_aligned;
- atomic_t __tcp_lhash_users;
- wait_queue_head_t __tcp_lhash_wait;
- spinlock_t __tcp_portalloc_lock;
-} tcp_hashinfo;
-
-#define tcp_ehash (tcp_hashinfo.__tcp_ehash)
-#define tcp_bhash (tcp_hashinfo.__tcp_bhash)
-#define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
-#define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
-#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
-#define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
-#define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
-#define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
-#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
-
-/* SLAB cache for TCP socks */
-extern kmem_cache_t *tcp_sk_cachep;
-
-extern kmem_cache_t *tcp_bucket_cachep;
-extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
- unsigned short snum);
-extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
-extern void tcp_bucket_unlock(struct sock *sk);
-extern int tcp_port_rover;
-extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
-
-/* These are AF independent. */
-static __inline__ int tcp_bhashfn(__u16 lport)
-{
- return (lport & (tcp_bhash_size - 1));
-}
-
-extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
- unsigned short snum);
-
-#if (BITS_PER_LONG == 64)
-#define TCP_ADDRCMP_ALIGN_BYTES 8
-#else
-#define TCP_ADDRCMP_ALIGN_BYTES 4
-#endif
-
-/* This is a TIME_WAIT bucket. It works around the memory consumption
- * problems of sockets in such a state on heavily loaded servers, but
- * without violating the protocol specification.
- */
-struct tcp_tw_bucket {
- /*
- * Now struct sock also uses sock_common, so please just
- * don't add anything before this first member (__tw_common) --acme
- */
- struct sock_common __tw_common;
-#define tw_family __tw_common.skc_family
-#define tw_state __tw_common.skc_state
-#define tw_reuse __tw_common.skc_reuse
-#define tw_bound_dev_if __tw_common.skc_bound_dev_if
-#define tw_node __tw_common.skc_node
-#define tw_bind_node __tw_common.skc_bind_node
-#define tw_refcnt __tw_common.skc_refcnt
-#define tw_xid __tw_common.skc_xid
-#define tw_vx_info __tw_common.skc_vx_info
-#define tw_nid __tw_common.skc_nid
-#define tw_nx_info __tw_common.skc_nx_info
- volatile unsigned char tw_substate;
- unsigned char tw_rcv_wscale;
- __u16 tw_sport;
- /* Socket demultiplex comparisons on incoming packets. */
- /* these five are in inet_opt */
- __u32 tw_daddr
- __attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
- __u32 tw_rcv_saddr;
- __u16 tw_dport;
- __u16 tw_num;
- /* And these are ours. */
- int tw_hashent;
- int tw_timeout;
- __u32 tw_rcv_nxt;
- __u32 tw_snd_nxt;
- __u32 tw_rcv_wnd;
- __u32 tw_ts_recent;
- long tw_ts_recent_stamp;
- unsigned long tw_ttd;
- struct tcp_bind_bucket *tw_tb;
- struct hlist_node tw_death_node;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct in6_addr tw_v6_daddr;
- struct in6_addr tw_v6_rcv_saddr;
- int tw_v6_ipv6only;
-#endif
-};
-
-static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
- struct hlist_head *list)
-{
- hlist_add_head(&tw->tw_node, list);
-}
-
-static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
- struct hlist_head *list)
-{
- hlist_add_head(&tw->tw_bind_node, list);
-}
-
-static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
-{
- return tw->tw_death_node.pprev != NULL;
-}
-
-static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
-{
- tw->tw_death_node.pprev = NULL;
-}
-
-static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
-{
- __hlist_del(&tw->tw_death_node);
- tw_dead_node_init(tw);
-}
-
-static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
-{
- if (tw_dead_hashed(tw)) {
- __tw_del_dead_node(tw);
- return 1;
- }
- return 0;
-}
-
-#define tw_for_each(tw, node, head) \
- hlist_for_each_entry(tw, node, head, tw_node)
-
-#define tw_for_each_inmate(tw, node, jail) \
- hlist_for_each_entry(tw, node, jail, tw_death_node)
-
-#define tw_for_each_inmate_safe(tw, node, safe, jail) \
- hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
-
-#define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
-
-static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
-{
- return likely(sk->sk_state != TCP_TIME_WAIT) ?
- inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
-}
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
-{
- return likely(sk->sk_state != TCP_TIME_WAIT) ?
- &inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
-}
-
-static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
-{
- return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
-}
-
-#define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only)
-
-static inline int tcp_v6_ipv6only(const struct sock *sk)
-{
- return likely(sk->sk_state != TCP_TIME_WAIT) ?
- ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
-}
-#else
-# define __tcp_v6_rcv_saddr(__sk) NULL
-# define tcp_v6_rcv_saddr(__sk) NULL
-# define tcptw_sk_ipv6only(__sk) 0
-# define tcp_v6_ipv6only(__sk) 0
-#endif
+#include <linux/seq_file.h>
-extern kmem_cache_t *tcp_timewait_cachep;
-
-static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
-{
- if (atomic_dec_and_test(&tw->tw_refcnt)) {
-#ifdef INET_REFCNT_DEBUG
- printk(KERN_DEBUG "tw_bucket %p released\n", tw);
-#endif
- kmem_cache_free(tcp_timewait_cachep, tw);
- }
-}
+extern struct inet_hashinfo tcp_hashinfo;
extern atomic_t tcp_orphan_count;
-extern int tcp_tw_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
-extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
-extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
-
-
-/* Socket demux engine toys. */
-#ifdef __BIG_ENDIAN
-#define TCP_COMBINED_PORTS(__sport, __dport) \
- (((__u32)(__sport)<<16) | (__u32)(__dport))
-#else /* __LITTLE_ENDIAN */
-#define TCP_COMBINED_PORTS(__sport, __dport) \
- (((__u32)(__dport)<<16) | (__u32)(__sport))
-#endif
-
-#if (BITS_PER_LONG == 64)
-#ifdef __BIG_ENDIAN
-#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
- __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
-#else /* __LITTLE_ENDIAN */
-#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
- __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
-#endif /* __BIG_ENDIAN */
-#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- (((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
- ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- (((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
- ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#else /* 32-bit arch */
-#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
-#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- ((inet_sk(__sk)->daddr == (__saddr)) && \
- (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
- ((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
- ((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
- (tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
- ((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-#endif /* 64-bit arch */
-
-#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
- (((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- !ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr)) && \
- !ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
- (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
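To see how the demux toys above fit together: on a 64-bit arch both addresses collapse into one u64 compare and both ports into one u32 compare. A sketch, not part of the patch (the function name tcp_v4_match_example is hypothetical); it compiles on 32-bit too, where the cookie macro expands to nothing:

static inline int tcp_v4_match_example(struct sock *sk,
				       u32 saddr, u32 daddr,
				       u16 sport, u16 dport, int dif)
{
	/* The cookie macro declares a u64 holding both addresses. */
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
	const __u32 ports = TCP_COMBINED_PORTS(sport, dport);

	return TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif);
}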
-
-/* These can have wildcards, don't try too hard. */
-static __inline__ int tcp_lhashfn(unsigned short num)
-{
- return num & (TCP_LHTABLE_SIZE - 1);
-}
-
-static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
-{
- return tcp_lhashfn(inet_sk(sk)->num);
-}
#define MAX_TCP_HEADER (128 + MAX_HEADER)
*/
#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
- * connection: ~180sec is RFC minumum */
+ * connection: ~180sec is RFC minimum */
#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
- * connection: ~180sec is RFC minumum */
+ * connection: ~180sec is RFC minimum */
#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
* timestamps. It must be less than
* minimal timewait lifetime.
*/
-
-#define TCP_TW_RECYCLE_SLOTS_LOG 5
-#define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
-
-/* If time > 4sec, it is the "slow" path and no recycling is required,
-   so we select the tick to cover a range of about 4 seconds.
- */
-
-#if HZ <= 16 || HZ > 4096
-# error Unsupported: HZ <= 16 or HZ > 4096
-#elif HZ <= 32
-# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#elif HZ <= 64
-# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#elif HZ <= 128
-# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#elif HZ <= 256
-# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#elif HZ <= 512
-# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#elif HZ <= 1024
-# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#elif HZ <= 2048
-# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#else
-# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#endif
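To make the ladder above concrete, here is the arithmetic for one common configuration (an editorial worked example, not part of the patch):

/*
 * With HZ = 1000 the HZ <= 1024 branch applies, so
 *   TCP_TW_RECYCLE_TICK = 10 + 2 - TCP_TW_RECYCLE_SLOTS_LOG = 10 + 2 - 5 = 7
 * One slot then spans 1 << 7 = 128 jiffies = 128ms, and the
 * TCP_TW_RECYCLE_SLOTS = 32 slots together cover 32 * 128ms ~= 4.1s,
 * matching the ~4 second range the comment above asks for.
 */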
-
-#define BICTCP_1_OVER_BETA 8 /*
- * Fast recovery
- * multiplicative decrease factor
- */
-#define BICTCP_MAX_INCREMENT 32 /*
- * Limit on the amount of
- * increment allowed during
- * binary search.
- */
-#define BICTCP_FUNC_OF_MIN_INCR 11 /*
- * log(B/Smin)/log(B/(B-1))+1,
- * Smin:min increment
- * B:log factor
- */
-#define BICTCP_B 4 /*
- * In binary search,
- * go to point (max+min)/N
- */
-
/*
* TCP option
*/
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
-#define TCP_TIME_RETRANS 1 /* Retransmit timer */
-#define TCP_TIME_DACK 2 /* Delayed ack timer */
-#define TCP_TIME_PROBE0 3 /* Zero window probe timer */
-#define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
-
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
#define TCP_NAGLE_CORK 2 /* Socket is corked */
-#define TCP_NAGLE_PUSH 4 /* Cork is overriden for already queued data */
+#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
+
+extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
-extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
-extern int sysctl_tcp_tw_recycle;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
-extern int sysctl_tcp_max_tw_buckets;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
-extern int sysctl_tcp_westwood;
-extern int sysctl_tcp_vegas_cong_avoid;
-extern int sysctl_tcp_vegas_alpha;
-extern int sysctl_tcp_vegas_beta;
-extern int sysctl_tcp_vegas_gamma;
extern int sysctl_tcp_nometrics_save;
-extern int sysctl_tcp_bic;
-extern int sysctl_tcp_bic_fast_convergence;
-extern int sysctl_tcp_bic_low_window;
-extern int sysctl_tcp_default_win_scale;
extern int sysctl_tcp_moderate_rcvbuf;
+extern int sysctl_tcp_tso_win_divisor;
+extern int sysctl_tcp_abc;
extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
-struct open_request;
-
-struct or_calltable {
- int family;
- int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
- void (*send_ack) (struct sk_buff *skb, struct open_request *req);
- void (*destructor) (struct open_request *req);
- void (*send_reset) (struct sk_buff *skb);
-};
-
-struct tcp_v4_open_req {
- __u32 loc_addr;
- __u32 rmt_addr;
- struct ip_options *opt;
-};
-
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-struct tcp_v6_open_req {
- struct in6_addr loc_addr;
- struct in6_addr rmt_addr;
- struct sk_buff *pktopts;
- int iif;
-};
-#endif
-
-/* this structure is too big */
-struct open_request {
- struct open_request *dl_next; /* Must be first member! */
- __u32 rcv_isn;
- __u32 snt_isn;
- __u16 rmt_port;
- __u16 mss;
- __u8 retrans;
- __u8 __pad;
- __u16 snd_wscale : 4,
- rcv_wscale : 4,
- tstamp_ok : 1,
- sack_ok : 1,
- wscale_ok : 1,
- ecn_ok : 1,
- acked : 1;
- /* The following two fields can be easily recomputed I think -AK */
- __u32 window_clamp; /* window clamp at creation time */
- __u32 rcv_wnd; /* rcv_wnd offered first time */
- __u32 ts_recent;
- unsigned long expires;
- struct or_calltable *class;
- struct sock *sk;
- union {
- struct tcp_v4_open_req v4_req;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
- struct tcp_v6_open_req v6_req;
-#endif
- } af;
-#ifdef CONFIG_ACCEPT_QUEUES
- unsigned long acceptq_time_stamp;
- int acceptq_class;
-#endif
-};
-
-/* SLAB cache for open requests. */
-extern kmem_cache_t *tcp_openreq_cachep;
-
-#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
-#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
-
-static inline void tcp_openreq_free(struct open_request *req)
-{
- req->class->destructor(req);
- tcp_openreq_fastfree(req);
-}
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
-#else
-#define TCP_INET_FAMILY(fam) 1
-#endif
-
-/*
- * Pointers to address related TCP functions
- * (i.e. things that depend on the address family)
- */
-
-struct tcp_func {
- int (*queue_xmit) (struct sk_buff *skb,
- int ipfragok);
-
- void (*send_check) (struct sock *sk,
- struct tcphdr *th,
- int len,
- struct sk_buff *skb);
-
- int (*rebuild_header) (struct sock *sk);
-
- int (*conn_request) (struct sock *sk,
- struct sk_buff *skb);
-
- struct sock * (*syn_recv_sock) (struct sock *sk,
- struct sk_buff *skb,
- struct open_request *req,
- struct dst_entry *dst);
-
- int (*remember_stamp) (struct sock *sk);
-
- __u16 net_header_len;
-
- int (*setsockopt) (struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- int optlen);
-
- int (*getsockopt) (struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- int __user *optlen);
-
-
- void (*addr2sockaddr) (struct sock *sk,
- struct sockaddr *);
-
- int sockaddr_len;
-};
-
/*
* The next routines deal with comparing 32 bit unsigned ints
* and worry about wraparound (automatic with unsigned arithmetic).
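The wrap-safe comparison helpers this comment introduces fall outside the hunk context; for reference, they are built on a signed view of the unsigned difference:

static inline int before(__u32 seq1, __u32 seq2)
{
	/* The subtraction wraps modulo 2^32, so the sign of the
	 * difference orders sequence numbers correctly across a wrap.
	 */
	return (__s32)(seq1 - seq2) < 0;
}

#define after(seq2, seq1)	before(seq1, seq2)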
#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
-extern void tcp_put_port(struct sock *sk);
-extern void tcp_inherit_port(struct sock *sk, struct sock *child);
-
extern void tcp_v4_err(struct sk_buff *skb, u32);
extern void tcp_shutdown (struct sock *sk, int how);
extern int tcp_v4_remember_stamp(struct sock *sk);
-extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
+extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t size);
extern void tcp_rcv_space_adjust(struct sock *sk);
-enum tcp_ack_state_t
-{
- TCP_ACK_SCHED = 1,
- TCP_ACK_TIMER = 2,
- TCP_ACK_PUSHED= 4
-};
+extern int tcp_twsk_unique(struct sock *sk,
+ struct sock *sktw, void *twp);
-static inline void tcp_schedule_ack(struct tcp_opt *tp)
+static inline void tcp_dec_quickack_mode(struct sock *sk,
+ const unsigned int pkts)
{
- tp->ack.pending |= TCP_ACK_SCHED;
-}
-
-static inline int tcp_ack_scheduled(struct tcp_opt *tp)
-{
- return tp->ack.pending&TCP_ACK_SCHED;
-}
+ struct inet_connection_sock *icsk = inet_csk(sk);
-static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
-{
- if (tp->ack.quick && --tp->ack.quick == 0) {
- /* Leaving quickack mode we deflate ATO. */
- tp->ack.ato = TCP_ATO_MIN;
+ if (icsk->icsk_ack.quick) {
+ if (pkts >= icsk->icsk_ack.quick) {
+ icsk->icsk_ack.quick = 0;
+ /* Leaving quickack mode we deflate ATO. */
+ icsk->icsk_ack.ato = TCP_ATO_MIN;
+ } else
+ icsk->icsk_ack.quick -= pkts;
}
}
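A usage sketch for the rewritten helper, assuming the caller sits on the ACK-transmit path as in tcp_output.c:

/* One TSO frame can carry several segments, so quickack credits are
 * consumed in segment units rather than per skb:
 */
tcp_dec_quickack_mode(sk, tcp_skb_pcount(skb));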
-extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
+extern void tcp_enter_quickack_mode(struct sock *sk);
-static __inline__ void tcp_delack_init(struct tcp_opt *tp)
+static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
- memset(&tp->ack, 0, sizeof(tp->ack));
-}
-
-static inline void tcp_clear_options(struct tcp_opt *tp)
-{
- tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
+ rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}
enum tcp_tw_status
};
-extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
+extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
- struct tcphdr *th,
- unsigned len);
+ const struct tcphdr *th);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
- struct open_request *req,
- struct open_request **prev);
+ struct request_sock *req,
+ struct request_sock **prev);
extern int tcp_child_process(struct sock *parent,
struct sock *child,
struct sk_buff *skb);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_opt *tp);
+extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk,
long timeout);
-extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
extern int tcp_getsockopt(struct sock *sk, int level,
size_t len, int nonblock,
int flags, int *addr_len);
-extern int tcp_listen_start(struct sock *sk);
-
extern void tcp_parse_options(struct sk_buff *skb,
- struct tcp_opt *tp,
+ struct tcp_options_received *opt_rx,
int estab);
/*
* TCP v4 functions exported for the inet6 API
*/
-extern int tcp_v4_rebuild_header(struct sock *sk);
-
-extern int tcp_v4_build_header(struct sock *sk,
- struct sk_buff *skb);
-
-extern void tcp_v4_send_check(struct sock *sk,
- struct tcphdr *th, int len,
+extern void tcp_v4_send_check(struct sock *sk, int len,
struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk,
struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
- struct open_request *req,
+ struct request_sock *req,
struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk,
extern struct sk_buff * tcp_make_synack(struct sock *sk,
struct dst_entry *dst,
- struct open_request *req);
+ struct request_sock *req);
extern int tcp_disconnect(struct sock *sk, int flags);
/* tcp_output.c */
-extern int tcp_write_xmit(struct sock *, int nonagle);
+extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
+ unsigned int cur_mss, int nonagle);
+extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
+extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, int priority);
+extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
-extern int tcp_transmit_skb(struct sock *, struct sk_buff *);
-extern void tcp_push_one(struct sock *, unsigned mss_now);
+extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);
+/* tcp_input.c */
+extern void tcp_cwnd_application_limited(struct sock *sk);
+
/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
-extern void tcp_clear_xmit_timers(struct sock *);
-
-extern void tcp_delete_keepalive_timer (struct sock *);
-extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
-extern int tcp_sync_mss(struct sock *sk, u32 pmtu);
+static inline void tcp_clear_xmit_timers(struct sock *sk)
+{
+ inet_csk_clear_xmit_timers(sk);
+}
-extern const char timer_bug_msg[];
+extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+extern unsigned int tcp_current_mss(struct sock *sk, int large);
-/* tcp_diag.c */
+/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
-static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
-{
- struct tcp_opt *tp = tcp_sk(sk);
-
- switch (what) {
- case TCP_TIME_RETRANS:
- case TCP_TIME_PROBE0:
- tp->pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
- sk_stop_timer(sk, &tp->retransmit_timer);
-#endif
- break;
- case TCP_TIME_DACK:
- tp->ack.blocked = 0;
- tp->ack.pending = 0;
-
-#ifdef TCP_CLEAR_TIMERS
- sk_stop_timer(sk, &tp->delack_timer);
-#endif
- break;
- default:
- printk(timer_bug_msg);
- return;
- };
-
-}
+extern void tcp_initialize_rcv_mss(struct sock *sk);
-/*
- * Reset the retransmission timer
- */
-static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
-{
- struct tcp_opt *tp = tcp_sk(sk);
-
- if (when > TCP_RTO_MAX) {
-#ifdef TCP_DEBUG
- printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
-#endif
- when = TCP_RTO_MAX;
- }
-
- switch (what) {
- case TCP_TIME_RETRANS:
- case TCP_TIME_PROBE0:
- tp->pending = what;
- tp->timeout = jiffies+when;
- sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
- break;
-
- case TCP_TIME_DACK:
- tp->ack.pending |= TCP_ACK_TIMER;
- tp->ack.timeout = jiffies+when;
- sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
- break;
-
- default:
- printk(timer_bug_msg);
- };
-}
-
-/* Compute the current effective MSS, taking SACKs and IP options,
- * and even PMTU discovery events into account.
- *
- * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
- * cannot be large. However, taking into account rare use of URG, this
- * is not a big flaw.
- */
-
-static __inline__ unsigned int tcp_current_mss(struct sock *sk, int large)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
- int mss_now = large && (sk->sk_route_caps & NETIF_F_TSO) &&
- !tp->urg_mode ?
- tp->mss_cache : tp->mss_cache_std;
-
- if (dst) {
- u32 mtu = dst_pmtu(dst);
- if (mtu != tp->pmtu_cookie ||
- tp->ext2_header_len != dst->header_len)
- mss_now = tcp_sync_mss(sk, mtu);
- }
- if (tp->eff_sacks)
- mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
- (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
- return mss_now;
-}
-
-/* Initialize RCV_MSS value.
- * RCV_MSS is our guess about the MSS used by the peer.
- * We don't have any direct information about the MSS.
- * It's better to underestimate the RCV_MSS rather than overestimate it.
- * Overestimating it makes us ACK less frequently than needed.
- * Underestimations are easier to detect and fix via tcp_measure_rcv_mss().
- */
-
-static inline void tcp_initialize_rcv_mss(struct sock *sk)
-{
- struct tcp_opt *tp = tcp_sk(sk);
- unsigned int hint = min(tp->advmss, tp->mss_cache_std);
-
- hint = min(hint, tp->rcv_wnd/2);
- hint = min(hint, TCP_MIN_RCVMSS);
- hint = max(hint, TCP_MIN_MSS);
-
- tp->ack.rcv_mss = hint;
-}
-
-static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
+static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
}
-static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
+static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
- __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
+ __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
+static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
- if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
+ if (skb_queue_empty(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
 * Rcv_nxt can be after the window if our peer pushes more data
* than the offered window.
*/
-static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
+static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
/* TCP timestamps are only 32-bits, this causes a slight
* complication on 64-bit systems since we store a snapshot
- * of jiffies in the buffer control blocks below. We decidely
- * only use of the low 32-bits of jiffies and hide the ugly
+ * of jiffies in the buffer control blocks below. We decided
+ * to use only the low 32-bits of jiffies and hide the ugly
* casts with the following macro.
*/
#define tcp_time_stamp ((__u32)(jiffies))
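Because only the low 32 bits of jiffies survive, elapsed-time arithmetic on tcp_time_stamp must stay in unsigned 32-bit so that a wrap cancels in the subtraction. A minimal editorial sketch (the helper name tcp_stamp_elapsed is hypothetical):

static inline __u32 tcp_stamp_elapsed(__u32 start)
{
	/* Unsigned 32-bit subtraction is wrap-safe: this yields the
	 * elapsed ticks even if jiffies wrapped between the samples.
	 */
	return (__u32)(tcp_time_stamp - start);
}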
-/* This is what the send packet queueing engine uses to pass
+/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission
* code. We also store the host-order sequence numbers in
* here too. This is 36 bytes on 32-bit architectures,
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
-#define TCPCB_URG 0x20 /* Urgent pointer advenced here */
+#define TCPCB_URG 0x20 /* Urgent pointer advanced here */
#define TCPCB_AT_TAIL (TCPCB_URG)
#include <net/tcp_ecn.h>
-/* This determines how many packets are "in the network" to the best
- * of our knowledge. In many cases it is conservative, but where
- * detailed information is available from the receiver (via SACK
- * blocks etc.) we can make more aggressive calculations.
- *
- * Use this for decisions involving congestion control, use just
- * tp->packets_out to determine if the send queue is empty or not.
- *
- * Read this equation as:
- *
- * "Packets sent once on transmission queue" MINUS
- * "Packets left network, but not honestly ACKed yet" PLUS
- * "Packets fast retransmitted"
+/* Due to TSO, an SKB can be composed of multiple actual
+ * packets. To keep these tracked properly, we use this.
*/
-static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
+static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
- return tp->packets_out - tp->left_out + tp->retrans_out;
+ return skb_shinfo(skb)->tso_segs;
}
-/* Recalculate snd_ssthresh, we want to set it to:
- *
- * Reno:
- * one half the current congestion window, but no
- * less than two segments
- *
- * BIC:
- * behave like Reno until low_window is reached,
- * then increase congestion window slowly
- */
-static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
+/* This is valid iff tcp_skb_pcount() > 1. */
+static inline int tcp_skb_mss(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->tso_size;
+}
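To make the units concrete, a brief editorial example with assumed numbers:

/* A single TSO skb carrying 44 segments of 1460 bytes has
 * skb->len == 64240, tso_size == 1460 and tso_segs == 44, so
 * tcp_skb_pcount() == 44 and tcp_skb_mss() == 1460; the accounting
 * helpers below all move packets_out in these segment units.
 */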
+
+static inline void tcp_dec_pcount_approx(__u32 *count,
+ const struct sk_buff *skb)
{
- if (sysctl_tcp_bic) {
- if (sysctl_tcp_bic_fast_convergence &&
- tp->snd_cwnd < tp->bictcp.last_max_cwnd)
- tp->bictcp.last_max_cwnd
- = (tp->snd_cwnd * (2*BICTCP_1_OVER_BETA-1))
- / (BICTCP_1_OVER_BETA/2);
- else
- tp->bictcp.last_max_cwnd = tp->snd_cwnd;
-
- if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
- return max(tp->snd_cwnd - (tp->snd_cwnd/BICTCP_1_OVER_BETA),
- 2U);
+ if (*count) {
+ *count -= tcp_skb_pcount(skb);
+ if ((int)*count < 0)
+ *count = 0;
}
+}
+
+static inline void tcp_packets_out_inc(struct sock *sk,
+ struct tcp_sock *tp,
+ const struct sk_buff *skb)
+{
+ int orig = tp->packets_out;
- return max(tp->snd_cwnd >> 1U, 2U);
+ tp->packets_out += tcp_skb_pcount(skb);
+ if (!orig)
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}
-/* Stop taking Vegas samples for now. */
-#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
+static inline void tcp_packets_out_dec(struct tcp_sock *tp,
+ const struct sk_buff *skb)
+{
+ tp->packets_out -= tcp_skb_pcount(skb);
+}
+
+/* Events passed to congestion control interface */
+enum tcp_ca_event {
+ CA_EVENT_TX_START, /* first transmit when no packets in flight */
+ CA_EVENT_CWND_RESTART, /* congestion window restart */
+ CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
+ CA_EVENT_FRTO, /* fast recovery timeout */
+ CA_EVENT_LOSS, /* loss timeout */
+ CA_EVENT_FAST_ACK, /* in sequence ack */
+ CA_EVENT_SLOW_ACK, /* other ack */
+};
-/* Is this TCP connection using Vegas (regardless of whether it is taking
- * Vegas measurements at the current time)?
+/*
+ * Interface for adding new TCP congestion control handlers
*/
-#define tcp_is_vegas(__tp) ((__tp)->vegas.do_vegas)
-
-static inline void tcp_vegas_enable(struct tcp_opt *tp)
+#define TCP_CA_NAME_MAX 16
+struct tcp_congestion_ops {
+ struct list_head list;
+
+ /* initialize private data (optional) */
+ void (*init)(struct sock *sk);
+ /* cleanup private data (optional) */
+ void (*release)(struct sock *sk);
+
+ /* return slow start threshold (required) */
+ u32 (*ssthresh)(struct sock *sk);
+ /* lower bound for congestion window (optional) */
+ u32 (*min_cwnd)(struct sock *sk);
+ /* do new cwnd calculation (required) */
+ void (*cong_avoid)(struct sock *sk, u32 ack,
+ u32 rtt, u32 in_flight, int good_ack);
+ /* round trip time sample per acked packet (optional) */
+ void (*rtt_sample)(struct sock *sk, u32 usrtt);
+ /* call before changing ca_state (optional) */
+ void (*set_state)(struct sock *sk, u8 new_state);
+ /* call when cwnd event occurs (optional) */
+ void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+ /* new value of cwnd after loss (optional) */
+ u32 (*undo_cwnd)(struct sock *sk);
+ /* hook for packet ack accounting (optional) */
+ void (*pkts_acked)(struct sock *sk, u32 num_acked);
+ /* get info for inet_diag (optional) */
+ void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+
+ char name[TCP_CA_NAME_MAX];
+ struct module *owner;
+};
+
+extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+
+extern void tcp_init_congestion_control(struct sock *sk);
+extern void tcp_cleanup_congestion_control(struct sock *sk);
+extern int tcp_set_default_congestion_control(const char *name);
+extern void tcp_get_default_congestion_control(char *name);
+extern int tcp_set_congestion_control(struct sock *sk, const char *name);
+extern void tcp_slow_start(struct tcp_sock *tp);
+
+extern struct tcp_congestion_ops tcp_init_congestion_ops;
+extern u32 tcp_reno_ssthresh(struct sock *sk);
+extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
+ u32 rtt, u32 in_flight, int flag);
+extern u32 tcp_reno_min_cwnd(struct sock *sk);
+extern struct tcp_congestion_ops tcp_reno;
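A minimal sketch of a congestion control module built on the ops table above, simply wiring up the exported Reno helpers under a new name; this is an editorial illustration, not part of the patch, and the "example" name is hypothetical:

#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example = {
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* AIMD increase */
	.min_cwnd	= tcp_reno_min_cwnd,	/* cwnd floor after loss */
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_init(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_exit(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_init);
module_exit(tcp_example_exit);
MODULE_LICENSE("GPL");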
+
+static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
- /* There are several situations when we must "re-start" Vegas:
- *
- * o when a connection is established
- * o after an RTO
- * o after fast recovery
- * o when we send a packet and there is no outstanding
- * unacknowledged data (restarting an idle connection)
- *
- * In these circumstances we cannot do a Vegas calculation at the
- * end of the first RTT, because any calculation we do is using
- * stale info -- both the saved cwnd and congestion feedback are
- * stale.
- *
- * Instead we must wait until the completion of an RTT during
- * which we actually receive ACKs.
- */
-
- /* Begin taking Vegas samples next time we send something. */
- tp->vegas.doing_vegas_now = 1;
-
- /* Set the beginning of the next send window. */
- tp->vegas.beg_snd_nxt = tp->snd_nxt;
-
- tp->vegas.cntRTT = 0;
- tp->vegas.minRTT = 0x7fffffff;
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ca_ops->set_state)
+ icsk->icsk_ca_ops->set_state(sk, ca_state);
+ icsk->icsk_ca_state = ca_state;
}
-/* Should we be taking Vegas samples right now? */
-#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
+static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
-extern void tcp_vegas_init(struct tcp_opt *tp);
+ if (icsk->icsk_ca_ops->cwnd_event)
+ icsk->icsk_ca_ops->cwnd_event(sk, event);
+}
-static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
+/* This determines how many packets are "in the network" to the best
+ * of our knowledge. In many cases it is conservative, but where
+ * detailed information is available from the receiver (via SACK
+ * blocks etc.) we can make more aggressive calculations.
+ *
+ * Use this for decisions involving congestion control, use just
+ * tp->packets_out to determine if the send queue is empty or not.
+ *
+ * Read this equation as:
+ *
+ * "Packets sent once on transmission queue" MINUS
+ * "Packets left network, but not honestly ACKed yet" PLUS
+ * "Packets fast retransmitted"
+ */
+static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
- if (tcp_is_vegas(tp)) {
- if (ca_state == TCP_CA_Open)
- tcp_vegas_enable(tp);
- else
- tcp_vegas_disable(tp);
- }
- tp->ca_state = ca_state;
+ return (tp->packets_out - tp->left_out + tp->retrans_out);
}
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
* The exception is rate halving phase, when cwnd is decreasing towards
* ssthresh.
*/
-static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
+static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
- if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
+ const struct tcp_sock *tp = tcp_sk(sk);
+ if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
return tp->snd_ssthresh;
else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
-static inline void tcp_sync_left_out(struct tcp_opt *tp)
+static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
- if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
+ if (tp->rx_opt.sack_ok &&
+ (tp->sacked_out >= tp->packets_out - tp->lost_out))
tp->sacked_out = tp->packets_out - tp->lost_out;
tp->left_out = tp->sacked_out + tp->lost_out;
}
-extern void tcp_cwnd_application_limited(struct sock *sk);
-
-/* Congestion window validation. (RFC2861) */
-
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
-{
- if (tp->packets_out >= tp->snd_cwnd) {
-		/* Network is fed fully. */
- tp->snd_cwnd_used = 0;
- tp->snd_cwnd_stamp = tcp_time_stamp;
- } else {
- /* Network starves. */
- if (tp->packets_out > tp->snd_cwnd_used)
- tp->snd_cwnd_used = tp->packets_out;
-
- if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
- tcp_cwnd_application_limited(sk);
- }
-}
-
-/* Set slow start threshold and cwnd without falling back into slow start */
-static inline void __tcp_enter_cwr(struct tcp_opt *tp)
-{
- tp->undo_marker = 0;
- tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
- tp->snd_cwnd = min(tp->snd_cwnd,
- tcp_packets_in_flight(tp) + 1U);
- tp->snd_cwnd_cnt = 0;
- tp->high_seq = tp->snd_nxt;
- tp->snd_cwnd_stamp = tcp_time_stamp;
- TCP_ECN_queue_cwr(tp);
-}
-
-static inline void tcp_enter_cwr(struct tcp_opt *tp)
-{
- tp->prior_ssthresh = 0;
- if (tp->ca_state < TCP_CA_CWR) {
- __tcp_enter_cwr(tp);
- tcp_set_ca_state(tp, TCP_CA_CWR);
- }
-}
-
-extern __u32 tcp_init_cwnd(struct tcp_opt *tp, struct dst_entry *dst);
+extern void tcp_enter_cwr(struct sock *sk);
+extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto".
*/
-static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
+static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
return 3;
}
-static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
-{
- return after(tp->snd_sml,tp->snd_una) &&
- !after(tp->snd_sml, tp->snd_nxt);
-}
-
-static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
-{
- if (skb->len < mss)
- tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
-}
-
-/* Return 0 if the packet can be sent now without violating Nagle's rules:
- 1. It is full sized.
- 2. Or it contains FIN.
- 3. Or TCP_NODELAY was set.
- 4. Or TCP_CORK is not set, and all sent packets are ACKed.
- With Minshall's modification: all sent small packets are ACKed.
+/* RFC2861 Check whether we are limited by application or congestion window
+ * This is the inverse of cwnd check in tcp_tso_should_defer
*/
-
-static __inline__ int
-tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
+static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
- return (skb->len < mss_now &&
- !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
- ((nonagle&TCP_NAGLE_CORK) ||
- (!nonagle &&
- tp->packets_out &&
- tcp_minshall_check(tp))));
-}
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u32 left;
-/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
- * should be put on the wire right now.
- */
-static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
- unsigned cur_mss, int nonagle)
-{
- /* RFC 1122 - section 4.2.3.4
- *
- * We must queue if
- *
- * a) The right edge of this frame exceeds the window
- * b) There are packets in flight and we have a small segment
- * [SWS avoidance and Nagle algorithm]
- * (part of SWS is done on packetization)
-	 *    Minshall's version reads: there are no _small_
- * segments in flight. (tcp_nagle_check)
- * c) We have too many packets 'in flight'
- *
- * Don't use the nagle rule for urgent data (or
- * for the final FIN -DaveM).
- *
-	 * Also, the Nagle rule does not apply to frames which
-	 * sit in the middle of the queue (they have no chance
-	 * to get new data) and when the room at the tail of the
-	 * skb is not enough to save something seriously (<32 for now).
- */
+ if (in_flight >= tp->snd_cwnd)
+ return 1;
- /* Don't be strict about the congestion window for the
- * final FIN frame. -DaveM
- */
- return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode
- || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
- ((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
- (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
- !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
-}
+ if (!(sk->sk_route_caps & NETIF_F_TSO))
+ return 0;
-static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
-{
- if (!tp->packets_out && !tp->pending)
- tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+ left = tp->snd_cwnd - in_flight;
+ if (sysctl_tcp_tso_win_divisor)
+ return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
+ else
+ return left <= tcp_max_burst(tp);
}
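An editorial usage sketch of how a cong_avoid handler might gate growth on this check, assuming it delegates to the Reno helper; the handler name example_cong_avoid is hypothetical:

static void example_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
			       u32 in_flight, int good_ack)
{
	/* RFC2861: don't grow cwnd while the application, not the
	 * congestion window, is what limits the send rate.
	 */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	tcp_reno_cong_avoid(sk, ack, rtt, in_flight, good_ack);
}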
-static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
+ const struct sk_buff *skb)
{
- return skb->next == (struct sk_buff *)&sk->sk_write_queue;
+ if (skb->len < mss)
+ tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}
-/* Push out any pending frames which were held back due to
- * TCP_CORK or attempt at coalescing tiny packets.
- * The socket must be locked by the caller.
- */
-static __inline__ void __tcp_push_pending_frames(struct sock *sk,
- struct tcp_opt *tp,
- unsigned cur_mss,
- int nonagle)
+static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
- struct sk_buff *skb = sk->sk_send_head;
-
- if (skb) {
- if (!tcp_skb_is_last(sk, skb))
- nonagle = TCP_NAGLE_PUSH;
- if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
- tcp_write_xmit(sk, nonagle))
- tcp_check_probe_timer(sk, tp);
- }
- tcp_cwnd_validate(sk, tp);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ if (!tp->packets_out && !icsk->icsk_pending)
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ icsk->icsk_rto, TCP_RTO_MAX);
}
-static __inline__ void tcp_push_pending_frames(struct sock *sk,
- struct tcp_opt *tp)
+static inline void tcp_push_pending_frames(struct sock *sk,
+ struct tcp_sock *tp)
{
__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}
-static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
-{
- struct sk_buff *skb = sk->sk_send_head;
-
- return (skb &&
- tcp_snd_test(tp, skb, tcp_current_mss(sk, 1),
- tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
-}
-
-static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
+static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
-static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
+static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
-extern void tcp_destroy_sock(struct sock *sk);
-
-
/*
* Calculate(/check) TCP checksum
*/
-static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
- unsigned long saddr, unsigned long daddr,
- unsigned long base)
+static inline u16 tcp_v4_check(struct tcphdr *th, int len,
+ unsigned long saddr, unsigned long daddr,
+ unsigned long base)
{
return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}
-static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
+static inline int __tcp_checksum_complete(struct sk_buff *skb)
{
- return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+ return __skb_checksum_complete(skb);
}
-static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
+static inline int tcp_checksum_complete(struct sk_buff *skb)
{
return skb->ip_summed != CHECKSUM_UNNECESSARY &&
__tcp_checksum_complete(skb);
/* Prequeue for VJ style copy to user, combined with checksumming. */
-static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
+static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
tp->ucopy.task = NULL;
tp->ucopy.len = 0;
*
* NOTE: is this not too big to inline?
*/
-static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
if (!sysctl_tcp_low_latency && tp->ucopy.task) {
__skb_queue_tail(&tp->ucopy.prequeue, skb);
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
sk->sk_backlog_rcv(sk, skb1);
- NET_INC_STATS_BH(TCPPrequeueDropped);
+ NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
}
tp->ucopy.memory = 0;
} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
wake_up_interruptible(sk->sk_sleep);
- if (!tcp_ack_scheduled(tp))
- tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
+ if (!inet_csk_ack_scheduled(sk))
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ (3 * TCP_RTO_MIN) / 4,
+ TCP_RTO_MAX);
}
return 1;
}
#undef STATE_TRACE
#ifdef STATE_TRACE
-static char *statename[]={
+static const char *statename[]={
"Unused","Established","Syn Sent","Syn Recv",
"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
"Close Wait","Last ACK","Listen","Closing"
};
#endif
-static __inline__ void tcp_set_state(struct sock *sk, int state)
+static inline void tcp_set_state(struct sock *sk, int state)
{
int oldstate = sk->sk_state;
switch (state) {
case TCP_ESTABLISHED:
if (oldstate != TCP_ESTABLISHED)
- TCP_INC_STATS(TcpCurrEstab);
+ TCP_INC_STATS(TCP_MIB_CURRESTAB);
break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
- TCP_INC_STATS(TcpEstabResets);
+ TCP_INC_STATS(TCP_MIB_ESTABRESETS);
sk->sk_prot->unhash(sk);
- if (tcp_sk(sk)->bind_hash &&
+ if (inet_csk(sk)->icsk_bind_hash &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
- tcp_put_port(sk);
+ inet_put_port(&tcp_hashinfo, sk);
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
- TCP_DEC_STATS(TcpCurrEstab);
+ TCP_DEC_STATS(TCP_MIB_CURRESTAB);
}
/* Change state AFTER socket is unhashed to avoid closed
#endif
}
-static __inline__ void tcp_done(struct sock *sk)
+static inline void tcp_done(struct sock *sk)
{
tcp_set_state(sk, TCP_CLOSE);
tcp_clear_xmit_timers(sk);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
else
- tcp_destroy_sock(sk);
+ inet_csk_destroy_sock(sk);
}
-static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
+static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
- tp->dsack = 0;
- tp->eff_sacks = 0;
- tp->num_sacks = 0;
+ rx_opt->dsack = 0;
+ rx_opt->eff_sacks = 0;
+ rx_opt->num_sacks = 0;
}
-static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
-{
- if (tp->tstamp_ok) {
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) |
- TCPOLEN_TIMESTAMP);
- *ptr++ = htonl(tstamp);
- *ptr++ = htonl(tp->ts_recent);
- }
- if (tp->eff_sacks) {
- struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
- int this_sack;
-
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
- (TCPOPT_SACK << 8) |
- (TCPOLEN_SACK_BASE +
- (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
- for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
- *ptr++ = htonl(sp[this_sack].start_seq);
- *ptr++ = htonl(sp[this_sack].end_seq);
- }
- if (tp->dsack) {
- tp->dsack = 0;
- tp->eff_sacks--;
- }
- }
-}
-
-/* Construct a tcp options header for a SYN or SYN_ACK packet.
- * If this is ever changed, make sure to change the definition of
- * MAX_SYN_SIZE to match the new maximum number of options that you
- * can generate.
- */
-static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
- int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
-{
- /* We always get an MSS option.
-	 * The option bytes which will be seen in normal data
-	 * packets, should timestamps be used, must be included in
-	 * the advertised MSS. But we subtract them from tp->mss_cache so
- * that calculations in tcp_sendmsg are simpler etc.
- * So account for this fact here if necessary. If we
- * don't do this correctly, as a receiver we won't
- * recognize data packets as being full sized when we
- * should, and thus we won't abide by the delayed ACK
- * rules correctly.
- * SACKs don't matter, we never delay an ACK when we
- * have any of those going out.
- */
- *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
- if (ts) {
- if(sack)
- *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
- else
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
- *ptr++ = htonl(tstamp); /* TSVAL */
- *ptr++ = htonl(ts_recent); /* TSECR */
- } else if(sack)
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
- if (offer_wscale)
- *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
-}
-
-/* Determine a window scaling and initial window to offer.
- * Based on the assumption that the given amount of space
- * will be offered. Store the results in the tp structure.
- * NOTE: for smooth operation initial space offering should
- * be a multiple of mss if possible. We assume here that mss >= 1.
- * This MUST be enforced by all callers.
- */
-static inline void tcp_select_initial_window(int __space, __u32 mss,
- __u32 *rcv_wnd,
- __u32 *window_clamp,
- int wscale_ok,
- __u8 *rcv_wscale)
-{
- unsigned int space = (__space < 0 ? 0 : __space);
-
- /* If no clamp set the clamp to the max possible scaled window */
- if (*window_clamp == 0)
- (*window_clamp) = (65535 << 14);
- space = min(*window_clamp, space);
-
- /* Quantize space offering to a multiple of mss if possible. */
- if (space > mss)
- space = (space / mss) * mss;
-
- /* NOTE: offering an initial window larger than 32767
- * will break some buggy TCP stacks. We try to be nice.
- * If we are not window scaling, then this truncates
- * our initial window offering to 32k. There should also
- * be a sysctl option to stop being nice.
- */
- (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
- (*rcv_wscale) = 0;
- if (wscale_ok) {
- /* See RFC1323 for an explanation of the limit to 14 */
- while (space > 65535 && (*rcv_wscale) < 14) {
- space >>= 1;
- (*rcv_wscale)++;
- }
- if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
- space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
- (*rcv_wscale)--;
-
- *rcv_wscale = max((__u8)sysctl_tcp_default_win_scale,
- *rcv_wscale);
- }
-
-	/* Set initial window to a value enough for senders
-	 * following RFC2414. Senders not following this RFC
-	 * will be satisfied with 2.
- */
- if (mss > (1<<*rcv_wscale)) {
- int init_cwnd = 4;
- if (mss > 1460*3)
- init_cwnd = 2;
- else if (mss > 1460)
- init_cwnd = 3;
- if (*rcv_wnd > init_cwnd*mss)
- *rcv_wnd = init_cwnd*mss;
- }
- /* Set the clamp no higher than max representable value */
- (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
-}
+/* Determine a window scaling and initial window to offer. */
+extern void tcp_select_initial_window(int __space, __u32 mss,
+ __u32 *rcv_wnd, __u32 *window_clamp,
+ int wscale_ok, __u8 *rcv_wscale);
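A worked editorial example of what the now out-of-line helper computes, with assumed inputs:

/*
 * space = 262144 (256KB), wscale_ok = 1, *window_clamp = 0:
 *   262144 -> 131072 (wscale 1) -> 65536 (wscale 2) -> 32768 (wscale 3)
 * The scaling loop stops once space fits in 16 bits, so rcv_wscale = 3
 * and the peer interprets the advertised window in units of 1 << 3 = 8
 * bytes.
 */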
static inline int tcp_win_from_space(int space)
{
}
/* Note: caller must be prepared to deal with negative returns */
-static inline int tcp_space(struct sock *sk)
+static inline int tcp_space(const struct sock *sk)
{
return tcp_win_from_space(sk->sk_rcvbuf -
atomic_read(&sk->sk_rmem_alloc));
}
-static inline int tcp_full_space( struct sock *sk)
+static inline int tcp_full_space(const struct sock *sk)
{
return tcp_win_from_space(sk->sk_rcvbuf);
}
-struct tcp_listen_opt
-{
- u8 max_qlen_log; /* log_2 of maximal queued SYNs */
- int qlen;
-#ifdef CONFIG_ACCEPT_QUEUES
- int qlen_young[NUM_ACCEPT_QUEUES];
-#else
- int qlen_young;
-#endif
- int clock_hand;
- u32 hash_rnd;
- struct open_request *syn_table[TCP_SYNQ_HSIZE];
-};
-
-#ifdef CONFIG_ACCEPT_QUEUES
-static inline void sk_acceptq_removed(struct sock *sk, int class)
-{
- tcp_sk(sk)->acceptq[class].aq_backlog--;
-}
-
-static inline void sk_acceptq_added(struct sock *sk, int class)
-{
- tcp_sk(sk)->acceptq[class].aq_backlog++;
-}
-
-static inline int sk_acceptq_is_full(struct sock *sk, int class)
-{
- return tcp_sk(sk)->acceptq[class].aq_backlog >
- sk->sk_max_ack_backlog;
-}
-
-static inline void tcp_set_acceptq(struct tcp_opt *tp, struct open_request *req)
-{
- int class = req->acceptq_class;
- int prev_class;
-
- if (!tp->acceptq[class].aq_ratio) {
- req->acceptq_class = 0;
- class = 0;
- }
-
- tp->acceptq[class].aq_qcount++;
- req->acceptq_time_stamp = jiffies;
-
- if (tp->acceptq[class].aq_tail) {
- req->dl_next = tp->acceptq[class].aq_tail->dl_next;
- tp->acceptq[class].aq_tail->dl_next = req;
- tp->acceptq[class].aq_tail = req;
- } else { /* if first request in the class */
- tp->acceptq[class].aq_head = req;
- tp->acceptq[class].aq_tail = req;
-
- prev_class = class - 1;
- while (prev_class >= 0) {
- if (tp->acceptq[prev_class].aq_tail)
- break;
- prev_class--;
- }
- if (prev_class < 0) {
- req->dl_next = tp->accept_queue;
- tp->accept_queue = req;
- }
- else {
- req->dl_next = tp->acceptq[prev_class].aq_tail->dl_next;
- tp->acceptq[prev_class].aq_tail->dl_next = req;
- }
- }
-}
-static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
- struct sock *child)
-{
- tcp_set_acceptq(tcp_sk(sk),req);
- req->sk = child;
- sk_acceptq_added(sk,req->acceptq_class);
-}
-
-#else
-static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
- struct sock *child)
-{
- struct tcp_opt *tp = tcp_sk(sk);
-
- req->sk = child;
- sk_acceptq_added(sk);
-
- if (!tp->accept_queue_tail) {
- tp->accept_queue = req;
- } else {
- tp->accept_queue_tail->dl_next = req;
- }
- tp->accept_queue_tail = req;
- req->dl_next = NULL;
-}
-
-#endif
-
-
-#ifdef CONFIG_ACCEPT_QUEUES
-static inline void
-tcp_synq_removed(struct sock *sk, struct open_request *req)
-{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (--lopt->qlen == 0)
- tcp_delete_keepalive_timer(sk);
- if (req->retrans == 0)
- lopt->qlen_young[req->acceptq_class]--;
-}
-
-static inline void tcp_synq_added(struct sock *sk, struct open_request *req)
-{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (lopt->qlen++ == 0)
- tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
- lopt->qlen_young[req->acceptq_class]++;
-}
-
-static inline int tcp_synq_len(struct sock *sk)
-{
- return tcp_sk(sk)->listen_opt->qlen;
-}
-
-static inline int tcp_synq_young(struct sock *sk, int class)
-{
- return tcp_sk(sk)->listen_opt->qlen_young[class];
-}
-
-#else
-
-static inline void
-tcp_synq_removed(struct sock *sk, struct open_request *req)
-{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (--lopt->qlen == 0)
- tcp_delete_keepalive_timer(sk);
- if (req->retrans == 0)
- lopt->qlen_young--;
-}
-
-static inline void tcp_synq_added(struct sock *sk)
-{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (lopt->qlen++ == 0)
- tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
- lopt->qlen_young++;
-}
-
-static inline int tcp_synq_len(struct sock *sk)
-{
- return tcp_sk(sk)->listen_opt->qlen;
-}
-
-static inline int tcp_synq_young(struct sock *sk)
-{
- return tcp_sk(sk)->listen_opt->qlen_young;
-}
-#endif
-
-static inline int tcp_synq_is_full(struct sock *sk)
-{
- return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
-}
-
-static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
- struct open_request **prev)
+static inline void tcp_openreq_init(struct request_sock *req,
+ struct tcp_options_received *rx_opt,
+ struct sk_buff *skb)
{
- write_lock(&tp->syn_wait_lock);
- *prev = req->dl_next;
- write_unlock(&tp->syn_wait_lock);
-}
+ struct inet_request_sock *ireq = inet_rsk(req);
-static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
- struct open_request **prev)
-{
- tcp_synq_unlink(tcp_sk(sk), req, prev);
- tcp_synq_removed(sk, req);
- tcp_openreq_free(req);
-}
-
-static __inline__ void tcp_openreq_init(struct open_request *req,
- struct tcp_opt *tp,
- struct sk_buff *skb)
-{
req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
- req->rcv_isn = TCP_SKB_CB(skb)->seq;
- req->mss = tp->mss_clamp;
- req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
- req->tstamp_ok = tp->tstamp_ok;
- req->sack_ok = tp->sack_ok;
- req->snd_wscale = tp->snd_wscale;
- req->wscale_ok = tp->wscale_ok;
- req->acked = 0;
- req->ecn_ok = 0;
- req->rmt_port = skb->h.th->source;
+ tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+ req->mss = rx_opt->mss_clamp;
+ req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+ ireq->tstamp_ok = rx_opt->tstamp_ok;
+ ireq->sack_ok = rx_opt->sack_ok;
+ ireq->snd_wscale = rx_opt->snd_wscale;
+ ireq->wscale_ok = rx_opt->wscale_ok;
+ ireq->acked = 0;
+ ireq->ecn_ok = 0;
+ ireq->rmt_port = skb->h.th->source;
}
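A sketch of the intended IPv4 call site, roughly what
tcp_v4_conn_request() does; allocation-failure handling and the
tcp_parse_options() step that fills tmp_opt are elided:

	struct tcp_options_received tmp_opt;
	struct request_sock *req;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (req != NULL) {
		/* tmp_opt was filled in by tcp_parse_options() */
		tcp_openreq_init(req, &tmp_opt, skb);
	}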
extern void tcp_enter_memory_pressure(void);
-extern void tcp_listen_wlock(void);
-
-/* - We may sleep inside this lock.
- * - If sleeping is not required (or called from BH),
- * use plain read_(un)lock(&tcp_lhash_lock).
- */
-
-static inline void tcp_listen_lock(void)
-{
- /* read_lock synchronizes to candidates to writers */
- read_lock(&tcp_lhash_lock);
- atomic_inc(&tcp_lhash_users);
- read_unlock(&tcp_lhash_lock);
-}
-
-static inline void tcp_listen_unlock(void)
-{
- if (atomic_dec_and_test(&tcp_lhash_users))
- wake_up(&tcp_lhash_wait);
-}
-
-static inline int keepalive_intvl_when(struct tcp_opt *tp)
+static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}
-static inline int keepalive_time_when(struct tcp_opt *tp)
+static inline int keepalive_time_when(const struct tcp_sock *tp)
{
return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}
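For reference, the keepalive timer consumes these helpers roughly as
follows (a sketch of the tcp_keepalive_timer() logic; variable names
illustrative):

	u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;

	if (elapsed >= keepalive_time_when(tp)) {
		/* idle long enough: send a probe, then re-probe
		 * every keepalive_intvl_when(tp) jiffies */
	} else {
		/* re-arm for the remainder of the idle period */
		elapsed = keepalive_time_when(tp) - elapsed;
	}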
-static inline int tcp_fin_time(struct tcp_opt *tp)
+static inline int tcp_fin_time(const struct sock *sk)
{
- int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+ int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+ const int rto = inet_csk(sk)->icsk_rto;
- if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
- fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+ if (fin_timeout < (rto << 2) - (rto >> 1))
+ fin_timeout = (rto << 2) - (rto >> 1);
return fin_timeout;
}
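The (rto << 2) - (rto >> 1) floor is 3.5*RTO, so a FIN_WAIT_2 socket
always outlives a plausible retransmission of the peer's FIN. A sketch
of how tcp_close() is expected to use the result:

	const int tmo = tcp_fin_time(sk);

	if (tmo > TCP_TIMEWAIT_LEN)
		/* long linger: keep the full socket around first */
		inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
	else
		/* short linger: substitute a timewait bucket now */
		tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);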
-static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
+static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
- if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
+ if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
return 0;
- if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
+ if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
return 0;
/* RST segments are not recommended to carry timestamps;
however, we can relax the time bounds for RST segments to MSL.
*/
- if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
+ if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
return 0;
return 1;
}
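Note the return convention: nonzero means the segment fails PAWS and
should be rejected. A sketch of a consumer in the style of
tcp_timewait_state_process(), assuming parsed options in tmp_opt and
the TCP header in th:

	int paws_reject = 0;

	if (tmp_opt.saw_tstamp)
		paws_reject = tcp_paws_check(&tmp_opt, th->rst);
	if (paws_reject) {
		/* timestamp older than ts_recent, and recent enough
		 * that it cannot be a wrap: drop the segment */
	}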
-static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
-{
- sk->sk_route_caps = dst->dev->features;
- if (sk->sk_route_caps & NETIF_F_TSO) {
- if (sk->sk_no_largesend || dst->header_len)
- sk->sk_route_caps &= ~NETIF_F_TSO;
- }
-}
-
#define TCP_CHECK_TIMER(sk) do { } while (0)
static inline int tcp_use_frto(const struct sock *sk)
{
- const struct tcp_opt *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* F-RTO must be activated in sysctl and there must be some
 * unsent new data, and the advertised window should allow
 * sending it.
 */
return (sysctl_tcp_frto && sk->sk_send_head &&
	!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
	       tp->snd_una + tp->snd_wnd));
}
static inline void tcp_mib_init(void)
{
/* See RFC 2012 */
- TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
- TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
- TCP_ADD_STATS_USER(TcpMaxConn, -1);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
+}
+
+/* From STCP: hints that cache positions in the retransmit queue. */
+static inline void clear_all_retrans_hints(struct tcp_sock *tp)
+{
+ tp->lost_skb_hint = NULL;
+ tp->scoreboard_skb_hint = NULL;
+ tp->retransmit_skb_hint = NULL;
+ tp->forward_skb_hint = NULL;
+ tp->fastpath_skb_hint = NULL;
}
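These hints cache positions in the retransmit queue, so any event that
reshapes the queue or rewrites the SACK scoreboard must invalidate
them; a hypothetical call site:

	/* e.g. after trimming or collapsing the retransmit queue
	 * (illustrative): */
	clear_all_retrans_hints(tp);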
/* /proc */
extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
-/* TCP Westwood functions and constants */
-
-#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
-#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
-
-static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
-{
- if (sysctl_tcp_westwood)
- tp->westwood.rtt = rtt_seq;
-}
-
-void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
-void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
-
-static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
-{
- if (sysctl_tcp_westwood)
- __tcp_westwood_fast_bw(sk, skb);
-}
+extern struct request_sock_ops tcp_request_sock_ops;
-static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
-{
- if (sysctl_tcp_westwood)
- __tcp_westwood_slow_bw(sk, skb);
-}
-
-static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
-{
- return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
- (__u32) (tp->mss_cache),
- 2U);
-}
+extern int tcp_v4_destroy_sock(struct sock *sk);
-static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
-{
- return sysctl_tcp_westwood ? __tcp_westwood_bw_rttmin(tp) : 0;
-}
-
-static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
-{
- __u32 ssthresh = 0;
-
- if (sysctl_tcp_westwood) {
- ssthresh = __tcp_westwood_bw_rttmin(tp);
- if (ssthresh)
- tp->snd_ssthresh = ssthresh;
- }
-
- return (ssthresh != 0);
-}
-
-static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
-{
- __u32 cwnd = 0;
+#ifdef CONFIG_PROC_FS
+extern int tcp4_proc_init(void);
+extern void tcp4_proc_exit(void);
+#endif
- if (sysctl_tcp_westwood) {
- cwnd = __tcp_westwood_bw_rttmin(tp);
- if (cwnd)
- tp->snd_cwnd = cwnd;
- }
+extern void tcp_v4_init(struct net_proto_family *ops);
+extern void tcp_init(void);
- return (cwnd != 0);
-}
#endif /* _TCP_H */