extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
extern void tcp_bucket_unlock(struct sock *sk);
extern int tcp_port_rover;
-extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
/* These are AF independent. */
static __inline__ int tcp_bhashfn(__u16 lport)
unsigned char tw_rcv_wscale;
__u16 tw_sport;
/* Socket demultiplex comparisons on incoming packets. */
- /* these five are in inet_opt */
+ /* these five are in inet_sock */
__u32 tw_daddr
__attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
__u32 tw_rcv_saddr;
extern atomic_t tcp_orphan_count;
extern int tcp_tw_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
-extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
(((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
- !ipv6_addr_cmp(&inet6_sk(__sk)->daddr, (__saddr)) && \
- !ipv6_addr_cmp(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
+ ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
+ ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
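/*
 * Illustrative sketch (hypothetical helper, not from this header): the
 * __ports value compared above is the two 16-bit port fields packed into
 * one 32-bit word with the same in-memory layout as inet_sock's dport and
 * the 16-bit port field that follows it, so a single 32-bit compare checks
 * both ports at once.  The kernel builds this value with an endian-aware
 * macro; the memcpy below only demonstrates the layout trick.
 */
static inline __u32 example_combined_ports(__u16 first_port, __u16 second_port)
{
	__u16 pair[2] = { first_port, second_port };
	__u32 packed;

	memcpy(&packed, pair, sizeof(packed));	/* one load covers both ports */
	return packed;
}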
/* These can have wildcards, don't try too hard. */
so that we select the tick to get a range of about 4 seconds.
*/
-#if HZ <= 16 || HZ > 4096
-# error Unsupported: HZ <= 16 or HZ > 4096
+#if HZ <= 16 || HZ > 32768
+# error Unsupported: HZ <= 16 or HZ > 32768
#elif HZ <= 32
# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 64
# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 2048
# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
-#else
+#elif HZ <= 4096
# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 8192
+# define TCP_TW_RECYCLE_TICK (13+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ <= 16384
+# define TCP_TW_RECYCLE_TICK (14+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#else
+# define TCP_TW_RECYCLE_TICK (15+2-TCP_TW_RECYCLE_SLOTS_LOG)
#endif
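/*
 * Worked example (editorial note, not kernel code): assuming
 * TCP_TW_RECYCLE_SLOTS_LOG is 5, i.e. 32 slots, HZ == 2048 selects
 * TCP_TW_RECYCLE_TICK = 11+2-5 = 8, so each slot spans 2^8 = 256 jiffies
 * = 125 ms and the whole recycle wheel covers 32 * 125 ms = 4 seconds,
 * matching the "about 4 seconds" comment above.
 */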
-#define BICTCP_1_OVER_BETA 8 /*
- * Fast recovery
- * multiplicative decrease factor
+#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
+ * max_cwnd = snd_cwnd * beta
*/
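/*
 * Worked example (editorial sketch; the beta default of 819 is an
 * assumption, check sysctl_tcp_bic_beta in your tree): beta is scaled by
 * BICTCP_BETA_SCALE, so 819/1024 is roughly 0.8.  The constants below are
 * hypothetical and only spell out the arithmetic used further down in
 * tcp_recalc_ssthresh().
 */
enum {
	example_bic_beta	= 819,	/* assumed default, beta ~= 0.8 */
	example_cwnd		= 100,
	/* multiplicative decrease: (100 * 819) / 1024 == 79 */
	example_new_ssthresh	= (example_cwnd * example_bic_beta) / BICTCP_BETA_SCALE,
	/* fast convergence: (100 * (1024 + 819)) / (2 * 1024) == 89 */
	example_last_max_cwnd	= (example_cwnd * (BICTCP_BETA_SCALE + example_bic_beta))
				  / (2 * BICTCP_BETA_SCALE),
};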
#define BICTCP_MAX_INCREMENT 32 /*
* Limit on the amount of
extern int sysctl_tcp_bic;
extern int sysctl_tcp_bic_fast_convergence;
extern int sysctl_tcp_bic_low_window;
+extern int sysctl_tcp_bic_beta;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
TCP_ACK_PUSHED = 4
};
-static inline void tcp_schedule_ack(struct tcp_opt *tp)
+static inline void tcp_schedule_ack(struct tcp_sock *tp)
{
tp->ack.pending |= TCP_ACK_SCHED;
}
-static inline int tcp_ack_scheduled(struct tcp_opt *tp)
+static inline int tcp_ack_scheduled(struct tcp_sock *tp)
{
return tp->ack.pending&TCP_ACK_SCHED;
}
-static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
+static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp)
{
if (tp->ack.quick && --tp->ack.quick == 0) {
/* Leaving quickack mode we deflate ATO. */
}
}
-extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
+extern void tcp_enter_quickack_mode(struct tcp_sock *tp);
-static __inline__ void tcp_delack_init(struct tcp_opt *tp)
+static __inline__ void tcp_delack_init(struct tcp_sock *tp)
{
memset(&tp->ack, 0, sizeof(tp->ack));
}
-static inline void tcp_clear_options(struct tcp_opt *tp)
+static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
- tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
+ rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}
enum tcp_tw_status
struct sk_buff *skb);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_opt *tp);
+extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk,
extern int tcp_listen_start(struct sock *sk);
extern void tcp_parse_options(struct sk_buff *skb,
- struct tcp_opt *tp,
+ struct tcp_options_received *opt_rx,
int estab);
/*
extern void tcp_init_xmit_timers(struct sock *);
extern void tcp_clear_xmit_timers(struct sock *);
-extern void tcp_delete_keepalive_timer (struct sock *);
-extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
+extern void tcp_delete_keepalive_timer(struct sock *);
+extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk, int large);
-extern const char timer_bug_msg[];
+#ifdef TCP_DEBUG
+extern const char tcp_timer_bug_msg[];
+#endif
/* tcp_diag.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);
static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
switch (what) {
case TCP_TIME_RETRANS:
#endif
break;
default:
- printk(timer_bug_msg);
+#ifdef TCP_DEBUG
+ printk(tcp_timer_bug_msg);
+#endif
return;
};
*/
static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
if (when > TCP_RTO_MAX) {
#ifdef TCP_DEBUG
break;
default:
- printk(timer_bug_msg);
+#ifdef TCP_DEBUG
+ printk(tcp_timer_bug_msg);
+#endif
};
}
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
unsigned int hint = min(tp->advmss, tp->mss_cache_std);
hint = min(hint, tp->rcv_wnd/2);
tp->ack.rcv_mss = hint;
}
-static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
+static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
}
-static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
+static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
{
- __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
+ __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
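/*
 * Worked example (editorial note): pred_flags is the expected value of
 * the TCP header word holding data offset, flags and window, used by the
 * receive fast path.  With timestamps on, tcp_header_len is 20 + 12 = 32
 * bytes, so tp->tcp_header_len << 26 equals (32/4) << 28, i.e. a data
 * offset of 8; an incoming segment takes the fast path only if it carries
 * exactly ACK, that header length and the remembered window.
 */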
-static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
+static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
tp->rcv_wnd &&
* Rcv_nxt can be after the window if our peer pushes more data
* than the offered window.
*/
-static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
+static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
{
s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
/* Due to TSO, an SKB can be composed of multiple actual
* packets. To keep these tracked properly, we use this.
*/
-static inline int tcp_skb_pcount(struct sk_buff *skb)
+static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
return skb_shinfo(skb)->tso_segs;
}
/* This is valid iff tcp_skb_pcount() > 1. */
-static inline int tcp_skb_mss(struct sk_buff *skb)
+static inline int tcp_skb_mss(const struct sk_buff *skb)
{
return skb_shinfo(skb)->tso_size;
}
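/*
 * Illustrative note (editorial, figures are made up): a TSO super-packet
 * carrying 4380 bytes of data built with a per-segment size of 1460 has
 * tso_size == 1460 and tso_segs == 3, so tcp_skb_mss() reports 1460 and
 * tcp_skb_pcount() counts it as 3 packets for the accounting below.
 */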
-static inline void tcp_inc_pcount(tcp_pcount_t *count, struct sk_buff *skb)
-{
- count->val += tcp_skb_pcount(skb);
-}
-
-static inline void tcp_inc_pcount_explicit(tcp_pcount_t *count, int amt)
-{
- count->val += amt;
-}
-
-static inline void tcp_dec_pcount_explicit(tcp_pcount_t *count, int amt)
-{
- count->val -= amt;
-}
-
-static inline void tcp_dec_pcount(tcp_pcount_t *count, struct sk_buff *skb)
+static inline void tcp_dec_pcount_approx(__u32 *count,
+ const struct sk_buff *skb)
{
- count->val -= tcp_skb_pcount(skb);
-}
-
-static inline void tcp_dec_pcount_approx(tcp_pcount_t *count,
- struct sk_buff *skb)
-{
- if (count->val) {
- count->val -= tcp_skb_pcount(skb);
- if ((int)count->val < 0)
- count->val = 0;
+ if (*count) {
+ *count -= tcp_skb_pcount(skb);
+ if ((int)*count < 0)
+ *count = 0;
}
}
-static inline __u32 tcp_get_pcount(tcp_pcount_t *count)
-{
- return count->val;
-}
-
-static inline void tcp_set_pcount(tcp_pcount_t *count, __u32 val)
-{
- count->val = val;
-}
-
-static inline void tcp_packets_out_inc(struct sock *sk, struct tcp_opt *tp,
- struct sk_buff *skb)
+static inline void tcp_packets_out_inc(struct sock *sk,
+ struct tcp_sock *tp,
+ const struct sk_buff *skb)
{
- int orig = tcp_get_pcount(&tp->packets_out);
+ int orig = tp->packets_out;
- tcp_inc_pcount(&tp->packets_out, skb);
+ tp->packets_out += tcp_skb_pcount(skb);
if (!orig)
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
}
-static inline void tcp_packets_out_dec(struct tcp_opt *tp, struct sk_buff *skb)
+static inline void tcp_packets_out_dec(struct tcp_sock *tp,
+ const struct sk_buff *skb)
{
- tcp_dec_pcount(&tp->packets_out, skb);
+ tp->packets_out -= tcp_skb_pcount(skb);
}
/* This determines how many packets are "in the network" to the best
* "Packets left network, but not honestly ACKed yet" PLUS
* "Packets fast retransmitted"
*/
-static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
+static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
- return (tcp_get_pcount(&tp->packets_out) -
- tcp_get_pcount(&tp->left_out) +
- tcp_get_pcount(&tp->retrans_out));
+ return (tp->packets_out - tp->left_out + tp->retrans_out);
}
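/*
 * Worked example (editorial note): with packets_out == 10, of which
 * sacked_out == 2 and lost_out == 1 (so left_out == 3, see
 * tcp_sync_left_out() below), and retrans_out == 1 retransmission still
 * unacknowledged, tcp_packets_in_flight() reports 10 - 3 + 1 == 8.
 */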
/*
* behave like Reno until low_window is reached,
* then increase congestion window slowly
*/
-static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
+static inline __u32 tcp_recalc_ssthresh(struct tcp_sock *tp)
{
if (tcp_is_bic(tp)) {
if (sysctl_tcp_bic_fast_convergence &&
tp->snd_cwnd < tp->bictcp.last_max_cwnd)
- tp->bictcp.last_max_cwnd
- = (tp->snd_cwnd * (2*BICTCP_1_OVER_BETA-1))
- / (BICTCP_1_OVER_BETA/2);
+ tp->bictcp.last_max_cwnd = (tp->snd_cwnd *
+ (BICTCP_BETA_SCALE
+ + sysctl_tcp_bic_beta))
+ / (2 * BICTCP_BETA_SCALE);
else
tp->bictcp.last_max_cwnd = tp->snd_cwnd;
if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
- return max(tp->snd_cwnd - (tp->snd_cwnd/BICTCP_1_OVER_BETA),
- 2U);
+ return max((tp->snd_cwnd * sysctl_tcp_bic_beta)
+ / BICTCP_BETA_SCALE, 2U);
}
return max(tp->snd_cwnd >> 1U, 2U);
/* Stop taking Vegas samples for now. */
#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
-static inline void tcp_vegas_enable(struct tcp_opt *tp)
+static inline void tcp_vegas_enable(struct tcp_sock *tp)
{
/* There are several situations when we must "re-start" Vegas:
*
/* Should we be taking Vegas samples right now? */
#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
-extern void tcp_ca_init(struct tcp_opt *tp);
+extern void tcp_ca_init(struct tcp_sock *tp);
-static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
+static inline void tcp_set_ca_state(struct tcp_sock *tp, u8 ca_state)
{
if (tcp_is_vegas(tp)) {
if (ca_state == TCP_CA_Open)
* The exception is the rate halving phase, when cwnd is decreasing towards
* ssthresh.
*/
-static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
+static inline __u32 tcp_current_ssthresh(struct tcp_sock *tp)
{
if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
return tp->snd_ssthresh;
(tp->snd_cwnd >> 2)));
}
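/*
 * Worked example (editorial note): outside CWR/Recovery the expression
 * above is max(snd_ssthresh, snd_cwnd/2 + snd_cwnd/4), i.e. 3/4 of the
 * current cwnd; with snd_cwnd == 40 and snd_ssthresh == 20 it returns 30.
 */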
-static inline void tcp_sync_left_out(struct tcp_opt *tp)
+static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
- if (tp->sack_ok &&
- (tcp_get_pcount(&tp->sacked_out) >=
- tcp_get_pcount(&tp->packets_out) - tcp_get_pcount(&tp->lost_out)))
- tcp_set_pcount(&tp->sacked_out,
- (tcp_get_pcount(&tp->packets_out) -
- tcp_get_pcount(&tp->lost_out)));
- tcp_set_pcount(&tp->left_out,
- (tcp_get_pcount(&tp->sacked_out) +
- tcp_get_pcount(&tp->lost_out)));
+ if (tp->rx_opt.sack_ok &&
+ (tp->sacked_out >= tp->packets_out - tp->lost_out))
+ tp->sacked_out = tp->packets_out - tp->lost_out;
+ tp->left_out = tp->sacked_out + tp->lost_out;
}
extern void tcp_cwnd_application_limited(struct sock *sk);
/* Congestion window validation. (RFC2861) */
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
+static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
- __u32 packets_out = tcp_get_pcount(&tp->packets_out);
+ __u32 packets_out = tp->packets_out;
if (packets_out >= tp->snd_cwnd) {
/* Network is fed fully. */
tp->snd_cwnd_stamp = tcp_time_stamp;
} else {
/* Network starves. */
- if (tcp_get_pcount(&tp->packets_out) > tp->snd_cwnd_used)
- tp->snd_cwnd_used = tcp_get_pcount(&tp->packets_out);
+ if (tp->packets_out > tp->snd_cwnd_used)
+ tp->snd_cwnd_used = tp->packets_out;
if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
tcp_cwnd_application_limited(sk);
}
/* Set slow start threshold and cwnd not falling to slow start */
-static inline void __tcp_enter_cwr(struct tcp_opt *tp)
+static inline void __tcp_enter_cwr(struct tcp_sock *tp)
{
tp->undo_marker = 0;
tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
TCP_ECN_queue_cwr(tp);
}
-static inline void tcp_enter_cwr(struct tcp_opt *tp)
+static inline void tcp_enter_cwr(struct tcp_sock *tp)
{
tp->prior_ssthresh = 0;
if (tp->ca_state < TCP_CA_CWR) {
}
}
-extern __u32 tcp_init_cwnd(struct tcp_opt *tp, struct dst_entry *dst);
+extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto".
*/
-static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
+static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
return 3;
}
-static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
+static __inline__ int tcp_minshall_check(const struct tcp_sock *tp)
{
return after(tp->snd_sml,tp->snd_una) &&
!after(tp->snd_sml, tp->snd_nxt);
}
-static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
+static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
+ const struct sk_buff *skb)
{
if (skb->len < mss)
tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
*/
static __inline__ int
-tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
+tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb,
+ unsigned mss_now, int nonagle)
{
return (skb->len < mss_now &&
!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
((nonagle&TCP_NAGLE_CORK) ||
(!nonagle &&
- tcp_get_pcount(&tp->packets_out) &&
+ tp->packets_out &&
tcp_minshall_check(tp))));
}
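/*
 * Simplified sketch of the rule above (hypothetical helper using plain
 * ints instead of the real structures): a sub-MSS segment without FIN is
 * held back while TCP_CORK is set, or under plain Nagle when small
 * packets are already in flight (the Minshall check).
 */
static inline int example_nagle_defers(int len, int mss, int has_fin,
				       int corked, int nagle_enabled,
				       int small_pkts_in_flight)
{
	return len < mss && !has_fin &&
	       (corked || (nagle_enabled && small_pkts_in_flight));
}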
/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
* should be put on the wire right now.
*/
-static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
+static __inline__ int tcp_snd_test(const struct tcp_sock *tp,
+ struct sk_buff *skb,
unsigned cur_mss, int nonagle)
{
int pkts = tcp_skb_pcount(skb);
!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}
-static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
+static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
- if (!tcp_get_pcount(&tp->packets_out) && !tp->pending)
+ if (!tp->packets_out && !tp->pending)
tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
}
-static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
+static __inline__ int tcp_skb_is_last(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}
* The socket must be locked by the caller.
*/
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
- struct tcp_opt *tp,
+ struct tcp_sock *tp,
unsigned cur_mss,
int nonagle)
{
}
static __inline__ void tcp_push_pending_frames(struct sock *sk,
- struct tcp_opt *tp)
+ struct tcp_sock *tp)
{
__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}
-static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
+static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
struct sk_buff *skb = sk->sk_send_head;
tcp_skb_is_last(sk, skb) ? TCP_NAGLE_PUSH : tp->nonagle));
}
-static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
+static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
-static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
+static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
-extern void tcp_destroy_sock(struct sock *sk);
+extern void tcp_destroy_sock(struct sock *sk);
/*
/* Prequeue for VJ style copy to user, combined with checksumming. */
-static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
+static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
{
tp->ucopy.task = NULL;
tp->ucopy.len = 0;
*/
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
if (!sysctl_tcp_low_latency && tp->ucopy.task) {
__skb_queue_tail(&tp->ucopy.prequeue, skb);
#undef STATE_TRACE
#ifdef STATE_TRACE
-static char *statename[]={
+static const char *statename[]={
"Unused","Established","Syn Sent","Syn Recv",
"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
"Close Wait","Last ACK","Listen","Closing"
tcp_destroy_sock(sk);
}
-static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
+static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
- tp->dsack = 0;
- tp->eff_sacks = 0;
- tp->num_sacks = 0;
+ rx_opt->dsack = 0;
+ rx_opt->eff_sacks = 0;
+ rx_opt->num_sacks = 0;
}
-static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
+static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
{
- if (tp->tstamp_ok) {
+ if (tp->rx_opt.tstamp_ok) {
*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
*ptr++ = htonl(tstamp);
- *ptr++ = htonl(tp->ts_recent);
+ *ptr++ = htonl(tp->rx_opt.ts_recent);
}
- if (tp->eff_sacks) {
- struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
+ if (tp->rx_opt.eff_sacks) {
+ struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
int this_sack;
*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_SACK << 8) |
(TCPOLEN_SACK_BASE +
- (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
- for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
+ (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
+ for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
*ptr++ = htonl(sp[this_sack].start_seq);
*ptr++ = htonl(sp[this_sack].end_seq);
}
- if (tp->dsack) {
- tp->dsack = 0;
- tp->eff_sacks--;
+ if (tp->rx_opt.dsack) {
+ tp->rx_opt.dsack = 0;
+ tp->rx_opt.eff_sacks--;
}
}
}
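/*
 * Worked example (editorial note): with timestamps negotiated the block
 * above emits the classic 12-byte option sequence
 *	01 01 08 0a <tsval:4> <tsecr:4>
 * i.e. two NOPs for alignment, kind 8 (TIMESTAMP), length 10, then the
 * current timestamp and the echoed ts_recent.
 */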
static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
struct sock *child)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
req->sk = child;
sk_acceptq_added(sk);
return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
}
-static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
struct open_request **prev)
{
write_lock(&tp->syn_wait_lock);
}
static __inline__ void tcp_openreq_init(struct open_request *req,
- struct tcp_opt *tp,
+ struct tcp_options_received *rx_opt,
struct sk_buff *skb)
{
req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
req->rcv_isn = TCP_SKB_CB(skb)->seq;
- req->mss = tp->mss_clamp;
- req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
- req->tstamp_ok = tp->tstamp_ok;
- req->sack_ok = tp->sack_ok;
- req->snd_wscale = tp->snd_wscale;
- req->wscale_ok = tp->wscale_ok;
+ req->mss = rx_opt->mss_clamp;
+ req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+ req->tstamp_ok = rx_opt->tstamp_ok;
+ req->sack_ok = rx_opt->sack_ok;
+ req->snd_wscale = rx_opt->snd_wscale;
+ req->wscale_ok = rx_opt->wscale_ok;
req->acked = 0;
req->ecn_ok = 0;
req->rmt_port = skb->h.th->source;
wake_up(&tcp_lhash_wait);
}
-static inline int keepalive_intvl_when(struct tcp_opt *tp)
+static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}
-static inline int keepalive_time_when(struct tcp_opt *tp)
+static inline int keepalive_time_when(const struct tcp_sock *tp)
{
return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}
-static inline int tcp_fin_time(struct tcp_opt *tp)
+static inline int tcp_fin_time(const struct tcp_sock *tp)
{
int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
return fin_timeout;
}
-static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
+static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int rst)
{
- if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
+ if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
return 0;
- if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
+ if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
return 0;
/* RST segments are not recommended to carry timestamps;
   however, we can relax time bounds for RST segments to MSL.
*/
- if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
+ if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
return 0;
return 1;
}
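/*
 * Worked example (editorial note): the test uses signed 32-bit
 * differences, so timestamp wraparound is handled.  With
 * ts_recent == 0xfffffff0 and an incoming rcv_tsval == 0x00000010,
 * (s32)(0x00000010 - 0xfffffff0) == 0x20 >= 0 and the segment passes
 * PAWS even though the raw value wrapped through zero.
 */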
static inline int tcp_use_frto(const struct sock *sk)
{
- const struct tcp_opt *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* F-RTO must be activated in sysctl and there must be some
* unsent new data, and the advertised window should allow
#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
-static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
+static inline void tcp_westwood_update_rtt(struct tcp_sock *tp, __u32 rtt_seq)
{
if (tcp_is_westwood(tp))
tp->westwood.rtt = rtt_seq;
}
-void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
-void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
-
-static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
-{
- if (tcp_is_westwood(tcp_sk(sk)))
- __tcp_westwood_fast_bw(sk, skb);
-}
-
-static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
-{
- if (tcp_is_westwood(tcp_sk(sk)))
- __tcp_westwood_slow_bw(sk, skb);
-}
-
-static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
+static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
(__u32) (tp->mss_cache_std),
2U);
}
-static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
+static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_sock *tp)
{
return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
}
-static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
+static inline int tcp_westwood_ssthresh(struct tcp_sock *tp)
{
__u32 ssthresh = 0;
return (ssthresh != 0);
}
-static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
+static inline int tcp_westwood_cwnd(struct tcp_sock *tp)
{
__u32 cwnd = 0;