X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fnet%2Ftcp.h;fp=include%2Fnet%2Ftcp.h;h=781156845a38655518dc967cc7b73bcf524f6110;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=8fa246247e30b6b7a17c6a6f490c75921cf7ac53;hpb=4e76c8a9fa413ccc09d3f7f664183dcce3555d57;p=linux-2.6.git

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8fa246247..781156845 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -21,13 +21,13 @@
 #define TCP_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
-#include <linux/config.h>
 #include <linux/list.h>
 #include <linux/tcp.h>
 #include <linux/slab.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/skbuff.h>
+#include <linux/dmaengine.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
@@ -218,6 +218,7 @@ extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
 extern int sysctl_tcp_low_latency;
+extern int sysctl_tcp_dma_copybreak;
 extern int sysctl_tcp_nometrics_save;
 extern int sysctl_tcp_moderate_rcvbuf;
 extern int sysctl_tcp_tso_win_divisor;
@@ -225,6 +226,7 @@ extern int sysctl_tcp_abc;
 extern int sysctl_tcp_mtu_probing;
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;
+extern int sysctl_tcp_slow_start_after_idle;
 
 extern atomic_t tcp_memory_allocated;
 extern atomic_t tcp_sockets_allocated;
@@ -295,6 +297,8 @@ extern int tcp_rcv_established(struct sock *sk,
 
 extern void tcp_rcv_space_adjust(struct sock *sk);
 
+extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
 extern int tcp_twsk_unique(struct sock *sk,
                            struct sock *sktw, void *twp);
 
@@ -433,7 +437,7 @@ extern int tcp_send_synack(struct sock *);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
-extern void cleanup_rbuf(struct sock *sk, int copied);
+extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
 
 /* tcp_input.c */
 extern void tcp_cwnd_application_limited(struct sock *sk);
@@ -631,7 +635,7 @@ struct tcp_congestion_ops {
 	/* return slow start threshold (required) */
 	u32 (*ssthresh)(struct sock *sk);
 	/* lower bound for congestion window (optional) */
-	u32 (*min_cwnd)(struct sock *sk);
+	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 rtt,
 			   u32 in_flight, int good_ack);
@@ -666,7 +670,7 @@ extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
 extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 				u32 in_flight, int flag);
-extern u32 tcp_reno_min_cwnd(struct sock *sk);
+extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -750,7 +754,7 @@ static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	if (in_flight >= tp->snd_cwnd)
 		return 1;
 
-	if (!(sk->sk_route_caps & NETIF_F_TSO))
+	if (!sk_can_gso(sk))
 		return 0;
 
 	left = tp->snd_cwnd - in_flight;
@@ -820,6 +824,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 	tp->ucopy.len = 0;
 	tp->ucopy.memory = 0;
 	skb_queue_head_init(&tp->ucopy.prequeue);
+#ifdef CONFIG_NET_DMA
+	tp->ucopy.dma_chan = NULL;
+	tp->ucopy.wakeup = 0;
+	tp->ucopy.pinned_list = NULL;
+	tp->ucopy.dma_cookie = 0;
+#endif
 }
 
 /* Packet is added to VJ-style prequeue for processing in process
@@ -907,6 +917,9 @@ static inline void tcp_set_state(struct sock *sk, int state)
 
 static inline void tcp_done(struct sock *sk)
 {
+	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+
 	tcp_set_state(sk, TCP_CLOSE);
 	tcp_clear_xmit_timers(sk);