This commit was manufactured by cvs2svn to create tag
diff --git a/include/net/tcp.h b/include/net/tcp.h
index c7d711f..79b7e24 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -196,6 +196,10 @@ struct tcp_tw_bucket {
 #define tw_node                        __tw_common.skc_node
 #define tw_bind_node           __tw_common.skc_bind_node
 #define tw_refcnt              __tw_common.skc_refcnt
+#define tw_xid                 __tw_common.skc_xid
+#define tw_vx_info             __tw_common.skc_vx_info
+#define tw_nid                 __tw_common.skc_nid
+#define tw_nx_info             __tw_common.skc_nx_info
        volatile unsigned char  tw_substate;
        unsigned char           tw_rcv_wscale;
        __u16                   tw_sport;
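
The four tw_* macros added above follow the existing pattern in this struct: they alias fields of the struct sock_common embedded at the start of the time-wait bucket, so context-aware code can read the same per-context ids (xid/nid, Linux-VServer style) from a tw bucket and from a full socket alike. A minimal userspace sketch of that aliasing idiom, with assumed types and field names rather than the kernel definitions:

#include <stdio.h>

struct sock_common_sketch {
	unsigned int	skc_refcnt;
	unsigned int	skc_xid;	/* assumed: per-context id */
	unsigned int	skc_nid;	/* assumed: per-network-context id */
};

struct tw_bucket_sketch {
	struct sock_common_sketch __tw_common;	/* shared header, kept first */
	unsigned char		tw_substate;
};

/* same renaming trick as the macros in the hunk above */
#define tw_refcnt	__tw_common.skc_refcnt
#define tw_xid		__tw_common.skc_xid
#define tw_nid		__tw_common.skc_nid

int main(void)
{
	struct tw_bucket_sketch tw = { .__tw_common = { 1, 42, 7 } };

	/* tw.tw_xid expands to tw.__tw_common.skc_xid */
	printf("refcnt=%u xid=%u nid=%u\n", tw.tw_refcnt, tw.tw_xid, tw.tw_nid);
	return 0;
}
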
@@ -672,6 +676,10 @@ struct open_request {
                struct tcp_v6_open_req v6_req;
 #endif
        } af;
+#ifdef CONFIG_ACCEPT_QUEUES
+       unsigned long acceptq_time_stamp;
+       int           acceptq_class;
+#endif
 };
 
 /* SLAB cache for open requests. */
@@ -1543,7 +1551,7 @@ static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 
                        while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
                                sk->sk_backlog_rcv(sk, skb1);
-                               NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
+                               NET_INC_STATS_BH(TCPPrequeueDropped);
                        }
 
                        tp->ucopy.memory = 0;
@@ -1575,12 +1583,12 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
        switch (state) {
        case TCP_ESTABLISHED:
                if (oldstate != TCP_ESTABLISHED)
-                       TCP_INC_STATS(TCP_MIB_CURRESTAB);
+                       TCP_INC_STATS(TcpCurrEstab);
                break;
 
        case TCP_CLOSE:
                if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
-                       TCP_INC_STATS(TCP_MIB_ESTABRESETS);
+                       TCP_INC_STATS(TcpEstabResets);
 
                sk->sk_prot->unhash(sk);
                if (tcp_sk(sk)->bind_hash &&
@@ -1589,7 +1597,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
                /* fall through */
        default:
                if (oldstate==TCP_ESTABLISHED)
-                       TCP_DEC_STATS(TCP_MIB_CURRESTAB);
+                       TCP_DEC_STATS(TcpCurrEstab);
        }
 
        /* Change state AFTER socket is unhashed to avoid closed
@@ -1772,6 +1780,83 @@ static inline int tcp_full_space( struct sock *sk)
        return tcp_win_from_space(sk->sk_rcvbuf); 
 }
 
+struct tcp_listen_opt
+{
+       u8                      max_qlen_log;   /* log_2 of maximal queued SYNs */
+       int                     qlen;
+#ifdef CONFIG_ACCEPT_QUEUES
+       int                     qlen_young[NUM_ACCEPT_QUEUES];
+#else
+       int                     qlen_young;
+#endif
+       int                     clock_hand;
+       u32                     hash_rnd;
+       struct open_request     *syn_table[TCP_SYNQ_HSIZE];
+};
+
+#ifdef CONFIG_ACCEPT_QUEUES
+static inline void sk_acceptq_removed(struct sock *sk, int class)
+{
+       tcp_sk(sk)->acceptq[class].aq_backlog--;
+}
+
+static inline void sk_acceptq_added(struct sock *sk, int class)
+{
+       tcp_sk(sk)->acceptq[class].aq_backlog++;
+}
+
+static inline int sk_acceptq_is_full(struct sock *sk, int class)
+{
+       return tcp_sk(sk)->acceptq[class].aq_backlog >
+               sk->sk_max_ack_backlog;
+}
+
+static inline void tcp_set_acceptq(struct tcp_opt *tp, struct open_request *req)
+{
+       int class = req->acceptq_class;
+       int prev_class;
+
+       if (!tp->acceptq[class].aq_ratio) {
+               req->acceptq_class = 0;
+               class = 0;
+       }
+
+       tp->acceptq[class].aq_qcount++;
+       req->acceptq_time_stamp = jiffies;
+
+       if (tp->acceptq[class].aq_tail) {
+               req->dl_next = tp->acceptq[class].aq_tail->dl_next;
+               tp->acceptq[class].aq_tail->dl_next = req;
+               tp->acceptq[class].aq_tail = req;
+       } else { /* if first request in the class */
+               tp->acceptq[class].aq_head = req;
+               tp->acceptq[class].aq_tail = req;
+
+               prev_class = class - 1;
+               while (prev_class >= 0) {
+                       if (tp->acceptq[prev_class].aq_tail)
+                               break;
+                       prev_class--;
+               }
+               if (prev_class < 0) {
+                       req->dl_next = tp->accept_queue;
+                       tp->accept_queue = req;
+               }
+               else {
+                       req->dl_next = tp->acceptq[prev_class].aq_tail->dl_next;
+                       tp->acceptq[prev_class].aq_tail->dl_next = req;
+               }
+       }
+}
+static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+                                        struct sock *child)
+{
+       tcp_set_acceptq(tcp_sk(sk),req);
+       req->sk = child;
+       sk_acceptq_added(sk,req->acceptq_class);
+}
+
+#else
 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
                                         struct sock *child)
 {
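
The CONFIG_ACCEPT_QUEUES block above keeps one accept queue per class (aq_head/aq_tail) while still threading every request through the single dl_next list, so the legacy walk of tp->accept_queue keeps working. tcp_set_acceptq() appends to the tail of an already-populated class; a class's first request is spliced in after the tail of the nearest non-empty lower class, or at the global list head if no lower class has entries. A compact userspace sketch of just that splicing (struct req_sketch and NUM_CLASSES are assumptions; the aq_ratio fallback, aq_qcount and jiffies stamping of the real code are omitted):

#include <stdio.h>
#include <stddef.h>

#define NUM_CLASSES 4

struct req_sketch {
	int class;
	struct req_sketch *dl_next;
};

static struct req_sketch *accept_queue;		/* single global list head */
static struct req_sketch *aq_head[NUM_CLASSES];
static struct req_sketch *aq_tail[NUM_CLASSES];

static void set_acceptq_sketch(struct req_sketch *req)
{
	int class = req->class;
	int prev;

	if (aq_tail[class]) {
		/* class already populated: append after its current tail */
		req->dl_next = aq_tail[class]->dl_next;
		aq_tail[class]->dl_next = req;
		aq_tail[class] = req;
		return;
	}

	/* first request of this class */
	aq_head[class] = aq_tail[class] = req;

	for (prev = class - 1; prev >= 0; prev--)
		if (aq_tail[prev])
			break;

	if (prev < 0) {
		req->dl_next = accept_queue;	/* becomes the new global head */
		accept_queue = req;
	} else {
		req->dl_next = aq_tail[prev]->dl_next;
		aq_tail[prev]->dl_next = req;
	}
}

int main(void)
{
	struct req_sketch r[5] = { {2}, {0}, {2}, {1}, {0} };
	struct req_sketch *p;
	int i;

	for (i = 0; i < 5; i++)
		set_acceptq_sketch(&r[i]);

	/* walking the one threaded list yields requests grouped by class, 0 first */
	for (p = accept_queue; p; p = p->dl_next)
		printf("class %d\n", p->class);
	return 0;
}
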
@@ -1789,15 +1874,41 @@ static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
        req->dl_next = NULL;
 }
 
-struct tcp_listen_opt
+#endif
+
+
+#ifdef CONFIG_ACCEPT_QUEUES
+static inline void
+tcp_synq_removed(struct sock *sk, struct open_request *req)
 {
-       u8                      max_qlen_log;   /* log_2 of maximal queued SYNs */
-       int                     qlen;
-       int                     qlen_young;
-       int                     clock_hand;
-       u32                     hash_rnd;
-       struct open_request     *syn_table[TCP_SYNQ_HSIZE];
-};
+       struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
+
+       if (--lopt->qlen == 0)
+               tcp_delete_keepalive_timer(sk);
+       if (req->retrans == 0)
+               lopt->qlen_young[req->acceptq_class]--;
+}
+
+static inline void tcp_synq_added(struct sock *sk, struct open_request *req)
+{
+       struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
+
+       if (lopt->qlen++ == 0)
+               tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+       lopt->qlen_young[req->acceptq_class]++;
+}
+
+static inline int tcp_synq_len(struct sock *sk)
+{
+       return tcp_sk(sk)->listen_opt->qlen;
+}
+
+static inline int tcp_synq_young(struct sock *sk, int class)
+{
+       return tcp_sk(sk)->listen_opt->qlen_young[class];
+}
+
+#else
 
 static inline void
 tcp_synq_removed(struct sock *sk, struct open_request *req)
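
In the CONFIG_ACCEPT_QUEUES variant above, qlen counts every pending open_request while qlen_young[] counts, per class, only requests that have never been retransmitted; the listener's timer is armed on the 0->1 transition of qlen and stopped on 1->0. A small sketch of that accounting under the same assumptions as the previous example (timer_running stands in for the keepalive timer used here):

#include <stdio.h>

#define NUM_CLASSES 4

struct listen_sketch {
	int qlen;
	int qlen_young[NUM_CLASSES];
	int timer_running;		/* stand-in for the listener's timer */
};

static void synq_added_sketch(struct listen_sketch *l, int class)
{
	if (l->qlen++ == 0)
		l->timer_running = 1;	/* first pending request: arm the timer */
	l->qlen_young[class]++;
}

static void synq_removed_sketch(struct listen_sketch *l, int class, int retrans)
{
	if (--l->qlen == 0)
		l->timer_running = 0;	/* queue drained: stop the timer */
	if (retrans == 0)
		l->qlen_young[class]--;	/* request left while still "young" */
}

int main(void)
{
	struct listen_sketch l = { 0 };

	synq_added_sketch(&l, 1);
	synq_added_sketch(&l, 1);
	synq_removed_sketch(&l, 1, 0);	/* accepted before any retransmit */
	printf("qlen=%d young[1]=%d timer=%d\n",
	       l.qlen, l.qlen_young[1], l.timer_running);
	return 0;
}
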
@@ -1828,6 +1939,7 @@ static inline int tcp_synq_young(struct sock *sk)
 {
        return tcp_sk(sk)->listen_opt->qlen_young;
 }
+#endif
 
 static inline int tcp_synq_is_full(struct sock *sk)
 {
@@ -1961,10 +2073,10 @@ static inline int tcp_use_frto(const struct sock *sk)
 static inline void tcp_mib_init(void)
 {
        /* See RFC 2012 */
-       TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
-       TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
-       TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
-       TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
+       TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
+       TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
+       TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
+       TCP_ADD_STATS_USER(TcpMaxConn, -1);
 }
 
 /* /proc */
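
The values set in tcp_mib_init() above are the static RFC 2012 objects: tcpRtoAlgorithm is set to 1, tcpMaxConn to -1 because the connection limit is dynamic, and the RTO bounds are converted from jiffies to the milliseconds the MIB expects via *1000/HZ. A standalone sketch of that unit conversion (HZ and the RTO constants below are assumptions, not the kernel's configuration):

#include <stdio.h>

#define HZ		1000		/* assumption: 1000 ticks per second */
#define TCP_RTO_MIN	(HZ / 5)	/* assumed 200 ms, expressed in jiffies */
#define TCP_RTO_MAX	(120 * HZ)	/* assumed 120 s, expressed in jiffies */

int main(void)
{
	/* same scaling as TCP_ADD_STATS_USER(..., TCP_RTO_MIN*1000/HZ) above */
	printf("tcpRtoMin = %ld ms\n", (long)TCP_RTO_MIN * 1000 / HZ);
	printf("tcpRtoMax = %ld ms\n", (long)TCP_RTO_MAX * 1000 / HZ);
	return 0;
}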