@@ ... @@ tcp_v4_lookup_listener
head = &tcp_listening_hash[tcp_lhashfn(hnum)];
if (!hlist_empty(head)) {
struct inet_opt *inet = inet_sk((sk = __sk_head(head)));
-
if (inet->num == hnum && !sk->sk_node.next &&
(!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
(sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
@@ ... @@ tcp_v4_synq_add
lopt->syn_table[h] = req;
write_unlock(&tp->syn_wait_lock);
+#ifdef CONFIG_ACCEPT_QUEUES
+ tcp_synq_added(sk, req);
+#else
tcp_synq_added(sk);
+#endif
}
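
With CONFIG_ACCEPT_QUEUES, tcp_synq_added() grows a second argument so the
queued SYN can be charged to the request's class. The patch's own definition
is not part of this excerpt; as a minimal sketch, the stock 2.6 helper plus
one hypothetical per-class counter (the aq_count field is an assumption, not
taken from the patch):

/* Sketch only -- not the patch's actual tcp_synq_added(). The first
 * three statements mirror the stock 2.6 helper; the per-class
 * increment and the aq_count field are assumptions for illustration.
 */
static inline void tcp_synq_added(struct sock *sk, struct open_request *req)
{
	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;

	if (lopt->qlen++ == 0)
		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
	lopt->qlen_young++;
	tcp_sk(sk)->acceptq[req->acceptq_class].aq_count++;	/* assumed */
}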
@@ ... @@ tcp_v4_conn_request
__u32 daddr = skb->nh.iph->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
+#ifdef CONFIG_ACCEPT_QUEUES
+ int class = 0;
+#endif
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif
@@ ... @@ tcp_v4_conn_request
goto drop;
}
+#ifdef CONFIG_ACCEPT_QUEUES
+ class = (skb->nfmark <= 0) ? 0 :
+ ((skb->nfmark >= NUM_ACCEPT_QUEUES) ? 0 : skb->nfmark);
+ /*
+ * Accept only if this class has shares set; otherwise fall back to
+ * the default class (class 0) if it has shares.
+ */
+ if (!(tcp_sk(sk)->acceptq[class].aq_valid)) {
+ if (tcp_sk(sk)->acceptq[0].aq_valid)
+ class = 0;
+ else
+ goto drop;
+ }
+#endif
+
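
The class index is taken straight from the netfilter mark, with out-of-range
marks falling back to class 0; the mark itself would typically be set outside
the stack (e.g. with the iptables MARK target), keeping admission policy in
netfilter. aq_valid is the only per-class field this excerpt reads besides
acceptq_class and acceptq_time_stamp; a hypothetical shape for the slot it
lives in, purely for orientation:

/* Hypothetical per-class accept-queue slot. Only aq_valid appears in
 * this excerpt; every other field is an assumption.
 */
struct tcp_acceptq_info {
	unsigned char	aq_valid;	/* class has shares configured */
	unsigned int	aq_shares;	/* assumed: share weight */
	unsigned int	aq_count;	/* assumed: queued requests */
	unsigned int	aq_limit;	/* assumed: cap derived from shares */
	unsigned long	aq_wait_time;	/* assumed: cumulative wait (jiffies) */
};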
/* Accept backlog is full. If we have already queued enough
 * warm entries in the syn queue, drop the request. That is better
 * than clogging the syn queue with openreqs with exponentially
 * increasing timeout.
 */
+#ifdef CONFIG_ACCEPT_QUEUES
+ if (tcp_acceptq_is_full(sk, class) && tcp_synq_young(sk, class) > 1)
+#else
if (tcp_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+#endif
goto drop;
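
tcp_acceptq_is_full() and tcp_synq_young() likewise gain a class argument.
Upstream, the former is simply sk->sk_ack_backlog > sk->sk_max_ack_backlog;
a sketch of a per-class variant, assuming the hypothetical aq_count and
aq_limit fields above:

/* Sketch only -- a plausible per-class tcp_acceptq_is_full(). aq_count
 * and aq_limit are assumed fields, not taken from the patch.
 */
static inline int tcp_acceptq_is_full(struct sock *sk, int class)
{
	struct tcp_opt *tp = tcp_sk(sk);

	return tp->acceptq[class].aq_count >= tp->acceptq[class].aq_limit;
}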
req = tcp_openreq_alloc();
@@ ... @@ tcp_v4_conn_request
tp.tstamp_ok = tp.saw_tstamp;
tcp_openreq_init(req, &tp, skb);
-
+#ifdef CONFIG_ACCEPT_QUEUES
+ req->acceptq_class = class;
+ req->acceptq_time_stamp = jiffies;
+#endif
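
Stamping the request with jiffies at creation implies the accept path can
measure how long each class waits; nothing in this excerpt consumes the
stamp, but a hypothetical accounting step at accept time might look like:

/* Hypothetical -- wait-time accounting when the request is accepted.
 * aq_wait_time is the assumed field from the struct sketch above.
 */
static inline void tcp_acceptq_charge_wait(struct sock *sk,
					   struct open_request *req)
{
	tcp_sk(sk)->acceptq[req->acceptq_class].aq_wait_time +=
		jiffies - req->acceptq_time_stamp;
}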
req->af.v4_req.loc_addr = daddr;
req->af.v4_req.rmt_addr = saddr;
req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
@@ ... @@ tcp_v4_syn_recv_sock
struct tcp_opt *newtp;
struct sock *newsk;
+#ifdef CONFIG_ACCEPT_QUEUES
+ if (tcp_acceptq_is_full(sk, req->acceptq_class))
+#else
if (tcp_acceptq_is_full(sk))
+#endif
goto exit_overflow;
if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)