/* Global byte counter of memory consumed by IP fragment queues.
 * Incremented when fragments/queues are allocated (elsewhere in the file)
 * and decremented by frag_kfree_skb()/frag_free_queue() below; the
 * evictor compares it against a threshold to decide when to reclaim. */
atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */
/* Memory Tracking Functions. */
/*
 * Free one fragment skb and subtract its truesize from the global
 * ip_frag_mem accounting.
 *
 * This hunk adds an optional @work parameter: when non-NULL, the bytes
 * released are also subtracted from *work, letting the evictor track
 * how much memory it has reclaimed toward its target.  Callers with no
 * reclaim budget pass NULL and get the old behavior.
 */
-static __inline__ void frag_kfree_skb(struct sk_buff *skb)
+static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
+ if (work)
+ *work -= skb->truesize;
atomic_sub(skb->truesize, &ip_frag_mem);
kfree_skb(skb);
}
/*
 * Free the ipq descriptor itself and subtract its size from the global
 * ip_frag_mem accounting.  Mirrors frag_kfree_skb(): the new optional
 * @work parameter, when non-NULL, is also decremented by sizeof(struct
 * ipq) so the evictor's reclaim budget stays in sync with ip_frag_mem.
 */
-static __inline__ void frag_free_queue(struct ipq *qp)
+static __inline__ void frag_free_queue(struct ipq *qp, int *work)
{
+ if (work)
+ *work -= sizeof(struct ipq);
atomic_sub(sizeof(struct ipq), &ip_frag_mem);
kfree(qp);
}
/* Destruction primitives. */
/* Complete destruction of ipq. */
/*
 * Tear down a fragment queue: free every queued skb, then the queue
 * descriptor.  @work is threaded through to the freeing helpers so the
 * evictor (which passes &work) can count reclaimed bytes; all other
 * callers pass NULL.
 */
-static void ip_frag_destroy(struct ipq *qp)
+static void ip_frag_destroy(struct ipq *qp, int *work)
{
struct sk_buff *fp;
/* NOTE(review): fp is read here with no visible initializer — the
 * `fp = qp->fragments;` line is presumably an elided context line of
 * this diff hunk; confirm against the full file before applying. */
while (fp) {
struct sk_buff *xp = fp->next; /* save next before fp is freed */
- frag_kfree_skb(fp);
+ frag_kfree_skb(fp, work);
fp = xp;
}
/* Finally, release the queue descriptor itself. */
- frag_free_queue(qp);
+ frag_free_queue(qp, work);
}
/*
 * Drop one reference on @ipq; on the last reference, destroy the queue.
 * @work (may be NULL) is forwarded to ip_frag_destroy() for the
 * evictor's reclaimed-bytes accounting.
 */
-static __inline__ void ipq_put(struct ipq *ipq)
+static __inline__ void ipq_put(struct ipq *ipq, int *work)
{
if (atomic_dec_and_test(&ipq->refcnt))
- ip_frag_destroy(ipq);
+ ip_frag_destroy(ipq, work);
}
/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count. */
}
/* Memory limiting on fragments. Evictor trashes the oldest
- * fragment queue until we are back under the low threshold.
+ * fragment queue until we are back under the threshold.
 */
/*
 * This hunk generalizes the evictor: instead of hard-coding
 * sysctl_ipfrag_low_thresh, the caller supplies @threshold.  The old
 * re-read-ip_frag_mem-each-iteration loop is replaced by a local `work`
 * budget (bytes over threshold), decremented via the work pointer
 * threaded through ipq_put() -> ip_frag_destroy() as queues are freed.
 * ipfrag_flush() reuses this with threshold 0 to drop everything.
 */
-static void ip_evictor(void)
+static void __ip_evictor(int threshold)
{
struct ipq *qp;
struct list_head *tmp;
+ int work;
- for(;;) {
- if (atomic_read(&ip_frag_mem) <= sysctl_ipfrag_low_thresh)
- return;
/* Compute how many bytes must be reclaimed; nothing to do if we are
 * already at or under the threshold. */
+ work = atomic_read(&ip_frag_mem) - threshold;
+ if (work <= 0)
+ return;
+
+ while (work > 0) {
read_lock(&ipfrag_lock);
if (list_empty(&ipq_lru_list)) {
read_unlock(&ipfrag_lock);
/* NOTE(review): context lines are elided here by the diff — the
 * return-on-empty, the pop of the oldest entry from ipq_lru_list
 * into qp, the refcount grab, read_unlock, and spin_lock(&qp->lock)
 * presumably sit between these hunks; confirm against the full file. */
ipq_kill(qp);
spin_unlock(&qp->lock);
- ipq_put(qp);
/* Pass &work so the bytes freed by this queue reduce the budget. */
+ ipq_put(qp, &work);
IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
}
}
/*
 * Preserve the old entry point: normal memory-pressure eviction shrinks
 * fragment memory back down to the sysctl low threshold.
 */
+static inline void ip_evictor(void)
+{
+ __ip_evictor(sysctl_ipfrag_low_thresh);
+}
+
+
/*
* Oops, a fragment queue timed out. Kill it and send an ICMP reply.
*/
}
out:
spin_unlock(&qp->lock);
- ipq_put(qp);
+ ipq_put(qp, NULL);
}
/* Creation primitives. */
atomic_inc(&qp->refcnt);
write_unlock(&ipfrag_lock);
qp_in->last_in |= COMPLETE;
- ipq_put(qp_in);
+ ipq_put(qp_in, NULL);
return qp;
}
}
qp->fragments = next;
qp->meat -= free_it->len;
- frag_kfree_skb(free_it);
+ frag_kfree_skb(free_it, NULL);
}
}
ret = ip_frag_reasm(qp, dev);
spin_unlock(&qp->lock);
- ipq_put(qp);
+ ipq_put(qp, NULL);
return ret;
}
add_timer(&ipfrag_secret_timer);
}
/*
 * Flush ALL fragment queues: evict down to a threshold of zero.
 * Exported (see EXPORT_SYMBOL below) so other subsystems can force a
 * full flush of IP reassembly state.
 */
+void ipfrag_flush(void)
+{
+ __ip_evictor(0);
+}
+
+
EXPORT_SYMBOL(ip_defrag);
+EXPORT_SYMBOL(ipfrag_flush);