struct ipq {
struct ipq *next; /* linked list pointers */
struct list_head lru_list; /* lru list member */
+ u32 user;
u32 saddr;
u32 daddr;
u16 id;
atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */
/* Memory Tracking Functions. */
-static __inline__ void frag_kfree_skb(struct sk_buff *skb)
+static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
+ if (work)
+ *work -= skb->truesize;
atomic_sub(skb->truesize, &ip_frag_mem);
kfree_skb(skb);
}
-static __inline__ void frag_free_queue(struct ipq *qp)
+static __inline__ void frag_free_queue(struct ipq *qp, int *work)
{
+ if (work)
+ *work -= sizeof(struct ipq);
atomic_sub(sizeof(struct ipq), &ip_frag_mem);
kfree(qp);
}
/* Destruction primitives. */
/* Complete destruction of ipq. */
-static void ip_frag_destroy(struct ipq *qp)
+static void ip_frag_destroy(struct ipq *qp, int *work)
{
struct sk_buff *fp;
/* Release all fragment data. */
fp = qp->fragments;
while (fp) {
struct sk_buff *xp = fp->next;
- frag_kfree_skb(fp);
+ frag_kfree_skb(fp, work);
fp = xp;
}
/* Finally, release the queue descriptor itself. */
- frag_free_queue(qp);
+ frag_free_queue(qp, work);
}
-static __inline__ void ipq_put(struct ipq *ipq)
+static __inline__ void ipq_put(struct ipq *ipq, int *work)
{
if (atomic_dec_and_test(&ipq->refcnt))
- ip_frag_destroy(ipq);
+ ip_frag_destroy(ipq, work);
}
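/*
 * A minimal sketch of the new convention, assuming only what the helpers
 * above show: every teardown function now takes an 'int *work'.  A caller
 * reclaiming memory toward a target passes a budget; everyone else passes
 * NULL and skips the accounting entirely:
 *
 *	ipq_put(qp, NULL);	// ordinary refcount drop, no accounting
 *	ipq_put(qp, &work);	// evictor path: 'work' shrinks by the
 *				// skb->truesize of each freed fragment
 *				// plus sizeof(struct ipq)
 *
 * Threading the budget by pointer lets one counter cross ipq_put(),
 * ip_frag_destroy(), frag_kfree_skb() and frag_free_queue() without
 * re-reading the ip_frag_mem atomic at every step.
 */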
/* Kill ipq entry. It is not destroyed immediately,
 * only at splitting a reassembled packet.
 */
}
/* Memory limiting on fragments. Evictor trashes the oldest
- * fragment queue until we are back under the low threshold.
+ * fragment queue until we are back under the threshold.
*/
static void ip_evictor(void)
{
struct ipq *qp;
struct list_head *tmp;
+ int work;
- for(;;) {
- if (atomic_read(&ip_frag_mem) <= sysctl_ipfrag_low_thresh)
- return;
+ work = atomic_read(&ip_frag_mem) - sysctl_ipfrag_low_thresh;
+ if (work <= 0)
+ return;
+
+ while (work > 0) {
read_lock(&ipfrag_lock);
if (list_empty(&ipq_lru_list)) {
read_unlock(&ipfrag_lock);
return;
}

tmp = ipq_lru_list.next;
qp = list_entry(tmp, struct ipq, lru_list);
atomic_inc(&qp->refcnt);
read_unlock(&ipfrag_lock);

spin_lock(&qp->lock);
if (!(qp->last_in&COMPLETE))
ipq_kill(qp);
spin_unlock(&qp->lock);
-		ipq_put(qp);
-		IP_INC_STATS_BH(IpReasmFails);
+		ipq_put(qp, &work);
+		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
}
}
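/*
 * Why the snapshot: computing 'work' once from ip_frag_mem bounds the loop
 * even if other CPUs keep queueing new fragments concurrently.  A worked
 * example with assumed sysctl values (not from this patch):
 *
 *	// sysctl_ipfrag_low_thresh = 196608 (192 KB), 262144 bytes in use
 *	work = 262144 - 196608;		// 65536 bytes to reclaim
 *	// each ipq_put(qp, &work) subtracts the bytes actually freed,
 *	// and the while loop exits as soon as work drops to 0 or below.
 */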
static void ip_expire(unsigned long arg)
{
struct ipq *qp = (struct ipq *) arg;

spin_lock(&qp->lock);

if (qp->last_in & COMPLETE)
goto out;

ipq_kill(qp);
- IP_INC_STATS_BH(IpReasmTimeout);
- IP_INC_STATS_BH(IpReasmFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) {
struct sk_buff *head = qp->fragments;
/* Send an ICMP "Fragment Reassembly Timeout" message. */
}
out:
spin_unlock(&qp->lock);
- ipq_put(qp);
+ ipq_put(qp, NULL);
}
/* Creation primitives. */
static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
{
struct ipq *qp;

write_lock(&ipfrag_lock);
for (qp = ipq_hash[hash]; qp; qp = qp->next) {
if (qp->id == qp_in->id &&
qp->saddr == qp_in->saddr &&
qp->daddr == qp_in->daddr &&
- qp->protocol == qp_in->protocol) {
+ qp->protocol == qp_in->protocol &&
+ qp->user == qp_in->user) {
atomic_inc(&qp->refcnt);
write_unlock(&ipfrag_lock);
qp_in->last_in |= COMPLETE;
- ipq_put(qp_in);
+ ipq_put(qp_in, NULL);
return qp;
}
}
}
/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
-static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph)
+static struct ipq *ip_frag_create(unsigned hash, struct iphdr *iph, u32 user)
{
struct ipq *qp;

if ((qp = frag_alloc_queue()) == NULL)
goto out_nomem;

qp->protocol = iph->protocol;
qp->last_in = 0;
qp->id = iph->id;
qp->saddr = iph->saddr;
qp->daddr = iph->daddr;
+ qp->user = user;
qp->len = 0;
qp->meat = 0;
qp->fragments = NULL;
init_timer(&qp->timer);
qp->timer.data = (unsigned long) qp; /* pointer to queue */
qp->timer.function = ip_expire; /* expire function */
- qp->lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&qp->lock);
atomic_set(&qp->refcnt, 1);
return ip_frag_intern(hash, qp);

out_nomem:
return NULL;
}
/* Find the correct entry in the "incomplete datagrams" queue for
* this IP datagram, and create a new one if nothing is found.
*/
-static inline struct ipq *ip_find(struct iphdr *iph)
+static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
__u16 id = iph->id;
__u32 saddr = iph->saddr;
__u32 daddr = iph->daddr;
__u8 protocol = iph->protocol;
unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
struct ipq *qp;

read_lock(&ipfrag_lock);
for (qp = ipq_hash[hash]; qp; qp = qp->next) {
if (qp->id == id &&
qp->saddr == saddr &&
qp->daddr == daddr &&
- qp->protocol == protocol) {
+ qp->protocol == protocol &&
+ qp->user == user) {
atomic_inc(&qp->refcnt);
read_unlock(&ipfrag_lock);
return qp;
}
}
read_unlock(&ipfrag_lock);
- return ip_frag_create(hash, iph);
+ return ip_frag_create(hash, iph, user);
}
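/*
 * A sketch of what the new key buys.  The user constants here are
 * illustrative assumptions (IP_DEFRAG_LOCAL_DELIVER and
 * IP_DEFRAG_CONNTRACK_IN are not shown in this excerpt).  Because 'user'
 * is part of the compare, two subsystems may share a hash bucket but can
 * never alias each other's queues:
 *
 *	qp1 = ip_find(iph, IP_DEFRAG_LOCAL_DELIVER);
 *	qp2 = ip_find(iph, IP_DEFRAG_CONNTRACK_IN);
 *	// qp1 != qp2: same (id, saddr, daddr, protocol), separate queues
 */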
/* Add a new segment to an existing queue. */
qp->fragments = next;
qp->meat -= free_it->len;
- frag_kfree_skb(free_it);
+ frag_kfree_skb(free_it, NULL);
}
}
iph = head->nh.iph;
iph->frag_off = 0;
iph->tot_len = htons(len);
- IP_INC_STATS_BH(IpReasmOKs);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
qp->fragments = NULL;
return head;
"Oversized IP packet from %d.%d.%d.%d.\n",
NIPQUAD(qp->saddr));
out_fail:
- IP_INC_STATS_BH(IpReasmFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
return NULL;
}
/* Process an incoming IP datagram fragment. */
-struct sk_buff *ip_defrag(struct sk_buff *skb)
+struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
{
struct iphdr *iph = skb->nh.iph;
struct ipq *qp;
struct net_device *dev;
- IP_INC_STATS_BH(IpReasmReqds);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
/* Start by cleaning up the memory. */
if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
ip_evictor();

dev = skb->dev;
/* Lookup (or create) queue header */
- if ((qp = ip_find(iph)) != NULL) {
+ if ((qp = ip_find(iph, user)) != NULL) {
struct sk_buff *ret = NULL;
spin_lock(&qp->lock);

ip_frag_queue(qp, skb);

if (qp->last_in == (FIRST_IN|LAST_IN) &&
    qp->meat == qp->len)
ret = ip_frag_reasm(qp, dev);
spin_unlock(&qp->lock);
- ipq_put(qp);
+ ipq_put(qp, NULL);
return ret;
}
- IP_INC_STATS_BH(IpReasmFails);
+ IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
kfree_skb(skb);
return NULL;
}
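/*
 * A minimal caller sketch, assuming IP_DEFRAG_LOCAL_DELIVER is defined
 * alongside the new 'user' argument (it is not shown in this excerpt).
 * A NULL return means the fragment was queued and the datagram is still
 * incomplete:
 *
 *	if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
 *		skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
 *		if (!skb)
 *			return 0;	// swallowed; wait for more fragments
 *	}
 */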
+static unsigned long ip_defrag_id_bitmap;
+
+int ip_defrag_user_id_alloc(void)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ if (!test_and_set_bit(i, &ip_defrag_id_bitmap))
+ return i + __IP_DEFRAG_DYNAMIC_FIRST;
+ }
+
+ return -ENFILE;
+}
+EXPORT_SYMBOL(ip_defrag_user_id_alloc);
+
+void ip_defrag_user_id_free(int user)
+{
+ user -= __IP_DEFRAG_DYNAMIC_FIRST;
+ if (user >= 0 && user < 32)
+ clear_bit(user, &ip_defrag_id_bitmap);
+}
+EXPORT_SYMBOL(ip_defrag_user_id_free);
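/*
 * A usage sketch for the allocator above.  The module and all 'my_*' names
 * are hypothetical; only ip_defrag_user_id_alloc()/_free() come from this
 * patch:
 *
 *	static int my_defrag_user;
 *
 *	static int __init my_init(void)
 *	{
 *		my_defrag_user = ip_defrag_user_id_alloc();
 *		if (my_defrag_user < 0)
 *			return my_defrag_user;	// -ENFILE: all 32 IDs taken
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		ip_defrag_user_id_free(my_defrag_user);
 *	}
 *
 * Every skb the module defragments is then passed as
 * ip_defrag(skb, my_defrag_user), keeping its reassembly state disjoint
 * from every other user's.
 */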
+
void ipfrag_init(void)
{
ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
(jiffies ^ (jiffies >> 6)));