*/
#include <linux/module.h>
-#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
-#include <asm/bitops.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
-#include <net/pkt_sched.h>
+#include <net/act_api.h>
+#include <net/pkt_cls.h>
/*
1. For now we assume that route tags < 256.
/* One slot of the destination-hash table: filter chains keyed by source
 * route tag (ht[0..15]), by incoming interface (ht[16..31]), and one
 * wildcard chain (ht[32]) -- see the three scans in route4_classify(). */
struct route4_bucket
{
+ /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
struct route4_filter *ht[16+16+1];
};
/* NOTE(review): tail of struct route4_filter -- the leading members are
 * outside this excerpt. 'exts' replaces the old CONFIG_NET_CLS_POLICE
 * police pointer with the generic action/police extension block. */
int iif;
struct tcf_result res;
-#ifdef CONFIG_NET_CLS_POLICE
- struct tcf_police *police;
-#endif
-
+ struct tcf_exts exts;
u32 handle;
struct route4_bucket *bkt;
};
/* Sentinel stored in the fastmap to cache a negative lookup result
 * (see the miss path at the end of route4_classify). */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
/* Tells the generic tcf_exts helpers which TLV types carry this
 * classifier's police and action attributes. */
+static struct tcf_ext_map route_ext_map = {
+ .police = TCA_ROUTE4_POLICE,
+ .action = TCA_ROUTE4_ACT
+};
+
/* Hash a route tag into the 16-entry fastmap cache. Only the low nibble
 * of the tag selects the slot; the iif argument is accepted for symmetry
 * with the lookup sites but is not mixed into the hash. */
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return (int)(id & 0xF);
}
-static void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
+static inline
+void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
spin_lock_bh(&dev->queue_lock);
memset(head->fastmap, 0, sizeof(head->fastmap));
return 32;
}
-#ifdef CONFIG_NET_CLS_POLICE
-#define IF_ROUTE_POLICE \
-if (f->police) { \
- int pol_res = tcf_police(skb, f->police); \
- if (pol_res >= 0) return pol_res; \
- dont_cache = 1; \
- continue; \
-} \
-if (!dont_cache)
-#else
-#define IF_ROUTE_POLICE
-#endif
-
/* Expanded inside route4_classify's chain-scan loops: copy the matched
 * filter's result into *res, run any attached actions/policers through
 * tcf_exts_exec() (a negative verdict skips this entry and suppresses
 * fastmap caching), otherwise cache the hit in the fastmap and return. */
+#define ROUTE4_APPLY_RESULT() \
+{ \
+ *res = f->res; \
+ if (tcf_exts_is_available(&f->exts)) { \
+ int r = tcf_exts_exec(skb, &f->exts, res); \
+ if (r < 0) { \
+ dont_cache = 1; \
+ continue; \
+ } \
+ return r; \
+ } else if (!dont_cache) \
+ route4_set_fastmap(head, id, iif, f); \
+ return 0; \
+}
/* route4_classify(): classify an skb by its route (dst) tag and/or input
 * interface, consulting the destination-hash table.
 * NOTE(review): this hunk is missing context lines -- 'head', 'id', 'iif'
 * and 'h' are initialized in lines omitted from this view; verify against
 * the full file before relying on this excerpt. */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
-#ifdef CONFIG_NET_CLS_POLICE
- int dont_cache = 0;
-#endif
u32 id, h;
- int iif;
+ int iif, dont_cache = 0;
if ((dst = skb->dst) == NULL)
goto failure;
restart:
/* Three scans per bucket: FROM-tag chain, IIF chain, then wildcard. */
if ((b = head->table[h]) != NULL) {
- f = b->ht[route4_hash_from(id)];
-
- for ( ; f; f = f->next) {
- if (f->id == id) {
- *res = f->res;
- IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
- return 0;
- }
- }
+ for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
+ if (f->id == id)
+ ROUTE4_APPLY_RESULT();
- for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next) {
- if (f->iif == iif) {
- *res = f->res;
- IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
- return 0;
- }
- }
+ for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
+ if (f->iif == iif)
+ ROUTE4_APPLY_RESULT();
- for (f = b->ht[route4_hash_wild()]; f; f = f->next) {
- *res = f->res;
- IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
- return 0;
- }
+ for (f = b->ht[route4_hash_wild()]; f; f = f->next)
+ ROUTE4_APPLY_RESULT();
}
/* Presumably re-runs once with h set to the wildcard slot 256; the
 * line that advances h is omitted from this excerpt -- confirm. */
if (h < 256) {
goto restart;
}
-#ifdef CONFIG_NET_CLS_POLICE
/* Cache the miss too (ROUTE4_FAILURE) so repeated misses stay cheap. */
if (!dont_cache)
-#endif
route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
return -1;
/* NOTE(review): duplicated 'return -1' -- tail of an alternate path
 * whose preceding lines are missing from this excerpt. */
return -1;
}
/* Map a filter handle to its slot in the destination hash table: the low
 * 8 bits are the TO (destination tag) value; bit 15 set means "no TO
 * tag", which lands in the extra wildcard slot 256 (matching the
 * 'h < 256' retry in route4_classify).
 * Fix vs excerpt: the 'h += 256' step was missing, so the function fell
 * off its end (undefined behavior) whenever bit 15 was clear. */
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}
/* Map the high half of a filter handle to a bucket index within
 * route4_bucket->ht[16+16+1]:
 *   0xFFFF          -> 32      (the single wildcard bucket)
 *   FROM tag 0..255 -> 0..15   (low nibble)
 *   IIF | 0x8000    -> 16..31  (low nibble, offset by 16)
 * A FROM-style id above 255 yields the out-of-range value 256 so that
 * bounds-checking callers can reject malformed handles.
 * Fixes vs excerpt: the wildcard id mapped to bucket 0 (aliasing the
 * FROM-tag-0 chain, while route4_classify scans the wildcard chain in
 * the dedicated 33rd slot), and every other id fell off the end of the
 * function (undefined behavior).
 * NOTE(review): the 256 sentinel follows upstream cls_route.c -- confirm
 * that callers outside this excerpt (get/delete) check for it. */
static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;

	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
/* Unbind the filter's class, release its actions/policers, and free it.
 * Callers (destroy/delete paths below) unlink 'f' from its hash chain
 * before invoking this. */
+static inline void
+route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
+{
+ tcf_unbind_filter(tp, &f->res);
+ tcf_exts_destroy(tp, &f->exts);
+ kfree(f);
+}
+
/* route4_destroy(): detach the root (xchg) and free every filter.
 * NOTE(review): this excerpt omits the nested loop headers that walk the
 * table and set up 'b' and 'h2'; only the innermost chain-freeing loop
 * is visible here. */
static void route4_destroy(struct tcf_proto *tp)
{
struct route4_head *head = xchg(&tp->root, NULL);
struct route4_filter *f;
/* Pop each filter off the chain head, then free it. */
while ((f = b->ht[h2]) != NULL) {
- unsigned long cl;
-
b->ht[h2] = f->next;
- if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
- tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
-#ifdef CONFIG_NET_CLS_POLICE
- tcf_police_release(f->police);
-#endif
- kfree(f);
+ route4_delete_filter(tp, f);
}
}
kfree(b);
/* NOTE(review): interior of route4_delete() -- the function header and
 * handle validation are outside this excerpt. Unlinks filter 'f' from
 * its bucket chain under the tree lock, flushes the fastmap, then frees
 * it via route4_delete_filter(). */
for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
- unsigned long cl;
-
tcf_tree_lock(tp);
*fp = f->next;
tcf_tree_unlock(tp);
route4_reset_fastmap(tp->q->dev, head, f->id);
-
- if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
- tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
-
-#ifdef CONFIG_NET_CLS_POLICE
- tcf_police_release(f->police);
-#endif
- kfree(f);
+ route4_delete_filter(tp, f);
/* Strip tree */
return 0;
}
-static int route4_change(struct tcf_proto *tp, unsigned long base,
- u32 handle,
- struct rtattr **tca,
- unsigned long *arg)
/* route4_set_parms(): validate the TCA_ROUTE4_* attributes and apply
 * them to filter 'f'. 'new' distinguishes create from change. Computes
 * the filter handle (nhandle) from TO/FROM/IIF, finds or allocates the
 * destination bucket, commits id/iif/handle/bucket under the tree lock,
 * binds the class and attaches actions via tcf_exts_change().
 * Returns 0 or a negative errno.
 * NOTE(review): the interleaved '-' lines are the old route4_change()
 * body being removed by this patch; read the '+' and context lines for
 * the new function. */
+static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
+ struct route4_filter *f, u32 handle, struct route4_head *head,
+ struct rtattr **tb, struct rtattr *est, int new)
{
- struct route4_head *head = tp->root;
- struct route4_filter *f, *f1, **ins_f;
- struct route4_bucket *b;
- struct rtattr *opt = tca[TCA_OPTIONS-1];
- struct rtattr *tb[TCA_ROUTE4_MAX];
- unsigned h1, h2;
int err;
+ u32 id = 0, to = 0, nhandle = 0x8000;
+ struct route4_filter *fp;
+ unsigned int h1;
+ struct route4_bucket *b;
+ struct tcf_exts e;
- if (opt == NULL)
- return handle ? -EINVAL : 0;
-
- if (rtattr_parse(tb, TCA_ROUTE4_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
- return -EINVAL;
-
- if ((f = (struct route4_filter*)*arg) != NULL) {
- /* Node exists: adjust only classid */
-
- if (f->handle != handle && handle)
- return -EINVAL;
- if (tb[TCA_ROUTE4_CLASSID-1]) {
- unsigned long cl;
-
- f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
- cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
- if (cl)
- tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
- }
-#ifdef CONFIG_NET_CLS_POLICE
- if (tb[TCA_ROUTE4_POLICE-1]) {
- struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
-
- tcf_tree_lock(tp);
- police = xchg(&f->police, police);
- tcf_tree_unlock(tp);
-
- tcf_police_release(police);
- }
-#endif
- return 0;
- }
-
- /* Now more serious part... */
-
- if (head == NULL) {
- head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
- if (head == NULL)
- return -ENOBUFS;
- memset(head, 0, sizeof(struct route4_head));
-
- tcf_tree_lock(tp);
- tp->root = head;
- tcf_tree_unlock(tp);
- }
-
- f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
- if (f == NULL)
- return -ENOBUFS;
-
- memset(f, 0, sizeof(*f));
/* Validate actions/police first; 'e' holds them until commit. */
+ err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
+ if (err < 0)
+ return err;
err = -EINVAL;
- f->handle = 0x8000;
+ if (tb[TCA_ROUTE4_CLASSID-1])
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
+ goto errout;
+
/* nhandle starts at 0x8000 ("no TO tag"); an explicit TO replaces it. */
if (tb[TCA_ROUTE4_TO-1]) {
- if (handle&0x8000)
+ if (new && handle & 0x8000)
goto errout;
- if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < 4)
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
goto errout;
- f->id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
- if (f->id > 0xFF)
+ to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
+ if (to > 0xFF)
goto errout;
- f->handle = f->id;
+ nhandle = to;
}
+
/* FROM and IIF are mutually exclusive; neither means wildcard 0xFFFF. */
if (tb[TCA_ROUTE4_FROM-1]) {
- u32 sid;
if (tb[TCA_ROUTE4_IIF-1])
goto errout;
- if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < 4)
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
goto errout;
- sid = (*(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]));
- if (sid > 0xFF)
+ id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
+ if (id > 0xFF)
goto errout;
- f->handle |= sid<<16;
- f->id |= sid<<16;
+ nhandle |= id << 16;
} else if (tb[TCA_ROUTE4_IIF-1]) {
- if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < 4)
+ if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
goto errout;
- f->iif = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
- if (f->iif > 0x7FFF)
+ id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
+ if (id > 0x7FFF)
goto errout;
- f->handle |= (f->iif|0x8000)<<16;
+ nhandle |= (id | 0x8000) << 16;
} else
- f->handle |= 0xFFFF<<16;
-
- if (handle) {
- f->handle |= handle&0x7F00;
- if (f->handle != handle)
- goto errout;
- }
+ nhandle |= 0xFFFF << 16;
/* A caller-supplied handle must agree with the attribute-derived one. */
- if (tb[TCA_ROUTE4_CLASSID-1]) {
- if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < 4)
+ if (handle && new) {
+ nhandle |= handle & 0x7F00;
+ if (nhandle != handle)
goto errout;
- f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
}
- h1 = to_hash(f->handle);
+ h1 = to_hash(nhandle);
if ((b = head->table[h1]) == NULL) {
err = -ENOBUFS;
- b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
+ b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
goto errout;
- memset(b, 0, sizeof(*b));
tcf_tree_lock(tp);
head->table[h1] = b;
tcf_tree_unlock(tp);
+ } else {
+ unsigned int h2 = from_hash(nhandle >> 16);
+ err = -EEXIST;
/* NOTE(review): this compares against f->handle, which still holds the
 * OLD handle (0 for a freshly allocated filter) -- should this duplicate
 * check not use nhandle? Verify against upstream. */
+ for (fp = b->ht[h2]; fp; fp = fp->next)
+ if (fp->handle == f->handle)
+ goto errout;
}
+
/* Commit the new identity atomically w.r.t. tree readers. */
+ tcf_tree_lock(tp);
+ if (tb[TCA_ROUTE4_TO-1])
+ f->id = to;
+
/* FROM overwrites id with the combined to|from value. */
+ if (tb[TCA_ROUTE4_FROM-1])
+ f->id = to | id<<16;
+ else if (tb[TCA_ROUTE4_IIF-1])
+ f->iif = id;
+
+ f->handle = nhandle;
f->bkt = b;
+ tcf_tree_unlock(tp);
- err = -EEXIST;
- h2 = from_hash(f->handle>>16);
- for (ins_f = &b->ht[h2]; (f1=*ins_f) != NULL; ins_f = &f1->next) {
- if (f->handle < f1->handle)
- break;
- if (f1->handle == f->handle)
+ if (tb[TCA_ROUTE4_CLASSID-1]) {
+ f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
+ tcf_bind_filter(tp, &f->res, base);
+ }
+
+ tcf_exts_change(tp, &f->exts, &e);
+
+ return 0;
+errout:
/* Release the not-yet-attached actions on any failure path. */
+ tcf_exts_destroy(tp, &e);
+ return err;
+}
+
/* route4_change(): netlink entry point -- create a new filter or update
 * the existing one in *arg. Attribute handling is delegated to
 * route4_set_parms(); afterwards the filter is (re)inserted into its
 * bucket chain in ascending-handle order, and if an update changed the
 * handle, the stale link under the old handle is removed while the tree
 * lock is still held. */
+static int route4_change(struct tcf_proto *tp, unsigned long base,
+ u32 handle,
+ struct rtattr **tca,
+ unsigned long *arg)
+{
+ struct route4_head *head = tp->root;
+ struct route4_filter *f, *f1, **fp;
+ struct route4_bucket *b;
+ struct rtattr *opt = tca[TCA_OPTIONS-1];
+ struct rtattr *tb[TCA_ROUTE4_MAX];
+ unsigned int h, th;
+ u32 old_handle = 0;
+ int err;
+
+ if (opt == NULL)
+ return handle ? -EINVAL : 0;
+
+ if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
+ return -EINVAL;
+
/* Update path: remember the old handle so a changed one can be
 * unlinked after reinsertion. */
+ if ((f = (struct route4_filter*)*arg) != NULL) {
+ if (f->handle != handle && handle)
+ return -EINVAL;
+
+ if (f->bkt)
+ old_handle = f->handle;
+
+ err = route4_set_parms(tp, base, f, handle, head, tb,
+ tca[TCA_RATE-1], 0);
+ if (err < 0)
+ return err;
+
+ goto reinsert;
+ }
+
/* Create path: lazily allocate the head, then a zeroed filter. */
+ err = -ENOBUFS;
+ if (head == NULL) {
+ head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
+ if (head == NULL)
goto errout;
+
+ tcf_tree_lock(tp);
+ tp->root = head;
+ tcf_tree_unlock(tp);
}
- cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
-#ifdef CONFIG_NET_CLS_POLICE
- if (tb[TCA_ROUTE4_POLICE-1])
- f->police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
-#endif
+ f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
+ if (f == NULL)
+ goto errout;
+
+ err = route4_set_parms(tp, base, f, handle, head, tb,
+ tca[TCA_RATE-1], 1);
+ if (err < 0)
+ goto errout;
+
+reinsert:
/* Insert in ascending-handle order within the bucket chain. */
+ h = from_hash(f->handle >> 16);
+ for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+ if (f->handle < f1->handle)
+ break;
f->next = f1;
tcf_tree_lock(tp);
- *ins_f = f;
+ *fp = f;
+
/* Handle changed on update: drop the stale link under the old handle. */
+ if (old_handle && f->handle != old_handle) {
+ th = to_hash(old_handle);
+ h = from_hash(old_handle >> 16);
+ if ((b = head->table[th]) != NULL) {
+ for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
+ if (*fp == f) {
+ *fp = f->next;
+ break;
+ }
+ }
+ }
+ }
tcf_tree_unlock(tp);
route4_reset_fastmap(tp->q->dev, head, f->id);
return 0;
errout:
/* Only a not-yet-inserted filter reaches here; updates return early. */
- if (f)
- kfree(f);
+ kfree(f);
return err;
}
/* NOTE(review): tail of route4_walk() -- invokes the caller's callback
 * for each filter; on failure sets arg->stop and bails out of all the
 * enclosing loops at once ('return' replaced the old single-level
 * 'break'). */
}
if (arg->fn(tp, (unsigned long)f, arg) < 0) {
arg->stop = 1;
- break;
+ return;
}
arg->count++;
}
}
/* NOTE(review): tail of route4_dump() -- emits the classid TLV, then the
 * action/police TLVs via tcf_exts_dump(), closes the options rtattr
 * ('rta'), and appends action stats. The '-' lines are the removed
 * open-coded CONFIG_NET_CLS_POLICE dump path. */
if (f->res.classid)
RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
-#ifdef CONFIG_NET_CLS_POLICE
- if (f->police) {
- struct rtattr * p_rta = (struct rtattr*)skb->tail;
- RTA_PUT(skb, TCA_ROUTE4_POLICE, 0, NULL);
+ if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
+ goto rtattr_failure;
- if (tcf_police_dump(skb, f->police) < 0)
- goto rtattr_failure;
+ rta->rta_len = skb->tail - b;
- p_rta->rta_len = skb->tail - (u8*)p_rta;
- }
-#endif
+ if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
+ goto rtattr_failure;
- rta->rta_len = skb->tail - b;
-#ifdef CONFIG_NET_CLS_POLICE
- if (f->police) {
- if (qdisc_copy_stats(skb, &f->police->stats))
- goto rtattr_failure;
- }
-#endif
return skb->len;
rtattr_failure: