/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */
7 #include <linux/config.h>
8 #include <linux/module.h>
9 #include <linux/types.h>
10 #include <linux/kernel.h>
11 #include <linux/skbuff.h>
12 #include <linux/errno.h>
13 #include <linux/netdevice.h>
15 #include <net/pkt_sched.h>
16 #include <net/route.h>
/*
 * Not quite sure if we need all the xchgs Alexey uses when accessing things.
 * Can always add them later ... :)
 *
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * [...]  (remainder of this comment elided in this excerpt)
 */
30 #define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */
31 #define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
35 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
37 #define DPRINTK(format,args...)
41 #define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
43 #define D2PRINTK(format,args...)
47 #define PRIV(tp) ((struct tcindex_data *) (tp)->root)
50 struct tcindex_filter_result {
51 struct tcf_police *police;
52 struct tcf_result res;
55 struct tcindex_filter {
57 struct tcindex_filter_result result;
58 struct tcindex_filter *next;
63 struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
64 struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
66 __u16 mask; /* AND key with mask */
67 int shift; /* shift ANDed key to the right */
68 int hash; /* hash table size; 0 if undefined */
69 int alloc_hash; /* allocated size */
70 int fall_through; /* 0: only classify if explicit match */
74 static struct tcindex_filter_result *lookup(struct tcindex_data *p,__u16 key)
76 struct tcindex_filter *f;
79 return p->perfect[key].res.class ? p->perfect+key : NULL;
82 for (f = p->h[key % p->hash]; f; f = f->next) {
90 static int tcindex_classify(struct sk_buff *skb, struct tcf_proto *tp,
91 struct tcf_result *res)
93 struct tcindex_data *p = PRIV(tp);
94 struct tcindex_filter_result *f;
96 D2PRINTK("tcindex_classify(skb %p,tp %p,res %p),p %p\n",skb,tp,res,p);
98 f = lookup(p,(skb->tc_index & p->mask) >> p->shift);
100 if (!p->fall_through)
102 res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle),
103 (skb->tc_index& p->mask) >> p->shift);
105 D2PRINTK("alg 0x%x\n",res->classid);
109 D2PRINTK("map 0x%x\n",res->classid);
110 #ifdef CONFIG_NET_CLS_POLICE
114 result = tcf_police(skb,f->police);
115 D2PRINTK("police %d\n",res);
123 static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
125 struct tcindex_data *p = PRIV(tp);
126 struct tcindex_filter_result *r;
128 DPRINTK("tcindex_get(tp %p,handle 0x%08x)\n",tp,handle);
129 if (p->perfect && handle >= p->alloc_hash)
131 r = lookup(PRIV(tp),handle);
132 return r && r->res.class ? (unsigned long) r : 0;
136 static void tcindex_put(struct tcf_proto *tp, unsigned long f)
138 DPRINTK("tcindex_put(tp %p,f 0x%lx)\n",tp,f);
142 static int tcindex_init(struct tcf_proto *tp)
144 struct tcindex_data *p;
146 DPRINTK("tcindex_init(tp %p)\n",tp);
147 p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL);
162 static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
164 struct tcindex_data *p = PRIV(tp);
165 struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
166 struct tcindex_filter *f = NULL;
169 DPRINTK("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n",tp,arg,p,f);
175 struct tcindex_filter **walk = NULL;
177 for (i = 0; i < p->hash; i++)
178 for (walk = p->h+i; *walk; walk = &(*walk)->next)
179 if (&(*walk)->result == r)
189 cl = __cls_set_class(&r->res.class,0);
191 tp->q->ops->cl_ops->unbind_tcf(tp->q,cl);
192 #ifdef CONFIG_NET_CLS_POLICE
193 tcf_police_release(r->police);
/*
 * There are no parameters for tcindex_init, so we overload tcindex_change
 */
206 static int tcindex_change(struct tcf_proto *tp,unsigned long base,u32 handle,
207 struct rtattr **tca,unsigned long *arg)
209 struct tcindex_filter_result new_filter_result = {
210 NULL, /* no policing */
211 { 0,0 }, /* no classification */
213 struct rtattr *opt = tca[TCA_OPTIONS-1];
214 struct rtattr *tb[TCA_TCINDEX_MAX];
215 struct tcindex_data *p = PRIV(tp);
216 struct tcindex_filter *f;
217 struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
218 struct tcindex_filter **walk;
222 DPRINTK("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
223 "p %p,r %p\n",tp,handle,tca,arg,opt,p,r);
225 DPRINTK("*arg = 0x%lx\n",*arg);
228 if (rtattr_parse(tb,TCA_TCINDEX_MAX,RTA_DATA(opt),RTA_PAYLOAD(opt)) < 0)
230 if (!tb[TCA_TCINDEX_HASH-1]) {
233 if (RTA_PAYLOAD(tb[TCA_TCINDEX_HASH-1]) < sizeof(int))
235 hash = *(int *) RTA_DATA(tb[TCA_TCINDEX_HASH-1]);
237 if (!tb[TCA_TCINDEX_MASK-1]) {
240 if (RTA_PAYLOAD(tb[TCA_TCINDEX_MASK-1]) < sizeof(__u16))
242 mask = *(__u16 *) RTA_DATA(tb[TCA_TCINDEX_MASK-1]);
244 if (!tb[TCA_TCINDEX_SHIFT-1])
247 if (RTA_PAYLOAD(tb[TCA_TCINDEX_SHIFT-1]) < sizeof(__u16))
249 shift = *(int *) RTA_DATA(tb[TCA_TCINDEX_SHIFT-1]);
251 if (p->perfect && hash <= (mask >> shift))
253 if (p->perfect && hash > p->alloc_hash)
255 if (p->h && hash != p->alloc_hash)
260 if (tb[TCA_TCINDEX_FALL_THROUGH-1]) {
261 if (RTA_PAYLOAD(tb[TCA_TCINDEX_FALL_THROUGH-1]) < sizeof(int))
264 *(int *) RTA_DATA(tb[TCA_TCINDEX_FALL_THROUGH-1]);
266 DPRINTK("classid/police %p/%p\n",tb[TCA_TCINDEX_CLASSID-1],
267 tb[TCA_TCINDEX_POLICE-1]);
268 if (!tb[TCA_TCINDEX_CLASSID-1] && !tb[TCA_TCINDEX_POLICE-1])
271 if ((mask >> shift) < PERFECT_HASH_THRESHOLD) {
272 p->hash = (mask >> shift)+1;
274 p->hash = DEFAULT_HASH_SIZE;
277 if (!p->perfect && !p->h) {
278 p->alloc_hash = p->hash;
279 DPRINTK("hash %d mask %d\n",p->hash,p->mask);
280 if (p->hash > (mask >> shift)) {
281 p->perfect = kmalloc(p->hash*
282 sizeof(struct tcindex_filter_result),GFP_KERNEL);
285 memset(p->perfect, 0,
286 p->hash * sizeof(struct tcindex_filter_result));
288 p->h = kmalloc(p->hash*sizeof(struct tcindex_filter *),
292 memset(p->h, 0, p->hash*sizeof(struct tcindex_filter *));
296 * Note: this could be as restrictive as
297 * if (handle & ~(mask >> shift))
298 * but then, we'd fail handles that may become valid after some
299 * future mask change. While this is extremely unlikely to ever
300 * matter, the check below is safer (and also more
301 * backwards-compatible).
303 if (p->perfect && handle >= p->alloc_hash)
306 r = p->perfect+handle;
308 r = lookup(p,handle);
311 r = &new_filter_result;
314 if (tb[TCA_TCINDEX_CLASSID-1]) {
315 unsigned long cl = cls_set_class(tp,&r->res.class,0);
318 tp->q->ops->cl_ops->unbind_tcf(tp->q,cl);
319 r->res.classid = *(__u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]);
320 r->res.class = tp->q->ops->cl_ops->bind_tcf(tp->q,base,
327 #ifdef CONFIG_NET_CLS_POLICE
329 struct tcf_police *police;
331 police = tb[TCA_TCINDEX_POLICE-1] ?
332 tcf_police_locate(tb[TCA_TCINDEX_POLICE-1],NULL) : NULL;
334 police = xchg(&r->police,police);
336 tcf_police_release(police);
339 if (r != &new_filter_result)
341 f = kmalloc(sizeof(struct tcindex_filter),GFP_KERNEL);
345 f->result = new_filter_result;
347 for (walk = p->h+(handle % p->hash); *walk; walk = &(*walk)->next)
355 static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
357 struct tcindex_data *p = PRIV(tp);
358 struct tcindex_filter *f,*next;
361 DPRINTK("tcindex_walk(tp %p,walker %p),p %p\n",tp,walker,p);
363 for (i = 0; i < p->hash; i++) {
364 if (!p->perfect[i].res.class)
366 if (walker->count >= walker->skip) {
368 (unsigned long) (p->perfect+i), walker)
379 for (i = 0; i < p->hash; i++) {
380 for (f = p->h[i]; f; f = next) {
382 if (walker->count >= walker->skip) {
383 if (walker->fn(tp,(unsigned long) &f->result,
395 static int tcindex_destroy_element(struct tcf_proto *tp,
396 unsigned long arg, struct tcf_walker *walker)
398 return tcindex_delete(tp,arg);
402 static void tcindex_destroy(struct tcf_proto *tp)
404 struct tcindex_data *p = PRIV(tp);
405 struct tcf_walker walker;
407 DPRINTK("tcindex_destroy(tp %p),p %p\n",tp,p);
410 walker.fn = &tcindex_destroy_element;
411 tcindex_walk(tp,&walker);
421 static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
422 struct sk_buff *skb, struct tcmsg *t)
424 struct tcindex_data *p = PRIV(tp);
425 struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
426 unsigned char *b = skb->tail;
429 DPRINTK("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
431 DPRINTK("p->perfect %p p->h %p\n",p->perfect,p->h);
432 rta = (struct rtattr *) b;
433 RTA_PUT(skb,TCA_OPTIONS,0,NULL);
435 t->tcm_handle = ~0; /* whatever ... */
436 RTA_PUT(skb,TCA_TCINDEX_HASH,sizeof(p->hash),&p->hash);
437 RTA_PUT(skb,TCA_TCINDEX_MASK,sizeof(p->mask),&p->mask);
438 RTA_PUT(skb,TCA_TCINDEX_SHIFT,sizeof(p->shift),&p->shift);
439 RTA_PUT(skb,TCA_TCINDEX_FALL_THROUGH,sizeof(p->fall_through),
443 t->tcm_handle = r-p->perfect;
445 struct tcindex_filter *f;
449 for (i = 0; !t->tcm_handle && i < p->hash; i++) {
450 for (f = p->h[i]; !t->tcm_handle && f;
453 t->tcm_handle = f->key;
457 DPRINTK("handle = %d\n",t->tcm_handle);
459 RTA_PUT(skb, TCA_TCINDEX_CLASSID, 4, &r->res.classid);
460 #ifdef CONFIG_NET_CLS_POLICE
462 struct rtattr *p_rta = (struct rtattr *) skb->tail;
464 RTA_PUT(skb,TCA_TCINDEX_POLICE,0,NULL);
465 if (tcf_police_dump(skb,r->police) < 0)
467 p_rta->rta_len = skb->tail-(u8 *) p_rta;
471 rta->rta_len = skb->tail-b;
475 skb_trim(skb, b - skb->data);
479 static struct tcf_proto_ops cls_tcindex_ops = {
482 .classify = tcindex_classify,
483 .init = tcindex_init,
484 .destroy = tcindex_destroy,
487 .change = tcindex_change,
488 .delete = tcindex_delete,
489 .walk = tcindex_walk,
490 .dump = tcindex_dump,
491 .owner = THIS_MODULE,
494 static int __init init_tcindex(void)
496 return register_tcf_proto_ops(&cls_tcindex_ops);
499 static void __exit exit_tcindex(void)
501 unregister_tcf_proto_ops(&cls_tcindex_ops);
504 module_init(init_tcindex)
505 module_exit(exit_tcindex)
506 MODULE_LICENSE("GPL");