2 * net/sched/cls_route.c ROUTE4 classifier.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 #include <linux/module.h>
13 #include <linux/config.h>
14 #include <asm/uaccess.h>
15 #include <asm/system.h>
16 #include <linux/bitops.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/string.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/if_ether.h>
28 #include <linux/inet.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/notifier.h>
33 #include <net/route.h>
34 #include <linux/skbuff.h>
36 #include <net/act_api.h>
37 #include <net/pkt_cls.h>
40 1. For now we assume that route tags < 256.
41 It allows to use direct table lookups, instead of hash tables.
42 2. For now we assume that "from TAG" and "fromdev DEV" statements
43 are mutually exclusive.
44 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
/*
 * Data-structure fragments. NOTE(review): the enclosing "struct ... {"
 * declarations and their closing lines are missing from this excerpt —
 * confirm the exact layout against the complete source.
 */
/* route4_fastmap slot: caches the filter last matched for an (id, iif) pair. */
49 struct route4_filter *filter;
/* route4_head: 16 fastmap cache slots, plus one bucket per route tag
 * (tags assumed < 256, per the header note) and one extra bucket. */
56 struct route4_fastmap fastmap[16];
57 struct route4_bucket *table[256+1];
/* route4_bucket: 16 "from TAG" chains + 16 "fromdev DEV" chains + 1 wildcard. */
62 struct route4_filter *ht[16+16+1];
/* route4_filter: singly linked chain entry with its classification result,
 * optional policer and a back-pointer to the owning bucket. */
67 struct route4_filter *next;
71 struct tcf_result res;
72 #ifdef CONFIG_NET_CLS_POLICE
73 struct tcf_police *police;
/* back-pointer to the bucket this filter is chained in */
77 struct route4_bucket *bkt;
/* Sentinel cached in the fastmap to remember a failed (negative) lookup. */
80 #define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
/* Hash an (id, iif) pair into one of the 16 fastmap cache slots.
 * NOTE(review): the function body is not visible in this excerpt. */
82 static __inline__ int route4_fastmap_hash(u32 id, int iif)
/* Invalidate the whole fastmap cache. Done under dev->queue_lock so the
 * classifier fast path never observes a partially cleared map. The id
 * argument is unused in the visible lines — confirm against the full
 * source. NOTE(review): function braces are missing from this excerpt. */
87 static void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
89 spin_lock_bh(&dev->queue_lock);
90 memset(head->fastmap, 0, sizeof(head->fastmap));
91 spin_unlock_bh(&dev->queue_lock);
/* Record filter f as the cached lookup result for (id, iif) so the next
 * classification of the same flow can skip the bucket walk.
 * NOTE(review): function braces are missing from this excerpt. */
94 static void __inline__
95 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
96 struct route4_filter *f)
98 int h = route4_fastmap_hash(id, iif);
99 head->fastmap[h].id = id;
100 head->fastmap[h].iif = iif;
101 head->fastmap[h].filter = f;
/* Run-time index helpers used by route4_classify():
 * route4_hash_to()   — picks the head->table[] bucket from the "to" tag;
 * route4_hash_from() — picks a 0..15 "from TAG" chain inside a bucket;
 * route4_hash_iif()  — picks a 16..31 "fromdev" chain from the ifindex;
 * route4_hash_wild() — the single wildcard chain (presumably slot 32).
 * NOTE(review): only route4_hash_iif()'s body is visible in this excerpt. */
104 static __inline__ int route4_hash_to(u32 id)
109 static __inline__ int route4_hash_from(u32 id)
114 static __inline__ int route4_hash_iif(int iif)
116 return 16 + ((iif>>16)&0xF);
119 static __inline__ int route4_hash_wild(void)
/* Expanded in route4_classify() when a filter matches: with policing
 * compiled in, run the policer and return its verdict when it is >= 0,
 * otherwise fall through so the caller records the match. Without
 * CONFIG_NET_CLS_POLICE the macro expands to nothing.
 * NOTE(review): the #else line between the two definitions is missing
 * from this excerpt. */
124 #ifdef CONFIG_NET_CLS_POLICE
125 #define IF_ROUTE_POLICE \
127 int pol_res = tcf_police(skb, f->police); \
128 if (pol_res >= 0) return pol_res; \
134 #define IF_ROUTE_POLICE
/*
 * Classify a packet by its routing decision (skb->dst).
 * Lookup order: fastmap cache, then "from TAG" chains, then "fromdev"
 * chains, then the wildcard chain — matching the documented priority
 * "to TAG from ANY" over "to ANY from XXX". Failed lookups are cached
 * as ROUTE4_FAILURE when policing is compiled in.
 * NOTE(review): many body lines (id extraction, returns, braces) are
 * missing from this excerpt; comments below cover only visible lines.
 */
138 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
139 struct tcf_result *res)
141 struct route4_head *head = (struct route4_head*)tp->root;
142 struct dst_entry *dst;
143 struct route4_bucket *b;
144 struct route4_filter *f;
145 #ifdef CONFIG_NET_CLS_POLICE
/* No routing decision attached — nothing to classify on. */
151 if ((dst = skb->dst) == NULL)
158 iif = ((struct rtable*)dst)->fl.iif;
/* Fast path: result cached from a previous lookup of the same flow. */
160 h = route4_fastmap_hash(id, iif);
161 if (id == head->fastmap[h].id &&
162 iif == head->fastmap[h].iif &&
163 (f = head->fastmap[h].filter) != NULL) {
164 if (f == ROUTE4_FAILURE)
/* Slow path: walk the bucket selected by the route tag. */
171 h = route4_hash_to(id);
174 if ((b = head->table[h]) != NULL) {
/* 1. "from TAG" chains — highest priority. */
175 f = b->ht[route4_hash_from(id)];
177 for ( ; f; f = f->next) {
180 IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
/* 2. "fromdev DEV" chains, keyed by input interface. */
185 for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next) {
188 IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
/* 3. wildcard chain. */
193 for (f = b->ht[route4_hash_wild()]; f; f = f->next) {
195 IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
/* Nothing matched: cache the failure so repeated misses stay cheap. */
206 #ifdef CONFIG_NET_CLS_POLICE
209 route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
/* Fallback: treat the tag itself as a classid when its major part is
 * empty or matches this qdisc's handle. */
214 if (id && (TC_H_MAJ(id) == 0 ||
215 !(TC_H_MAJ(id^tp->q->handle)))) {
/* Configuration-time hash helpers: map a user-visible filter handle to
 * a head->table[] index (to_hash) and an in-bucket chain index
 * (from_hash). A handle whose upper half has bit 0x8000 set denotes a
 * "fromdev" filter, which lands in chains 16 + (id & 0xF).
 * NOTE(review): to_hash()'s body and part of from_hash() are missing
 * from this excerpt. */
223 static u32 to_hash(u32 id)
231 static u32 from_hash(u32 id)
236 if (!(id & 0x8000)) {
241 return 16 + (id&0xF);
/* Look up a filter by its 32-bit handle; returns the route4_filter cast
 * to unsigned long (tcf_proto_ops convention), presumably 0 when not
 * found. The handle encodes the destination bucket in its low half and
 * the source/iif chain in its high half.
 * NOTE(review): several body lines are missing from this excerpt. */
244 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
246 struct route4_head *head = (struct route4_head*)tp->root;
247 struct route4_bucket *b;
248 struct route4_filter *f;
254 h1 = to_hash(handle);
258 h2 = from_hash(handle>>16);
/* Linear scan of the single chain the handle hashes to. */
262 if ((b = head->table[h1]) != NULL) {
263 for (f = b->ht[h2]; f; f = f->next)
264 if (f->handle == handle)
265 return (unsigned long)f;
/* No per-reference state: get/put do no reference counting, and init
 * defers all allocation to route4_change() (tp->root starts NULL). */
270 static void route4_put(struct tcf_proto *tp, unsigned long f)
274 static int route4_init(struct tcf_proto *tp)
/* Tear down the whole classifier: atomically detach the root, then walk
 * every bucket (indices 0..256) and every chain (0..32), unbinding each
 * filter's class and releasing its policer.
 * NOTE(review): the kfree calls and the NULL-head early return are
 * missing from this excerpt — confirm against the full source. */
279 static void route4_destroy(struct tcf_proto *tp)
281 struct route4_head *head = xchg(&tp->root, NULL);
287 for (h1=0; h1<=256; h1++) {
288 struct route4_bucket *b;
290 if ((b = head->table[h1]) != NULL) {
291 for (h2=0; h2<=32; h2++) {
292 struct route4_filter *f;
294 while ((f = b->ht[h2]) != NULL) {
296 tcf_unbind_filter(tp, &f->res);
297 #ifdef CONFIG_NET_CLS_POLICE
298 tcf_police_release(f->police,TCA_ACT_UNBIND);
/* Remove one filter: unlink it from its chain, invalidate the fastmap,
 * unbind the class and release the policer; then, if the owning bucket
 * has become empty across all 33 chains, detach the bucket as well.
 * NOTE(review): the unlink, locking and kfree lines are missing from
 * this excerpt. */
309 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
311 struct route4_head *head = (struct route4_head*)tp->root;
312 struct route4_filter **fp, *f = (struct route4_filter*)arg;
314 struct route4_bucket *b;
/* Find f on the chain its handle hashes to. */
323 for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
329 route4_reset_fastmap(tp->q->dev, head, f->id);
330 tcf_unbind_filter(tp, &f->res);
331 #ifdef CONFIG_NET_CLS_POLICE
332 tcf_police_release(f->police,TCA_ACT_UNBIND);
/* Bucket garbage collection: drop it once every chain is empty. */
338 for (i=0; i<=32; i++)
342 /* OK, session has no flows */
344 head->table[to_hash(h)] = NULL;
/*
 * Create or update a filter from a netlink request.
 * Update path: an existing filter (passed via *arg) gets its classid
 * and/or policer replaced in place. Create path: lazily allocate the
 * head and the destination bucket, build the 32-bit handle from the
 * TO / FROM / IIF attributes, and insert the new filter on its chain
 * sorted by handle.
 * NOTE(review): error handling, locking and several body lines are
 * missing from this excerpt; comments cover only visible lines.
 */
354 static int route4_change(struct tcf_proto *tp, unsigned long base,
359 struct route4_head *head = tp->root;
360 struct route4_filter *f, *f1, **ins_f;
361 struct route4_bucket *b;
362 struct rtattr *opt = tca[TCA_OPTIONS-1];
363 struct rtattr *tb[TCA_ROUTE4_MAX];
/* No options attribute: only acceptable when no handle was given. */
368 return handle ? -EINVAL : 0;
370 if (rtattr_parse(tb, TCA_ROUTE4_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
/* --- update of an existing filter --- */
373 if ((f = (struct route4_filter*)*arg) != NULL) {
374 if (f->handle != handle && handle)
376 if (tb[TCA_ROUTE4_CLASSID-1]) {
377 f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
378 tcf_bind_filter(tp, &f->res, base);
380 #ifdef CONFIG_NET_CLS_POLICE
381 if (tb[TCA_ROUTE4_POLICE-1]) {
382 err = tcf_change_police(tp, &f->police,
383 tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
391 /* Now more serious part... */
/* --- create path: lazily allocate the classifier head --- */
394 head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
397 memset(head, 0, sizeof(struct route4_head));
404 f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
408 memset(f, 0, sizeof(*f));
/* Destination tag fills the handle's low half. */
412 if (tb[TCA_ROUTE4_TO-1]) {
415 if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < 4)
417 f->id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
/* Source tag and input device are mutually exclusive (assumption 2 in
 * the header note); either fills the handle's upper half, with bit
 * 0x8000 flagging a "fromdev" filter and 0xFFFF meaning "any". */
422 if (tb[TCA_ROUTE4_FROM-1]) {
424 if (tb[TCA_ROUTE4_IIF-1])
426 if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < 4)
428 sid = (*(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]))
431 f->handle |= sid<<16;
433 } else if (tb[TCA_ROUTE4_IIF-1]) {
434 if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < 4)
436 f->iif = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
439 f->handle |= (f->iif|0x8000)<<16;
441 f->handle |= 0xFFFF<<16;
/* A caller-supplied handle must agree with the computed encoding. */
444 f->handle |= handle&0x7F00;
445 if (f->handle != handle)
449 if (tb[TCA_ROUTE4_CLASSID-1]) {
450 if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < 4)
452 f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
/* Lazily allocate the destination bucket for this tag. */
455 h1 = to_hash(f->handle);
456 if ((b = head->table[h1]) == NULL) {
458 b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
461 memset(b, 0, sizeof(*b));
/* Insert sorted by handle; a duplicate handle is rejected. */
470 h2 = from_hash(f->handle>>16);
471 for (ins_f = &b->ht[h2]; (f1=*ins_f) != NULL; ins_f = &f1->next) {
472 if (f->handle < f1->handle)
474 if (f1->handle == f->handle)
478 tcf_bind_filter(tp, &f->res, base);
479 #ifdef CONFIG_NET_CLS_POLICE
480 if (tb[TCA_ROUTE4_POLICE-1])
481 tcf_change_police(tp, &f->police, tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
/* The new filter may shadow cached lookups — flush the fastmap. */
489 route4_reset_fastmap(tp->q->dev, head, f->id);
490 *arg = (unsigned long)f;
/* Iterate every filter for a dump: all 257 buckets and all 33 chains,
 * honouring arg->skip / arg->count and stopping when arg->fn() returns
 * a negative value.
 * NOTE(review): the arg->stopped bookkeeping and count increments are
 * missing from this excerpt. */
499 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
501 struct route4_head *head = tp->root;
510 for (h = 0; h <= 256; h++) {
511 struct route4_bucket *b = head->table[h];
514 for (h1 = 0; h1 <= 32; h1++) {
515 struct route4_filter *f;
517 for (f = b->ht[h1]; f; f = f->next) {
518 if (arg->count < arg->skip) {
522 if (arg->fn(tp, (unsigned long)f, arg) < 0) {
/* Serialize one filter back to netlink: decode TO / FROM / IIF from the
 * handle encoding (upper-half bit 0x8000 = fromdev, 0xFFFF = wildcard
 * source), then the classid and policer attributes; statistics are
 * appended after the options rtattr is closed.
 * NOTE(review): the id computations and rtattr_failure label are
 * missing from this excerpt. */
533 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
534 struct sk_buff *skb, struct tcmsg *t)
536 struct route4_filter *f = (struct route4_filter*)fh;
537 unsigned char *b = skb->tail;
544 t->tcm_handle = f->handle;
546 rta = (struct rtattr*)b;
547 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
549 if (!(f->handle&0x8000)) {
551 RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
553 if (f->handle&0x80000000) {
554 if ((f->handle>>16) != 0xFFFF)
555 RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
558 RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
561 RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
562 #ifdef CONFIG_NET_CLS_POLICE
563 if (tcf_dump_police(skb, f->police, TCA_ROUTE4_POLICE) < 0)
/* Back-fill the options rtattr length now that the payload is known. */
567 rta->rta_len = skb->tail - b;
568 #ifdef CONFIG_NET_CLS_POLICE
570 if (tcf_police_dump_stats(skb, f->police) < 0)
/* Presumably the rtattr_failure path: roll the skb back — confirm. */
576 skb_trim(skb, b - skb->data);
/* tcf_proto_ops registration and module plumbing for the ROUTE4
 * classifier. NOTE(review): several ops initializers (e.g. .kind,
 * .get, .put, .init, .walk, .dump) and the function bodies' braces are
 * missing from this excerpt. */
580 static struct tcf_proto_ops cls_route4_ops = {
583 .classify = route4_classify,
585 .destroy = route4_destroy,
588 .change = route4_change,
589 .delete = route4_delete,
592 .owner = THIS_MODULE,
/* Register the classifier with the packet-scheduler core on load... */
595 static int __init init_route4(void)
597 return register_tcf_proto_ops(&cls_route4_ops);
/* ...and unregister it on unload. */
600 static void __exit exit_route4(void)
602 unregister_tcf_proto_ops(&cls_route4_ops);
605 module_init(init_route4)
606 module_exit(exit_route4)
607 MODULE_LICENSE("GPL");