/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 FIB: lookup engine and maintenance routines.
 *
 * Version:	$Id: fib_hash.c,v 1.13 2001/10/31 21:55:54 davem Exp $
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
18 #include <linux/config.h>
19 #include <asm/uaccess.h>
20 #include <asm/system.h>
21 #include <asm/bitops.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
26 #include <linux/string.h>
27 #include <linux/socket.h>
28 #include <linux/sockios.h>
29 #include <linux/errno.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_arp.h>
34 #include <linux/proc_fs.h>
35 #include <linux/skbuff.h>
36 #include <linux/netlink.h>
37 #include <linux/init.h>
40 #include <net/protocol.h>
41 #include <net/route.h>
44 #include <net/ip_fib.h>
51 static kmem_cache_t * fn_hash_kmem;
/*
 * These bizarre types are just to force strict type checking.
 * When I reversed order of bytes and changed to natural mask lengths,
 * I forgot to make fixes in several places. Now I am lazy to return
 * it back.
 */
70 struct fib_node *fn_next;
71 struct fib_info *fn_info;
72 #define FIB_INFO(f) ((f)->fn_info)
81 #define FN_S_ACCESSED 2
83 static int fib_hash_zombies;
87 struct fn_zone *fz_next; /* Next not empty zone */
88 struct fib_node **fz_hash; /* Hash table pointer */
89 int fz_nent; /* Number of entries */
91 int fz_divisor; /* Hash divisor */
92 u32 fz_hashmask; /* (fz_divisor - 1) */
93 #define FZ_HASHMASK(fz) ((fz)->fz_hashmask)
95 int fz_order; /* Zone order */
97 #define FZ_MASK(fz) ((fz)->fz_mask)
/* NOTE. On fast computers evaluation of fz_hashmask and fz_mask
 * can be cheaper than memory lookup, so that FZ_* macros are used.
 */
106 struct fn_zone *fn_zones[33];
107 struct fn_zone *fn_zone_list;
110 static __inline__ fn_hash_idx_t fn_hash(fn_key_t key, struct fn_zone *fz)
112 u32 h = ntohl(key.datum)>>(32 - fz->fz_order);
116 h &= FZ_HASHMASK(fz);
117 return *(fn_hash_idx_t*)&h;
/* fz_key_0: the zone-0 (default route) key is always zero.
 * fz_prefix: extract the masked, network-order prefix from a key. */
120 #define fz_key_0(key) ((key).datum = 0)
121 #define fz_prefix(key,fz) ((key).datum)
123 static __inline__ fn_key_t fz_key(u32 dst, struct fn_zone *fz)
126 k.datum = dst & FZ_MASK(fz);
130 static __inline__ struct fib_node ** fz_chain_p(fn_key_t key, struct fn_zone *fz)
132 return &fz->fz_hash[fn_hash(key, fz).datum];
135 static __inline__ struct fib_node * fz_chain(fn_key_t key, struct fn_zone *fz)
137 return fz->fz_hash[fn_hash(key, fz).datum];
140 static __inline__ int fn_key_eq(fn_key_t a, fn_key_t b)
142 return a.datum == b.datum;
145 static __inline__ int fn_key_leq(fn_key_t a, fn_key_t b)
147 return a.datum <= b.datum;
/* Protects all zone hash tables, chains and fib_node state below. */
150 static rwlock_t fib_hash_lock = RW_LOCK_UNLOCKED;
/* Largest table a zone may grow to: one MAX_ORDER page block worth of
 * chain-head pointers (see fn_rehash_zone). */
152 #define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct fib_node *))
154 static struct fib_node **fz_hash_alloc(int divisor)
156 unsigned long size = divisor * sizeof(struct fib_node *);
158 if (divisor <= 1024) {
159 return kmalloc(size, GFP_KERNEL);
161 return (struct fib_node **)
162 __get_free_pages(GFP_KERNEL, get_order(size));
/*
 * Re-insert every node from the old hash table into the (already
 * resized) zone table, preserving per-chain key order via the sorted
 * insertion scan below.
 *
 * NOTE(review): this excerpt is garbled — each line carries a stray
 * leading number and several original lines are missing (the third
 * parameter line, the loop body that saves f->fn_next into 'next' and
 * performs the relink, and the closing braces).  Preserved verbatim;
 * reconcile against the full source before editing.
 */
166 /* The fib hash lock must be held when this is called. */
167 static __inline__ void fn_rebuild_zone(struct fn_zone *fz,
168 struct fib_node **old_ht,
172 struct fib_node *f, **fp, *next;
174 for (i=0; i<old_divisor; i++) {
175 for (f=old_ht[i]; f; f=next) {
/* Find the sorted insertion point in the new chain. */
177 for (fp = fz_chain_p(f->fn_key, fz);
178 *fp && fn_key_leq((*fp)->fn_key, f->fn_key);
179 fp = &(*fp)->fn_next)
/*
 * Free a table obtained from fz_hash_alloc(); the divisor decides
 * whether it was a kmalloc or a whole-page allocation.
 * NOTE(review): the kfree branch was dropped from this excerpt and is
 * restored here to mirror fz_hash_alloc() — verify against your tree.
 */
static void fz_hash_free(struct fib_node **hash, int divisor)
{
	if (divisor <= 1024)
		kfree(hash);
	else
		free_pages((unsigned long) hash,
			   get_order(divisor * sizeof(struct fib_node *)));
}
/*
 * Double a zone's hash table when it gets crowded, then rebuild the
 * chains under the write lock and free the old table.
 *
 * NOTE(review): garbled excerpt — stray leading numbers, and missing
 * lines include the switch cases that pick the new divisor, the NULL
 * check after fz_hash_alloc(), and crucially the assignment of the
 * new table into fz->fz_hash (original line 233).  Preserved
 * verbatim; reconcile against the full source before editing.
 */
196 static void fn_rehash_zone(struct fn_zone *fz)
198 struct fib_node **ht, **old_ht;
199 int old_divisor, new_divisor;
202 old_divisor = fz->fz_divisor;
204 switch (old_divisor) {
/* Guard: never grow past what one MAX_ORDER allocation can hold. */
212 if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
213 printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
216 new_divisor = (old_divisor << 1);
220 new_hashmask = (new_divisor - 1);
222 #if RT_CACHE_DEBUG >= 2
223 printk("fn_rehash_zone: hash for zone %d grows from %d\n", fz->fz_order, old_divisor);
226 ht = fz_hash_alloc(new_divisor);
229 memset(ht, 0, new_divisor*sizeof(struct fib_node*));
/* Swap tables and rebuild under the write lock so lookups never see
 * a half-built table. */
231 write_lock_bh(&fib_hash_lock);
232 old_ht = fz->fz_hash;
234 fz->fz_hashmask = new_hashmask;
235 fz->fz_divisor = new_divisor;
236 fn_rebuild_zone(fz, old_ht, old_divisor);
237 write_unlock_bh(&fib_hash_lock);
239 fz_hash_free(old_ht, old_divisor);
243 static void fn_free_node(struct fib_node * f)
245 fib_release_info(FIB_INFO(f));
246 kmem_cache_free(fn_hash_kmem, f);
/*
 * Create and link the zone for prefix length z, keeping the zone list
 * sorted from most- to least-specific mask so lookup walks longest
 * prefix first.
 *
 * NOTE(review): garbled excerpt — stray leading numbers; missing
 * lines include the NULL checks after kmalloc/fz_hash_alloc, the
 * fz_divisor initialisation (z ? 16 : 1 upstream), and the
 * if/else framing around the list splice.  Preserved verbatim;
 * reconcile against the full source before editing.
 */
250 static struct fn_zone *
251 fn_new_zone(struct fn_hash *table, int z)
254 struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL);
258 memset(fz, 0, sizeof(struct fn_zone));
264 fz->fz_hashmask = (fz->fz_divisor - 1);
265 fz->fz_hash = fz_hash_alloc(fz->fz_divisor);
270 memset(fz->fz_hash, 0, fz->fz_divisor*sizeof(struct fib_node*));
272 fz->fz_mask = inet_make_mask(z);
274 /* Find the first not empty zone with more specific mask */
275 for (i=z+1; i<=32; i++)
276 if (table->fn_zones[i])
278 write_lock_bh(&fib_hash_lock);
280 /* No more specific masks, we are the first. */
281 fz->fz_next = table->fn_zone_list;
282 table->fn_zone_list = fz;
/* Otherwise splice after the first more-specific zone found above. */
284 fz->fz_next = table->fn_zones[i]->fz_next;
285 table->fn_zones[i]->fz_next = fz;
287 table->fn_zones[z] = fz;
288 write_unlock_bh(&fib_hash_lock);
/*
 * Longest-prefix-match lookup: walk zones most-specific first, scan
 * the sorted chain for the masked key, filter on tos/scope/zombie
 * state, and let fib_semantic_match() decide on the fib_info.
 *
 * NOTE(review): garbled excerpt — stray leading numbers; missing
 * lines include the return type / local declarations, the break and
 * continue statements the conditionals guard, and the exit paths.
 * Preserved verbatim; reconcile against the full source before
 * editing.
 */
293 fn_hash_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
297 struct fn_hash *t = (struct fn_hash*)tb->tb_data;
299 read_lock(&fib_hash_lock);
300 for (fz = t->fn_zone_list; fz; fz = fz->fz_next) {
302 fn_key_t k = fz_key(flp->fl4_dst, fz);
304 for (f = fz_chain(k, fz); f; f = f->fn_next) {
/* Chains are sorted by key: once past k, the rest cannot match. */
305 if (!fn_key_eq(k, f->fn_key)) {
306 if (fn_key_leq(k, f->fn_key))
311 #ifdef CONFIG_IP_ROUTE_TOS
312 if (f->fn_tos && f->fn_tos != flp->fl4_tos)
/* Mark the node so delayed-delete (zombie) logic knows it was hit. */
315 f->fn_state |= FN_S_ACCESSED;
317 if (f->fn_state&FN_S_ZOMBIE)
319 if (f->fn_scope < flp->fl4_scope)
322 err = fib_semantic_match(f->fn_type, FIB_INFO(f), flp, res);
324 res->type = f->fn_type;
325 res->scope = f->fn_scope;
326 res->prefixlen = fz->fz_order;
335 read_unlock(&fib_hash_lock);
/* Index of the default route handed out last time; -1 forces a rescan. */
339 static int fn_hash_last_dflt=-1;
/*
 * Probe the ARP state of a default gateway to judge whether it is
 * alive; remember the best non-dead candidate in *last_resort /
 * *last_idx for fn_hash_select_default().
 *
 * NOTE(review): garbled excerpt — stray leading numbers; missing
 * lines include the neigh_release() after the lookup and the returns.
 * Preserved verbatim; reconcile against the full source.
 */
341 static int fib_detect_death(struct fib_info *fi, int order,
342 struct fib_info **last_resort, int *last_idx)
345 int state = NUD_NONE;
347 n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
349 state = n->nud_state;
352 if (state==NUD_REACHABLE)
354 if ((state&NUD_VALID) && order != fn_hash_last_dflt)
356 if ((state&NUD_VALID) ||
357 (*last_idx<0 && order > fn_hash_last_dflt)) {
/*
 * Round-robin among equal-priority default routes in zone 0, skipping
 * gateways whose ARP entry looks dead (see fib_detect_death) and
 * falling back to the last non-dead candidate seen.
 *
 * NOTE(review): garbled excerpt — stray leading numbers; missing
 * lines include several locals (order, last_idx), early-exit checks,
 * and the out/out_unlock label structure.  Preserved verbatim;
 * reconcile against the full source before editing.
 */
365 fn_hash_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
369 struct fib_info *fi = NULL;
370 struct fib_info *last_resort;
371 struct fn_hash *t = (struct fn_hash*)tb->tb_data;
372 struct fn_zone *fz = t->fn_zones[0];
381 read_lock(&fib_hash_lock);
382 for (f = fz->fz_hash[0]; f; f = f->fn_next) {
383 struct fib_info *next_fi = FIB_INFO(f);
385 if ((f->fn_state&FN_S_ZOMBIE) ||
386 f->fn_scope != res->scope ||
387 f->fn_type != RTN_UNICAST)
390 if (next_fi->fib_priority > res->fi->fib_priority)
/* Only consider routes via a directly-reachable (link-scope) gateway. */
392 if (!next_fi->fib_nh[0].nh_gw || next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
394 f->fn_state |= FN_S_ACCESSED;
397 if (next_fi != res->fi)
399 } else if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
401 fib_info_put(res->fi);
403 atomic_inc(&fi->fib_clntref);
404 fn_hash_last_dflt = order;
411 if (order<=0 || fi==NULL) {
412 fn_hash_last_dflt = -1;
416 if (!fib_detect_death(fi, order, &last_resort, &last_idx)) {
418 fib_info_put(res->fi);
420 atomic_inc(&fi->fib_clntref);
421 fn_hash_last_dflt = order;
/* All candidates look dead: fall back to the best one we saw. */
427 fib_info_put(res->fi);
428 res->fi = last_resort;
430 atomic_inc(&last_resort->fib_clntref);
432 fn_hash_last_dflt = last_idx;
434 read_unlock(&fib_hash_lock);
/*
 * Chain-scan helpers used by insert/delete: advance f/fp along a
 * chain (FIB_SCAN), stop at the end of the run of equal keys
 * (FIB_SCAN_KEY), or of equal key+tos (FIB_SCAN_TOS).  Without
 * CONFIG_IP_ROUTE_TOS the tos variant degenerates to the key scan.
 */
437 #define FIB_SCAN(f, fp) \
438 for ( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)
440 #define FIB_SCAN_KEY(f, fp, key) \
441 for ( ; ((f) = *(fp)) != NULL && fn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)
443 #ifndef CONFIG_IP_ROUTE_TOS
444 #define FIB_SCAN_TOS(f, fp, key, tos) FIB_SCAN_KEY(f, fp, key)
446 #define FIB_SCAN_TOS(f, fp, key, tos) \
447 for ( ; ((f) = *(fp)) != NULL && fn_key_eq((f)->fn_key, (key)) && \
448 (f)->fn_tos == (tos) ; (fp) = &(f)->fn_next)
/* Forward declaration: netlink notification helper defined below. */
452 static void rtmsg_fib(int, struct fib_node*, int, int,
454 struct netlink_skb_parms *);
/*
 * Insert (or, with NLM_F_REPLACE, replace) a route.  Chains are kept
 * sorted by (key, tos, priority); the NLM_F_* flags from the netlink
 * request control exclusive/replace/append semantics.
 *
 * NOTE(review): garbled excerpt — stray leading numbers; missing
 * lines include the return type and many locals (fz, fi, key, dst,
 * err, tos), goto-based error exits, the #else/#endif partners of the
 * visible #ifdef CONFIG_IP_ROUTE_TOS lines, the zombie-resurrection
 * branch body, and the final list-splice around the write lock at
 * original lines 605-607.  Preserved verbatim; reconcile against the
 * full source before editing.
 */
457 fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
458 struct nlmsghdr *n, struct netlink_skb_parms *req)
460 struct fn_hash *table = (struct fn_hash*)tb->tb_data;
461 struct fib_node *new_f, *f, **fp, **del_fp;
465 int z = r->rtm_dst_len;
466 int type = r->rtm_type;
467 #ifdef CONFIG_IP_ROUTE_TOS
473 FTprint("tb(%d)_insert: %d %08x/%d %d %08x\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
474 *(u32*)rta->rta_dst : 0, z, rta->rta_oif ? *rta->rta_oif : -1,
475 rta->rta_prefsrc ? *(u32*)rta->rta_prefsrc : 0);
/* Lazily create the zone for this prefix length. */
478 fz = table->fn_zones[z];
479 if (!fz && !(fz = fn_new_zone(table, z)))
485 memcpy(&dst, rta->rta_dst, 4);
/* Reject a destination with host bits set outside the prefix. */
486 if (dst & ~FZ_MASK(fz))
488 key = fz_key(dst, fz);
491 if ((fi = fib_create_info(r, rta, n, &err)) == NULL)
/* Grow the zone's hash table when load factor exceeds 2. */
494 if (fz->fz_nent > (fz->fz_divisor<<1) &&
495 fz->fz_divisor < FZ_MAX_DIVISOR &&
496 (z==32 || (1<<z) > fz->fz_divisor))
499 fp = fz_chain_p(key, fz);
503 * Scan list to find the first route with the same destination
506 if (fn_key_leq(key,f->fn_key))
510 #ifdef CONFIG_IP_ROUTE_TOS
512 * Find route with the same destination and tos.
514 FIB_SCAN_KEY(f, fp, key) {
515 if (f->fn_tos <= tos)
/* A matching zombie can be resurrected instead of reinserted. */
522 if (f && (f->fn_state&FN_S_ZOMBIE) &&
523 #ifdef CONFIG_IP_ROUTE_TOS
526 fn_key_eq(f->fn_key, key)) {
533 FIB_SCAN_TOS(f, fp, key, tos) {
534 if (fi->fib_priority <= FIB_INFO(f)->fib_priority)
538 /* Now f==*fp points to the first node with the same
539 keys [prefix,tos,priority], if such key already
540 exists or to the node, before which we will insert new one.
544 #ifdef CONFIG_IP_ROUTE_TOS
547 fn_key_eq(f->fn_key, key) &&
548 fi->fib_priority == FIB_INFO(f)->fib_priority) {
549 struct fib_node **ins_fp;
552 if (n->nlmsg_flags&NLM_F_EXCL)
555 if (n->nlmsg_flags&NLM_F_REPLACE) {
/* Duplicate check within the equal-priority run. */
565 FIB_SCAN_TOS(f, fp, key, tos) {
566 if (fi->fib_priority != FIB_INFO(f)->fib_priority)
568 if (f->fn_type == type && f->fn_scope == r->rtm_scope
569 && FIB_INFO(f) == fi)
573 if (!(n->nlmsg_flags&NLM_F_APPEND)) {
581 if (!(n->nlmsg_flags&NLM_F_CREATE))
586 new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
590 memset(new_f, 0, sizeof(struct fib_node));
593 #ifdef CONFIG_IP_ROUTE_TOS
596 new_f->fn_type = type;
597 new_f->fn_scope = r->rtm_scope;
598 FIB_INFO(new_f) = fi;
601 * Insert new entry to the list.
605 write_lock_bh(&fib_hash_lock);
607 write_unlock_bh(&fib_hash_lock);
612 /* Unlink replaced node */
613 write_lock_bh(&fib_hash_lock);
614 *del_fp = f->fn_next;
615 write_unlock_bh(&fib_hash_lock);
617 if (!(f->fn_state&FN_S_ZOMBIE))
618 rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
619 if (f->fn_state&FN_S_ACCESSED)
626 rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->tb_id, n, req);
/* Error path: drop the fib_info reference taken above. */
630 fib_release_info(fi);
/*
 * Delete a route matching the request's key/tos/type/scope/protocol
 * and nexthop.  Recently used nodes are not freed immediately but
 * marked FN_S_ZOMBIE so in-flight lookups stay safe; a flush is
 * forced once too many zombies accumulate.
 *
 * NOTE(review): garbled excerpt — stray leading numbers; missing
 * lines include the return type, locals (fz, key, dst, tos), the
 * FIB_SCAN loop framing around original line 669, rt_cache_flush()
 * calls, fn_free_node(f) on the immediate-delete path, and the
 * returns.  Preserved verbatim; reconcile against the full source.
 */
636 fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
637 struct nlmsghdr *n, struct netlink_skb_parms *req)
639 struct fn_hash *table = (struct fn_hash*)tb->tb_data;
640 struct fib_node **fp, **del_fp, *f;
641 int z = r->rtm_dst_len;
645 #ifdef CONFIG_IP_ROUTE_TOS
649 FTprint("tb(%d)_delete: %d %08x/%d %d\n", tb->tb_id, r->rtm_type, rta->rta_dst ?
650 *(u32*)rta->rta_dst : 0, z, rta->rta_oif ? *rta->rta_oif : -1);
653 if ((fz = table->fn_zones[z]) == NULL)
659 memcpy(&dst, rta->rta_dst, 4);
660 if (dst & ~FZ_MASK(fz))
662 key = fz_key(dst, fz);
665 fp = fz_chain_p(key, fz);
/* Chains are sorted: passing key without a hit means no match. */
669 if (fn_key_eq(f->fn_key, key))
671 if (fn_key_leq(key, f->fn_key)) {
675 #ifdef CONFIG_IP_ROUTE_TOS
676 FIB_SCAN_KEY(f, fp, key) {
677 if (f->fn_tos == tos)
684 FIB_SCAN_TOS(f, fp, key, tos) {
685 struct fib_info * fi = FIB_INFO(f);
/* A zombie here is already logically deleted — reap it instead. */
687 if (f->fn_state&FN_S_ZOMBIE) {
692 if (del_fp == NULL &&
693 (!r->rtm_type || f->fn_type == r->rtm_type) &&
694 (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
695 (!r->rtm_protocol || fi->fib_protocol == r->rtm_protocol) &&
696 fib_nh_match(r, n, rta, fi) == 0)
702 rtmsg_fib(RTM_DELROUTE, f, z, tb->tb_id, n, req);
705 write_lock_bh(&fib_hash_lock);
706 *del_fp = f->fn_next;
707 write_unlock_bh(&fib_hash_lock);
709 if (f->fn_state&FN_S_ACCESSED)
/* Accessed nodes become zombies rather than being freed now. */
714 f->fn_state |= FN_S_ZOMBIE;
715 if (f->fn_state&FN_S_ACCESSED) {
716 f->fn_state &= ~FN_S_ACCESSED;
719 if (++fib_hash_zombies > 128)
/*
 * Reap every zombie or dead-nexthop node from one chain; returns the
 * number of entries removed (used by fn_hash_flush to decide whether
 * to flush the route cache).
 *
 * NOTE(review): garbled excerpt — the unlink (*fp = f->fn_next),
 * fn_free_node(f), the else advancing fp, and the found-counter /
 * return are missing.  Preserved verbatim; reconcile against the
 * full source before editing.
 */
728 static __inline__ int
729 fn_flush_list(struct fib_node ** fp, int z, struct fn_hash *table)
734 while ((f = *fp) != NULL) {
735 struct fib_info *fi = FIB_INFO(f);
737 if (fi && ((f->fn_state&FN_S_ZOMBIE) || (fi->fib_flags&RTNH_F_DEAD))) {
738 write_lock_bh(&fib_hash_lock);
740 write_unlock_bh(&fib_hash_lock);
/*
 * Walk every zone and bucket, reaping zombies and dead routes; resets
 * the global zombie counter first.
 *
 * NOTE(review): garbled excerpt — locals (fz, i, tmp/found) and the
 * return of the reaped count are missing.  Preserved verbatim;
 * reconcile against the full source before editing.
 */
751 static int fn_hash_flush(struct fib_table *tb)
753 struct fn_hash *table = (struct fn_hash*)tb->tb_data;
757 fib_hash_zombies = 0;
758 for (fz = table->fn_zone_list; fz; fz = fz->fz_next) {
761 for (i=fz->fz_divisor-1; i>=0; i--)
762 tmp += fn_flush_list(&fz->fz_hash[i], fz->fz_order, table);
/*
 * Dump one hash chain into a netlink skb, resuming from cb->args
 * state and recording the stop position when the skb fills up.
 *
 * NOTE(review): garbled excerpt — the fz/f parameters, s_i
 * initialisation from cb->args, the RTM_NEWROUTE argument of
 * fib_dump_info, the early-return bookkeeping, and the return value
 * are missing.  Preserved verbatim; reconcile against the full
 * source.
 */
770 static __inline__ int
771 fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
772 struct fib_table *tb,
779 for (i=0; f; i++, f=f->fn_next) {
780 if (i < s_i) continue;
781 if (f->fn_state&FN_S_ZOMBIE) continue;
782 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
784 tb->tb_id, (f->fn_state&FN_S_ZOMBIE) ? 0 : f->fn_type, f->fn_scope,
785 &f->fn_key, fz->fz_order, f->fn_tos,
/*
 * Dump one zone bucket-by-bucket, clearing the deeper resume state in
 * cb->args whenever a new bucket is entered.
 *
 * NOTE(review): garbled excerpt — the fz parameter, s_h
 * initialisation, the failure bookkeeping inside the final if, and
 * the return are missing.  Preserved verbatim; reconcile against the
 * full source before editing.
 */
795 static __inline__ int
796 fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
797 struct fib_table *tb,
803 for (h=0; h < fz->fz_divisor; h++) {
804 if (h < s_h) continue;
/* New bucket: wipe the per-bucket resume cursor. */
806 memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));
807 if (fz->fz_hash == NULL || fz->fz_hash[h] == NULL)
809 if (fn_hash_dump_bucket(skb, cb, tb, fz, fz->fz_hash[h]) < 0) {
/*
 * Netlink table dump entry point: iterate the zone list under the
 * read lock, resuming at the zone index saved in cb->args.
 *
 * NOTE(review): garbled excerpt — locals (m, s_m, fz), the saved
 * position bookkeeping and returns around both unlock sites are
 * missing.  Preserved verbatim; reconcile against the full source.
 */
818 static int fn_hash_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
822 struct fn_hash *table = (struct fn_hash*)tb->tb_data;
825 read_lock(&fib_hash_lock);
826 for (fz = table->fn_zone_list, m=0; fz; fz = fz->fz_next, m++) {
827 if (m < s_m) continue;
/* New zone: wipe the deeper (bucket/node) resume cursor. */
829 memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(cb->args[0]));
830 if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
832 read_unlock(&fib_hash_lock);
836 read_unlock(&fib_hash_lock);
/*
 * Broadcast an RTM_NEWROUTE/RTM_DELROUTE notification for node f to
 * the RTMGRP_IPV4_ROUTE netlink group, and echo it back to the
 * requester when NLM_F_ECHO is set.
 *
 * NOTE(review): garbled excerpt — the skb local, the NULL check after
 * alloc_skb, the fib_dump_info failure path (kfree_skb + return), and
 * the remaining fib_dump_info arguments are missing.  Preserved
 * verbatim; reconcile against the full source before editing.
 */
841 static void rtmsg_fib(int event, struct fib_node* f, int z, int tb_id,
842 struct nlmsghdr *n, struct netlink_skb_parms *req)
845 u32 pid = req ? req->pid : 0;
846 int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
848 skb = alloc_skb(size, GFP_KERNEL);
852 if (fib_dump_info(skb, pid, n->nlmsg_seq, event, tb_id,
853 f->fn_type, f->fn_scope, &f->fn_key, z, f->fn_tos,
858 NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_ROUTE;
/* Hold an extra reference so the unicast echo survives the broadcast. */
859 if (n->nlmsg_flags&NLM_F_ECHO)
860 atomic_inc(&skb->users);
861 netlink_broadcast(rtnl, skb, pid, RTMGRP_IPV4_ROUTE, GFP_KERNEL);
862 if (n->nlmsg_flags&NLM_F_ECHO)
863 netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
/*
 * Allocate and initialise one hash-based FIB table.  With multiple
 * tables this is called per table id and must not be __init; with a
 * single table it may be (hence the two signatures under the #ifdef).
 *
 * NOTE(review): garbled excerpt — the #else/#endif partners of the
 * visible #ifdef, the NULL checks after kmem_cache_create/kmalloc,
 * the tb_id assignment, and the final return tb are missing.
 * Preserved verbatim; reconcile against the full source.
 */
866 #ifdef CONFIG_IP_MULTIPLE_TABLES
867 struct fib_table * fib_hash_init(int id)
869 struct fib_table * __init fib_hash_init(int id)
872 struct fib_table *tb;
/* Create the shared fib_node slab cache on first use. */
874 if (fn_hash_kmem == NULL)
875 fn_hash_kmem = kmem_cache_create("ip_fib_hash",
876 sizeof(struct fib_node),
877 0, SLAB_HWCACHE_ALIGN,
880 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash), GFP_KERNEL);
885 tb->tb_lookup = fn_hash_lookup;
886 tb->tb_insert = fn_hash_insert;
887 tb->tb_delete = fn_hash_delete;
888 tb->tb_flush = fn_hash_flush;
889 tb->tb_select_default = fn_hash_select_default;
890 tb->tb_dump = fn_hash_dump;
891 memset(tb->tb_data, 0, sizeof(struct fn_hash));
895 /* ------------------------------------------------------------------------ */
896 #ifdef CONFIG_PROC_FS
/*
 * /proc/net/route iteration cursor: current zone, bucket index,
 * position within the bucket array, and current node.
 * NOTE(review): the 'bucket' member was dropped from this excerpt; it
 * is restored here because fib_get_first/fib_get_next use
 * iter->bucket — verify against the full tree.
 */
struct fib_iter_state {
	struct fn_zone	*zone;
	int		bucket;
	struct fib_node **hash;
	struct fib_node *node;
};
/*
 * Position the iterator on the first node of the main table: skip to
 * the last zone in the list (the least specific — presumably so /proc
 * output starts with the default route; confirm against full source),
 * then scan its buckets for the first non-empty chain.
 *
 * NOTE(review): garbled excerpt — the maxslot local, the iterator
 * reset lines, the returns inside the loops, and the final NULL
 * return are missing.  Preserved verbatim; reconcile before editing.
 */
905 static __inline__ struct fib_node *fib_get_first(struct seq_file *seq)
907 struct fib_iter_state* iter = seq->private;
908 struct fn_hash *table = (struct fn_hash *)ip_fib_main_table->tb_data;
914 for (iter->zone = table->fn_zone_list; iter->zone;
915 iter->zone = iter->zone->fz_next) {
918 if (!iter->zone->fz_next)
921 iter->hash = iter->zone->fz_hash;
922 maxslot = iter->zone->fz_divisor;
924 for (iter->bucket = 0; iter->bucket < maxslot;
925 ++iter->bucket, ++iter->hash) {
926 iter->node = *iter->hash;
/*
 * Advance the iterator: next node in the chain, else the next
 * non-empty bucket, else the next zone in the list.
 *
 * NOTE(review): garbled excerpt — the guard for a NULL iter->node,
 * the returns after each successful step, the zone-exhausted NULL
 * return, and the per-zone bucket reset are missing.  Preserved
 * verbatim; reconcile against the full source before editing.
 */
936 static __inline__ struct fib_node *fib_get_next(struct seq_file *seq)
938 struct fib_iter_state* iter = seq->private;
941 iter->node = iter->node->fn_next;
952 maxslot = iter->zone->fz_divisor;
954 while (++iter->bucket < maxslot) {
955 iter->node = *++iter->hash;
961 iter->zone = iter->zone->fz_next;
966 iter->hash = iter->zone->fz_hash;
968 iter->node = *iter->hash;
976 static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
980 read_lock(&fib_hash_lock);
981 if (ip_fib_main_table)
982 v = *pos ? fib_get_next(seq) : SEQ_START_TOKEN;
986 static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
989 return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
992 static void fib_seq_stop(struct seq_file *seq, void *v)
994 read_unlock(&fib_hash_lock);
/*
 * Translate a route's type/state into the legacy RTF_* flag word
 * shown in /proc/net/route.  Indexes 7/8 are RTN_UNREACHABLE and
 * RTN_PROHIBIT (presumably — written as bare magic numbers here;
 * confirm against <linux/rtnetlink.h>).
 *
 * NOTE(review): garbled excerpt — the RTF_HOST branch for a /32 mask,
 * the dead/RTF_UP handling, and the return are missing.  Preserved
 * verbatim; reconcile against the full source before editing.
 */
997 static unsigned fib_flag_trans(int type, int dead, u32 mask, struct fib_info *fi)
999 static unsigned type2flags[RTN_MAX + 1] = {
1000 [7] = RTF_REJECT, [8] = RTF_REJECT,
1002 unsigned flags = type2flags[type];
1004 if (fi && fi->fib_nh->nh_gw)
1005 flags |= RTF_GATEWAY;
1006 if (mask == 0xFFFFFFFF)
1013 extern int dev_in_nx_info(struct net_device *, struct nx_info *);
1016 * This outputs /proc/net/route.
1018 * It always works in backward compatibility mode.
1019 * The format of the file is not supposed to be changed.
/*
 * Emit one /proc/net/route line (or the header for SEQ_START_TOKEN)
 * in the legacy fixed-width format.  The dev_in_nx_info() check is a
 * vserver-patch addition that hides routes whose device is outside
 * the caller's network context.
 *
 * NOTE(review): garbled excerpt — locals (f, prefix, mask, flags,
 * bf[128]), the 'goto out'/label structure, the header string tail,
 * the fib_flag_trans() trailing arguments, and the metric/window/irtt
 * snprintf arguments are missing.  Preserved verbatim; reconcile
 * against the full source before editing.
 */
1021 static int fib_seq_show(struct seq_file *seq, void *v)
1023 struct fib_iter_state* iter;
1028 struct fib_info *fi;
1030 if (v == SEQ_START_TOKEN) {
1031 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
1032 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
1039 iter = seq->private;
1040 prefix = fz_prefix(f->fn_key, iter->zone);
1041 mask = FZ_MASK(iter->zone);
1042 flags = fib_flag_trans(f->fn_type, f->fn_state & FN_S_ZOMBIE,
1044 if (fi && dev_in_nx_info(fi->fib_dev, current->nx_info))
1045 snprintf(bf, sizeof(bf),
1046 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
1047 fi->fib_dev ? fi->fib_dev->name : "*", prefix,
1048 fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
1049 mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
1053 snprintf(bf, sizeof(bf),
1054 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
1055 prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0);
1056 seq_printf(seq, "%-127s\n", bf);
1061 static struct seq_operations fib_seq_ops = {
1062 .start = fib_seq_start,
1063 .next = fib_seq_next,
1064 .stop = fib_seq_stop,
1065 .show = fib_seq_show,
/*
 * Open /proc/net/route: allocate the iterator state, open the
 * seq_file, and attach the zeroed state as seq->private.
 *
 * NOTE(review): garbled excerpt — the rc local, the kmalloc-failure
 * and seq_open-failure exits (the usual out/out_kfree goto ladder),
 * the seq->private assignment, and the return are missing.
 * Preserved verbatim; reconcile against the full source.
 */
1068 static int fib_seq_open(struct inode *inode, struct file *file)
1070 struct seq_file *seq;
1072 struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
1077 rc = seq_open(file, &fib_seq_ops);
1081 seq = file->private_data;
1083 memset(s, 0, sizeof(*s));
1091 static struct file_operations fib_seq_fops = {
1092 .owner = THIS_MODULE,
1093 .open = fib_seq_open,
1095 .llseek = seq_lseek,
1096 .release = seq_release_private,
1099 int __init fib_proc_init(void)
1101 if (!proc_net_fops_create("route", S_IRUGO, &fib_seq_fops))
1106 void __init fib_proc_exit(void)
1108 proc_net_remove("route");
1110 #endif /* CONFIG_PROC_FS */