2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
7 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
8 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
9 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
11 #include <linux/errno.h>
12 #include <linux/types.h>
13 #include <linux/socket.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/string.h>
19 #include <linux/sockios.h>
20 #include <linux/net.h>
22 #include <linux/inet.h>
23 #include <linux/netdevice.h>
25 #include <linux/if_arp.h>
26 #include <linux/skbuff.h>
28 #include <asm/uaccess.h>
29 #include <asm/system.h>
30 #include <linux/fcntl.h>
31 #include <linux/termios.h> /* For TIOCINQ/OUTQ */
33 #include <linux/interrupt.h>
34 #include <linux/notifier.h>
35 #include <linux/netfilter.h>
36 #include <linux/init.h>
37 #include <linux/spinlock.h>
38 #include <net/netrom.h>
39 #include <linux/seq_file.h>
/* Id handed to the next neighbour entry created (see nr_add_node / nr_add_neigh). */
41 static unsigned int nr_neigh_no = 1;
/* Global list of known NET/ROM destination nodes, guarded by nr_node_list_lock (BH-safe). */
43 static HLIST_HEAD(nr_node_list);
44 static spinlock_t nr_node_list_lock = SPIN_LOCK_UNLOCKED;
/* Global list of directly reachable AX.25 neighbours, guarded by nr_neigh_list_lock (BH-safe). */
45 static HLIST_HEAD(nr_neigh_list);
46 static spinlock_t nr_neigh_list_lock = SPIN_LOCK_UNLOCKED;
48 struct nr_node *nr_node_get(ax25_address *callsign)
50 struct nr_node *found = NULL;
51 struct nr_node *nr_node;
52 struct hlist_node *node;
54 spin_lock_bh(&nr_node_list_lock);
55 nr_node_for_each(nr_node, node, &nr_node_list)
56 if (ax25cmp(callsign, &nr_node->callsign) == 0) {
57 nr_node_hold(nr_node);
61 spin_unlock_bh(&nr_node_list_lock);
65 struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign, struct net_device *dev)
67 struct nr_neigh *found = NULL;
68 struct nr_neigh *nr_neigh;
69 struct hlist_node *node;
71 spin_lock_bh(&nr_neigh_list_lock);
72 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list)
73 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
74 nr_neigh->dev == dev) {
75 nr_neigh_hold(nr_neigh);
79 spin_unlock_bh(&nr_neigh_list_lock);
/* Forward declaration: nr_add_node() below drops a neighbour that loses its last route. */
83 static void nr_remove_neigh(struct nr_neigh *);
86 * Add a new route to a node, and in the process add the node and the
87 * neighbour if it is new.
/*
 * nr_add_node(): record that destination @nr is reachable via AX.25
 * neighbour @ax25 on device @dev with the given @quality and
 * obsolescence count.  Creates node and/or neighbour entries on demand
 * and keeps the node's (up to three) routes sorted best-first.
 * NOTE(review): this extract is incomplete — braces, returns and some
 * assignments are missing; comments describe only the surviving lines.
 */
89 static int nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax25,
90 ax25_digi *ax25_digi, struct net_device *dev, int quality, int obs_count)
92 struct nr_node *nr_node;
93 struct nr_neigh *nr_neigh;
94 struct nr_route nr_route;
96 struct net_device *odev;
/* Never add a route to one of our own NET/ROM interfaces. */
98 if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
103 nr_node = nr_node_get(nr);
105 nr_neigh = nr_neigh_get_dev(ax25, dev);
108 * The L2 link to a neighbour has failed in the past
109 * and now a frame comes from this neighbour. We assume
110 * it was a temporary trouble with the link and reset the
111 * routes now (and not wait for a node broadcast).
113 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
114 struct nr_node *nr_nodet;
115 struct hlist_node *node;
117 spin_lock_bh(&nr_node_list_lock);
118 nr_node_for_each(nr_nodet, node, &nr_node_list) {
119 nr_node_lock(nr_nodet);
120 for (i = 0; i < nr_nodet->count; i++)
121 if (nr_nodet->routes[i].neighbour == nr_neigh)
122 if (i < nr_nodet->which)
124 nr_node_unlock(nr_nodet);
126 spin_unlock_bh(&nr_node_list_lock);
/* A frame arrived from this neighbour, so its link evidently works again. */
129 if (nr_neigh != NULL)
130 nr_neigh->failed = 0;
132 if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
133 nr_neigh_put(nr_neigh);
134 nr_node_put(nr_node);
/* Unknown neighbour: create a fresh, unlocked entry with the sysctl default quality. */
138 if (nr_neigh == NULL) {
139 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
141 nr_node_put(nr_node);
145 nr_neigh->callsign = *ax25;
146 nr_neigh->digipeat = NULL;
147 nr_neigh->ax25 = NULL;
149 nr_neigh->quality = sysctl_netrom_default_path_quality;
150 nr_neigh->locked = 0;
152 nr_neigh->number = nr_neigh_no++;
153 nr_neigh->failed = 0;
154 atomic_set(&nr_neigh->refcount, 1);
/* NOTE(review): GFP_KERNEL here vs GFP_ATOMIC above — confirm the calling context may sleep. */
156 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
157 if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) {
160 nr_node_put(nr_node);
163 memcpy(nr_neigh->digipeat, ax25_digi,
167 spin_lock_bh(&nr_neigh_list_lock);
168 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
169 nr_neigh_hold(nr_neigh);
170 spin_unlock_bh(&nr_neigh_list_lock);
/* Direct (node == neighbour) report may refresh an unlocked neighbour's quality. */
173 if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
174 nr_neigh->quality = quality;
/* Unknown node: create it with this neighbour as its single route. */
176 if (nr_node == NULL) {
177 if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
179 nr_neigh_put(nr_neigh);
183 nr_node->callsign = *nr;
184 strcpy(nr_node->mnemonic, mnemonic);
188 atomic_set(&nr_node->refcount, 1);
189 nr_node->node_lock = SPIN_LOCK_UNLOCKED;
191 nr_node->routes[0].quality = quality;
192 nr_node->routes[0].obs_count = obs_count;
193 nr_node->routes[0].neighbour = nr_neigh;
195 nr_neigh_hold(nr_neigh);
198 spin_lock_bh(&nr_node_list_lock);
199 hlist_add_head(&nr_node->node_node, &nr_node_list);
200 /* refcount initialized at 1 */
201 spin_unlock_bh(&nr_node_list_lock);
205 nr_node_lock(nr_node);
208 strcpy(nr_node->mnemonic, mnemonic);
/* Existing node: if a route via this neighbour already exists, refresh it. */
210 for (found = 0, i = 0; i < nr_node->count; i++) {
211 if (nr_node->routes[i].neighbour == nr_neigh) {
212 nr_node->routes[i].quality = quality;
213 nr_node->routes[i].obs_count = obs_count;
220 /* We have space at the bottom, slot it in */
221 if (nr_node->count < 3) {
222 nr_node->routes[2] = nr_node->routes[1];
223 nr_node->routes[1] = nr_node->routes[0];
225 nr_node->routes[0].quality = quality;
226 nr_node->routes[0].obs_count = obs_count;
227 nr_node->routes[0].neighbour = nr_neigh;
231 nr_neigh_hold(nr_neigh);
234 /* It must be better than the worst */
235 if (quality > nr_node->routes[2].quality) {
236 nr_node->routes[2].neighbour->count--;
237 nr_neigh_put(nr_node->routes[2].neighbour);
/* NOTE(review): neighbour->count is read after the decrement/put above — ordering matters here. */
239 if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
240 nr_remove_neigh(nr_node->routes[2].neighbour);
242 nr_node->routes[2].quality = quality;
243 nr_node->routes[2].obs_count = obs_count;
244 nr_node->routes[2].neighbour = nr_neigh;
246 nr_neigh_hold(nr_neigh);
252 /* Now re-sort the routes in quality order */
253 switch (nr_node->count) {
255 if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
256 switch (nr_node->which) {
257 case 0: nr_node->which = 1; break;
258 case 1: nr_node->which = 0; break;
261 nr_route = nr_node->routes[0];
262 nr_node->routes[0] = nr_node->routes[1];
263 nr_node->routes[1] = nr_route;
265 if (nr_node->routes[2].quality > nr_node->routes[1].quality) {
266 switch (nr_node->which) {
267 case 1: nr_node->which = 2;
270 case 2: nr_node->which = 1;
276 nr_route = nr_node->routes[1];
277 nr_node->routes[1] = nr_node->routes[2];
278 nr_node->routes[2] = nr_route;
281 if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
282 switch (nr_node->which) {
283 case 0: nr_node->which = 1;
286 case 1: nr_node->which = 0;
291 nr_route = nr_node->routes[0];
292 nr_node->routes[0] = nr_node->routes[1];
293 nr_node->routes[1] = nr_route;
/* Keep 'which' pointing at the same neighbour after the swap above. */
299 for (i = 0; i < nr_node->count; i++) {
300 if (nr_node->routes[i].neighbour == nr_neigh) {
301 if (i < nr_node->which)
307 nr_neigh_put(nr_neigh);
308 nr_node_unlock(nr_node);
309 nr_node_put(nr_node);
313 static inline void __nr_remove_node(struct nr_node *nr_node)
315 hlist_del_init(&nr_node->node_node);
316 nr_node_put(nr_node);
319 #define nr_remove_node_locked(__node) \
320 __nr_remove_node(__node)
322 static void nr_remove_node(struct nr_node *nr_node)
324 spin_lock_bh(&nr_node_list_lock);
325 __nr_remove_node(nr_node);
326 spin_unlock_bh(&nr_node_list_lock);
329 static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
331 hlist_del_init(&nr_neigh->neigh_node);
332 nr_neigh_put(nr_neigh);
335 #define nr_remove_neigh_locked(__neigh) \
336 __nr_remove_neigh(__neigh)
338 static void nr_remove_neigh(struct nr_neigh *nr_neigh)
340 spin_lock_bh(&nr_neigh_list_lock);
341 __nr_remove_neigh(nr_neigh);
342 spin_unlock_bh(&nr_neigh_list_lock);
346 * "Delete" a node. Strictly speaking remove a route to a node. The node
347 * is only deleted if no routes are left to it.
/* NOTE(review): extract is incomplete — returns, braces and the route-shift guards are missing. */
349 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
351 struct nr_node *nr_node;
352 struct nr_neigh *nr_neigh;
355 nr_node = nr_node_get(callsign);
360 nr_neigh = nr_neigh_get_dev(neighbour, dev);
362 if (nr_neigh == NULL) {
363 nr_node_put(nr_node);
367 nr_node_lock(nr_node);
/* Find the route that goes via this neighbour and drop it. */
368 for (i = 0; i < nr_node->count; i++) {
369 if (nr_node->routes[i].neighbour == nr_neigh) {
371 nr_neigh_put(nr_neigh);
/* An unlocked neighbour with no remaining users is discarded entirely. */
373 if (nr_neigh->count == 0 && !nr_neigh->locked)
374 nr_remove_neigh(nr_neigh);
375 nr_neigh_put(nr_neigh);
/* Last route gone: the node itself disappears. */
379 if (nr_node->count == 0) {
380 nr_remove_node(nr_node);
/* Otherwise close the gap left in the routes[] array. */
384 nr_node->routes[0] = nr_node->routes[1];
386 nr_node->routes[1] = nr_node->routes[2];
390 nr_node_put(nr_node);
392 nr_node_unlock(nr_node);
397 nr_neigh_put(nr_neigh);
398 nr_node_unlock(nr_node);
399 nr_node_put(nr_node);
405 * Lock a neighbour with a quality.
/*
 * nr_add_neigh(): pin a neighbour at an operator-chosen quality.  An
 * existing entry is simply updated and locked; otherwise a new locked
 * entry is created and added to nr_neigh_list.
 * NOTE(review): extract is incomplete — returns and error-path braces are missing.
 */
407 static int nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
409 struct nr_neigh *nr_neigh;
411 nr_neigh = nr_neigh_get_dev(callsign, dev);
/* Already known: just override quality and lock it against automatic updates. */
413 nr_neigh->quality = quality;
414 nr_neigh->locked = 1;
415 nr_neigh_put(nr_neigh);
419 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
422 nr_neigh->callsign = *callsign;
423 nr_neigh->digipeat = NULL;
424 nr_neigh->ax25 = NULL;
426 nr_neigh->quality = quality;
427 nr_neigh->locked = 1;
429 nr_neigh->number = nr_neigh_no++;
430 nr_neigh->failed = 0;
431 atomic_set(&nr_neigh->refcount, 1);
/* Copy the caller-supplied digipeater path, if any. */
433 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
434 if ((nr_neigh->digipeat = kmalloc(sizeof(*ax25_digi), GFP_KERNEL)) == NULL) {
438 memcpy(nr_neigh->digipeat, ax25_digi, sizeof(*ax25_digi));
441 spin_lock_bh(&nr_neigh_list_lock);
442 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
443 /* refcount is initialized at 1 */
444 spin_unlock_bh(&nr_neigh_list_lock);
450 * "Delete" a neighbour. The neighbour is only removed if the number
451 * of nodes that may use it is zero.
/* Unlock the neighbour; it is only physically removed once no node routes via it. */
453 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
455 struct nr_neigh *nr_neigh;
457 nr_neigh = nr_neigh_get_dev(callsign, dev);
459 if (nr_neigh == NULL) return -EINVAL;
461 nr_neigh->quality = quality;
462 nr_neigh->locked = 0;
/* No node uses it any more: take it off the list. */
464 if (nr_neigh->count == 0)
465 nr_remove_neigh(nr_neigh);
466 nr_neigh_put(nr_neigh);
472 * Decrement the obsolescence count by one. If a route is reduced to a
473 * count of zero, remove it. Also remove any unlocked neighbours with
474 * zero nodes routing via it.
/* NOTE(review): extract is incomplete — loop braces, 'which' fixups and returns are missing. */
476 static int nr_dec_obs(void)
478 struct nr_neigh *nr_neigh;
480 struct hlist_node *node, *nodet;
483 spin_lock_bh(&nr_node_list_lock);
484 nr_node_for_each_safe(s, node, nodet, &nr_node_list) {
486 for (i = 0; i < s->count; i++) {
487 switch (s->routes[i].obs_count) {
488 case 0: /* A locked entry */
491 case 1: /* From 1 -> 0 */
492 nr_neigh = s->routes[i].neighbour;
495 nr_neigh_put(nr_neigh);
/* Route expired: drop an unlocked, now-unused neighbour too. */
497 if (nr_neigh->count == 0 && !nr_neigh->locked)
498 nr_remove_neigh(nr_neigh);
/* Shift remaining routes down to close the gap. */
504 s->routes[0] = s->routes[1];
506 s->routes[1] = s->routes[2];
/* Default: just age the route by one tick. */
513 s->routes[i].obs_count--;
/* Node with no routes left is removed (list lock already held). */
520 nr_remove_node_locked(s);
523 spin_unlock_bh(&nr_node_list_lock);
529 * A device has been removed. Remove its routes and neighbours.
/*
 * Called on NETDEV_DOWN/unregister: purge every neighbour on @dev and
 * every node route that went through such a neighbour.
 * NOTE(review): extract is incomplete — the dev match, route-shift guards
 * and count bookkeeping lines are missing.
 */
531 void nr_rt_device_down(struct net_device *dev)
534 struct hlist_node *node, *nodet, *node2, *node2t;
/* Lock order: neighbour list outer, node list inner (matches nr_rt_free). */
538 spin_lock_bh(&nr_neigh_list_lock);
539 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
541 spin_lock_bh(&nr_node_list_lock);
542 nr_node_for_each_safe(t, node2, node2t, &nr_node_list) {
544 for (i = 0; i < t->count; i++) {
545 if (t->routes[i].neighbour == s) {
550 t->routes[0] = t->routes[1];
552 t->routes[1] = t->routes[2];
560 nr_remove_node_locked(t);
563 spin_unlock_bh(&nr_node_list_lock);
565 nr_remove_neigh_locked(s);
568 spin_unlock_bh(&nr_neigh_list_lock);
572 * Check that the device given is a valid AX.25 interface that is "up".
573 * Or a valid ethernet interface with an AX.25 callsign binding.
/*
 * Resolve an ioctl-supplied device name to an up AX.25 net_device.
 * dev_get_by_name() returns the device with a reference held.
 * NOTE(review): the failure path (non-AX.25 or down device) is missing
 * from this extract — it must dev_put() and return NULL; verify.
 */
575 static struct net_device *nr_ax25_dev_get(char *devname)
577 struct net_device *dev;
579 if ((dev = dev_get_by_name(devname)) == NULL)
582 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
590 * Find the first active NET/ROM device, usually "nr0".
/*
 * Walk the legacy dev_base list under dev_base_lock and pick the up
 * NET/ROM device whose name compares lowest (first 3 chars).
 * NOTE(review): the 'first = dev;' assignment, reference hold and
 * return are missing from this extract.
 */
592 struct net_device *nr_dev_first(void)
594 struct net_device *dev, *first = NULL;
596 read_lock(&dev_base_lock);
597 for (dev = dev_base; dev != NULL; dev = dev->next) {
598 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
599 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
604 read_unlock(&dev_base_lock);
610 * Find the NET/ROM device for the given callsign.
/*
 * Match @addr against the AX.25 callsign bound to each up NET/ROM
 * device (dev->dev_addr).  Walks the legacy dev_base list under
 * dev_base_lock.  NOTE(review): the hold/return lines are missing from
 * this extract — callers appear to receive a referenced device; verify.
 */
612 struct net_device *nr_dev_get(ax25_address *addr)
614 struct net_device *dev;
616 read_lock(&dev_base_lock);
617 for (dev = dev_base; dev != NULL; dev = dev->next) {
618 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
624 read_unlock(&dev_base_lock);
/*
 * Convert an ioctl digipeater array into an ax25_digi.
 * WARNING: returns a pointer to a function-local *static* buffer — not
 * reentrant; callers must consume the result before the next call.
 */
628 static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
630 static ax25_digi ax25_digi;
636 for (i = 0; i < ndigis; i++) {
637 ax25_digi.calls[i] = digipeaters[i];
638 ax25_digi.repeated[i] = 0;
641 ax25_digi.ndigi = ndigis;
/* lastrepeat == -1: no digipeater has repeated the frame yet. */
642 ax25_digi.lastrepeat = -1;
648 * Handle the ioctls that control the routing functions.
/*
 * Dispatch SIOCADDRT/SIOCDELRT-style requests: copy the request from
 * user space, resolve the device, then add/delete a node or neighbour.
 * NOTE(review): extract is incomplete — case labels, returns and the
 * dev_put() on each exit path are missing; verify the device reference
 * from nr_ax25_dev_get() is dropped on every path.
 */
650 int nr_rt_ioctl(unsigned int cmd, void *arg)
652 struct nr_route_struct nr_route;
653 struct net_device *dev;
658 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
660 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
/* Bounds-check user-supplied ndigis before nr_call_to_digi() indexes its fixed-size arrays. */
662 if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
666 switch (nr_route.type) {
668 ret = nr_add_node(&nr_route.callsign,
671 nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
672 dev, nr_route.quality,
676 ret = nr_add_neigh(&nr_route.callsign,
677 nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
678 dev, nr_route.quality);
687 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
689 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
691 switch (nr_route.type) {
693 ret = nr_del_node(&nr_route.callsign,
694 &nr_route.neighbour, dev);
697 ret = nr_del_neigh(&nr_route.callsign,
698 dev, nr_route.quality);
717 * A level 2 link has timed out, therefore it appears to be a poor link,
718 * then don't use that neighbour until it is reset.
/*
 * AX.25 layer callback on link failure: find the neighbour bound to
 * @ax25, detach the ax25_cb, and after enough consecutive failures
 * advance every affected node to its next-best route.
 * NOTE(review): extract is incomplete — the hold/assignment in the
 * first loop and the 'which++' in the second are missing.
 */
720 void nr_link_failed(ax25_cb *ax25, int reason)
722 struct nr_neigh *s, *nr_neigh = NULL;
723 struct hlist_node *node;
724 struct nr_node *nr_node = NULL;
726 spin_lock_bh(&nr_neigh_list_lock);
727 nr_neigh_for_each(s, node, &nr_neigh_list)
728 if (s->ax25 == ax25) {
733 spin_unlock_bh(&nr_neigh_list_lock);
735 if (nr_neigh == NULL) return;
737 nr_neigh->ax25 = NULL;
/* Below the sysctl failure threshold: keep the neighbour, just count the failure. */
740 if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
741 nr_neigh_put(nr_neigh);
744 spin_lock_bh(&nr_node_list_lock);
745 nr_node_for_each(nr_node, node, &nr_node_list)
746 nr_node_lock(nr_node);
/* Only nodes currently routing via this neighbour are switched away. */
747 if (nr_node->which < nr_node->count && nr_node->routes[nr_node->which].neighbour == nr_neigh)
749 nr_node_unlock(nr_node);
750 spin_unlock_bh(&nr_node_list_lock);
751 nr_neigh_put(nr_neigh);
755 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
756 * indicates an internally generated frame.
/*
 * Forward or locally deliver one NET/ROM frame.  Learns a zero-quality
 * route from the sender, delivers frames addressed to us, otherwise
 * relays via the current best neighbour for the destination.
 * NOTE(review): extract is incomplete — returns, TTL decrement, the
 * skb/skbn swap and several braces are missing.
 */
758 int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
760 ax25_address *nr_src, *nr_dest;
761 struct nr_neigh *nr_neigh;
762 struct nr_node *nr_node;
763 struct net_device *dev;
767 struct sk_buff *skbn;
/* NET/ROM network header: source callsign at offset 0, destination at offset 7. */
770 nr_src = (ax25_address *)(skb->data + 0);
771 nr_dest = (ax25_address *)(skb->data + 7);
/* Learn/refresh a route back to the sender (quality 0 = heard, not advertised). */
774 nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
775 ax25->ax25_dev->dev, 0, sysctl_netrom_obsolescence_count_initialiser);
777 if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */
778 if (ax25 == NULL) /* Its from me */
779 ret = nr_loopback_queue(skb);
781 ret = nr_rx_frame(skb, dev);
/* Routing (digipeating) of received frames can be disabled by sysctl. */
786 if (!sysctl_netrom_routing_control && ax25 != NULL)
789 /* Its Time-To-Live has expired */
790 if (skb->data[14] == 1) {
794 nr_node = nr_node_get(nr_dest);
797 nr_node_lock(nr_node);
/* No usable route to the destination. */
799 if (nr_node->which >= nr_node->count) {
800 nr_node_unlock(nr_node);
801 nr_node_put(nr_node);
805 nr_neigh = nr_node->routes[nr_node->which].neighbour;
807 if ((dev = nr_dev_first()) == NULL) {
808 nr_node_unlock(nr_node);
809 nr_node_put(nr_node);
813 /* We are going to change the netrom headers so we should get our
814 own skb, we also did not know until now how much header space
815 we had to reserve... - RXQ */
816 if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
817 nr_node_unlock(nr_node);
818 nr_node_put(nr_node);
/* Prepend the AX.25 PID byte before handing the frame to the AX.25 layer. */
826 dptr = skb_push(skb, 1);
827 *dptr = AX25_P_NETROM;
829 ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
830 if (nr_neigh->ax25 && ax25s) {
831 /* We were already holding this ax25_cb */
834 nr_neigh->ax25 = ax25s;
/* Success iff the AX.25 layer accepted the frame and gave us a control block. */
837 ret = (nr_neigh->ax25 != NULL);
838 nr_node_unlock(nr_node);
839 nr_node_put(nr_node);
843 #ifdef CONFIG_PROC_FS
/*
 * seq_file .start for /proc nodes table: take the node-list lock
 * (released in nr_node_stop) and advance to position *pos.
 * NOTE(review): the *pos check and loop body are missing from this extract.
 */
845 static void *nr_node_start(struct seq_file *seq, loff_t *pos)
847 struct nr_node *nr_node;
848 struct hlist_node *node;
851 spin_lock_bh(&nr_node_list_lock);
853 return SEQ_START_TOKEN;
855 nr_node_for_each(nr_node, node, &nr_node_list) {
/*
 * seq_file .next: step from the header token to the first list entry,
 * or from entry to entry.  NOTE(review): the '? nr_node_list.first'
 * branch and the NULL-at-end check are missing from this extract.
 */
864 static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
866 struct hlist_node *node;
869 node = (v == SEQ_START_TOKEN)
871 : ((struct nr_node *)v)->node_node.next;
873 return hlist_entry(node, struct nr_node, node_node);
876 static void nr_node_stop(struct seq_file *seq, void *v)
878 spin_unlock_bh(&nr_node_list_lock);
/*
 * seq_file .show: print the header line for the start token, otherwise
 * one node with its (up to three) routes.
 */
881 static int nr_node_show(struct seq_file *seq, void *v)
885 if (v == SEQ_START_TOKEN)
887 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
889 struct nr_node *nr_node = v;
890 nr_node_lock(nr_node);
891 seq_printf(seq, "%-9s %-7s %d %d",
892 ax2asc(&nr_node->callsign),
893 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
/* One column triple (quality, obs, neighbour number) per route. */
897 for (i = 0; i < nr_node->count; i++) {
898 seq_printf(seq, " %3d %d %05d",
899 nr_node->routes[i].quality,
900 nr_node->routes[i].obs_count,
901 nr_node->routes[i].neighbour->number);
903 nr_node_unlock(nr_node);
/* seq_file iterator over nr_node_list for the /proc nodes table. */
910 static struct seq_operations nr_node_seqops = {
911 .start = nr_node_start,
912 .next = nr_node_next,
913 .stop = nr_node_stop,
914 .show = nr_node_show,
917 static int nr_node_info_open(struct inode *inode, struct file *file)
919 return seq_open(file, &nr_node_seqops);
/* file_operations for the /proc NET/ROM nodes entry.
 * NOTE(review): .read/.llseek lines are missing from this extract. */
922 struct file_operations nr_nodes_fops = {
923 .owner = THIS_MODULE,
924 .open = nr_node_info_open,
927 .release = seq_release,
/*
 * seq_file .start for /proc neighbours table: take the neighbour-list
 * lock (released in nr_neigh_stop) and advance to position *pos.
 * NOTE(review): the *pos check and loop body are missing from this extract.
 */
930 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
932 struct nr_neigh *nr_neigh;
933 struct hlist_node *node;
936 spin_lock_bh(&nr_neigh_list_lock);
938 return SEQ_START_TOKEN;
940 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
/*
 * seq_file .next: from the header token to the first neighbour, or to
 * the following list entry.  NOTE(review): the NULL-at-end check is
 * missing from this extract.
 */
947 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
949 struct hlist_node *node;
952 node = (v == SEQ_START_TOKEN)
953 ? nr_neigh_list.first
954 : ((struct nr_neigh *)v)->neigh_node.next;
956 return hlist_entry(node, struct nr_neigh, neigh_node);
959 static void nr_neigh_stop(struct seq_file *seq, void *v)
961 spin_unlock_bh(&nr_neigh_list_lock);
/*
 * seq_file .show: header line for the start token, otherwise one
 * neighbour with its digipeater path.
 */
964 static int nr_neigh_show(struct seq_file *seq, void *v)
968 if (v == SEQ_START_TOKEN)
969 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n")
971 struct nr_neigh *nr_neigh = v;
973 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
975 ax2asc(&nr_neigh->callsign),
976 nr_neigh->dev ? nr_neigh->dev->name : "???",
/* Append each digipeater callsign, if a path is configured. */
982 if (nr_neigh->digipeat != NULL) {
983 for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
984 seq_printf(seq, " %s",
985 ax2asc(&nr_neigh->digipeat->calls[i]));
/* seq_file iterator over nr_neigh_list for the /proc neighbours table. */
993 static struct seq_operations nr_neigh_seqops = {
994 .start = nr_neigh_start,
995 .next = nr_neigh_next,
996 .stop = nr_neigh_stop,
997 .show = nr_neigh_show,
1000 static int nr_neigh_info_open(struct inode *inode, struct file *file)
1002 return seq_open(file, &nr_neigh_seqops);
/* file_operations for the /proc NET/ROM neighbours entry.
 * NOTE(review): the .read line is missing from this extract. */
1005 struct file_operations nr_neigh_fops = {
1006 .owner = THIS_MODULE,
1007 .open = nr_neigh_info_open,
1009 .llseek = seq_lseek,
1010 .release = seq_release,
1016 * Free all memory associated with the nodes and routes lists.
/*
 * Module-exit teardown: empty both global lists, dropping the list
 * references so the entries are freed.
 */
1018 void __exit nr_rt_free(void)
1020 struct nr_neigh *s = NULL;
1021 struct nr_node *t = NULL;
1022 struct hlist_node *node, *nodet;
/* Lock order: neighbour list outer, node list inner (matches nr_rt_device_down). */
1024 spin_lock_bh(&nr_neigh_list_lock);
1025 spin_lock_bh(&nr_node_list_lock);
1026 nr_node_for_each_safe(t, node, nodet, &nr_node_list) {
1028 nr_remove_node_locked(t);
1031 nr_neigh_for_each_safe(s, node, nodet, &nr_neigh_list) {
1036 nr_remove_neigh_locked(s);
1038 spin_unlock_bh(&nr_node_list_lock);
1039 spin_unlock_bh(&nr_neigh_list_lock);