/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/times.h>
#include <net/neighbour.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#define NEIGH_DEBUG 1
#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK
#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void neigh_app_notify(struct neighbour *n);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
static struct file_operations neigh_stat_seq_fops;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer.
   - resolution queue.

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock protecting the
   list of neighbour tables. This list is used only in process context.
 */
static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;
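
/*
 * Illustrative sketch (not part of the original file): the pattern the
 * locking rules above imply.  Find the entry under tbl->lock, take a
 * reference, drop the lock, and only then do anything non-trivial such
 * as calling into a protocol backend.  The helper name is hypothetical.
 */
static inline struct neighbour *neigh_find_hold_sketch(struct neigh_table *tbl,
						       const void *pkey,
						       struct net_device *dev)
{
	struct neighbour *n;
	u32 hash_val;

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (n->dev == dev &&
		    !memcmp(n->primary_key, pkey, tbl->key_len)) {
			neigh_hold(n);	/* refcount keeps n alive after unlock */
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;	/* caller does its work, then calls neigh_release() */
}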
static int neigh_blackhole(struct sk_buff *skb)
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (net_random() % base) + (base >> 1);
}
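
/*
 * Worked example (illustration only): with base = 30 * HZ,
 * net_random() % base is uniform over [0, 30*HZ) and base >> 1 adds
 * 15*HZ, so the result is uniform over [15*HZ, 45*HZ) -- i.e. over
 * (1/2)*base ... (3/2)*base with mean equal to base.
 */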
static int neigh_forced_gc(struct neigh_table *tbl)
{
	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				write_unlock(&n->lock);
			write_unlock(&n->lock);

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	while ((skb = skb_dequeue(list)) != NULL) {
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
			write_lock_bh(&n->lock);
			write_unlock_bh(&n->lock);

	write_unlock_bh(&tbl->lock);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			write_unlock(&n->lock);

	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
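
	/*
	 * Added note: "entries" below is the count before this allocation.
	 * A synchronous GC pass runs once the table reaches gc_thresh2
	 * (if the last flush is older than 5 seconds) or gc_thresh3; the
	 * allocation is refused only above gc_thresh3 when forced GC
	 * frees nothing.
	 */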
	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->parms = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);

	atomic_set(&n->refcnt, 1);

	atomic_dec(&tbl->entries);
static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kmalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC, get_order(size));
	}

	memset(ret, 0, size);
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
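
	/*
	 * Added note: for a power of two n, n & (n - 1) clears the single
	 * set bit, e.g. 8 & 7 == 0; any other n keeps a bit set.  The
	 * assertion below thus guarantees the new size is a power of two,
	 * so "new_entries - 1" is a valid bucket mask.
	 */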
	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;

	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			NEIGH_CACHE_STAT_INC(tbl, hits);
	read_unlock_bh(&tbl->lock);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			NEIGH_CACHE_STAT_INC(tbl, hits);
	read_unlock_bh(&tbl->lock);
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	int key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);

	memcpy(n->primary_key, pkey, key_len);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		goto out_neigh_release;

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		goto out_neigh_release;

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;

	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);

	write_unlock_bh(&tbl->lock);
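
/*
 * Illustrative usage (an assumption, mirroring the __neigh_lookup()
 * helper in <net/neighbour.h>): callers normally try neigh_lookup()
 * first and create the entry only on a miss.
 *
 *	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
 *	if (!n)
 *		n = neigh_create(tbl, pkey, dev);  // may return ERR_PTR()
 */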
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
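
	/*
	 * Added note: the shifts xor-fold all 32 key bits down toward the
	 * low nibble, so every byte of the last four key bytes influences
	 * the final 4-bit bucket index selected by PNEIGH_HASHMASK (0..15).
	 */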
	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
	read_unlock_bh(&tbl->lock);

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);

	memcpy(n->key, pkey, key_len);

	if (tbl->pconstructor && tbl->pconstructor(n)) {

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
	write_unlock_bh(&tbl->lock);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				if (tbl->pdestructor)
/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

		       "Destroying alive neighbour %p\n", neigh);

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;

		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))

	if (neigh->ops && neigh->ops->destructor)
		(neigh->ops->destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}
/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			write_unlock(&n->lock);
		write_unlock(&n->lock);

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);

	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
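
/*
 * Added overview (derived from the handler below): the timer drives the
 * NUD state machine roughly as
 *
 *	REACHABLE --(reachable_time passed)--> DELAY or STALE
 *	DELAY     --(confirmed recently)-----> REACHABLE
 *	DELAY     --(no confirmation)--------> PROBE
 *	PROBE/INCOMPLETE --(neigh_max_probes() exceeded)--> FAILED
 *
 * with solicitations retransmitted while in INCOMPLETE/PROBE.
 */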
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;

	write_lock(&neigh->lock);

	state = neigh->nud_state;

	if (!(state & NUD_IN_TIMER)) {
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
		/* It is a very thin place. report_unreachable is a very
		   complicated routine. In particular, it can hit the same
		   neighbour entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		neigh->timer.expires = next;
		add_timer(&neigh->timer);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */

		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);

		write_unlock(&neigh->lock);

	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);

	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	write_lock_bh(&neigh->lock);

	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->timer.expires = now + 1;
			add_timer(&neigh->timer);
		} else {
			neigh->nud_state = NUD_FAILED;
			write_unlock_bh(&neigh->lock);
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
		add_timer(&neigh->timer);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb_queue_len(&neigh->arp_queue) >=
		    neigh->parms->queue_len) {
			struct sk_buff *buff;

			buff = neigh->arp_queue.next;
			__skb_unlink(buff, &neigh->arp_queue);
		}
		__skb_queue_tail(&neigh->arp_queue, skb);
	}

	write_unlock_bh(&neigh->lock);
}
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
	void (*update)(struct hh_cache *, struct net_device *, unsigned char *) =
		neigh->dev->header_cache_update;

	for (hh = neigh->hh; hh; hh = hh->hh_next) {
		write_lock_bh(&hh->hh_lock);
		update(hh, neigh->dev, neigh->ha);
		write_unlock_bh(&hh->hh_lock);
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.

   NEIGH_UPDATE_F_OVERRIDE	allows overriding the existing lladdr.
   NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it.
				It also allows retaining the current state
				if the lladdr is unchanged.
   NEIGH_UPDATE_F_ADMIN		means that the change is administrative.
   NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
   NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				as a router.

   Caller MUST hold a reference count on the entry.
 */
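
/*
 * Illustrative call (an assumption, modelled on how receive paths use
 * this API): confirm a neighbour from a received reply and let the new
 * link-layer address replace whatever is cached.
 *
 *	neigh_update(neigh, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE);
 */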
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	old = neigh->nud_state;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;

	notify = old & NUD_VALID;
	/* Compare the new lladdr with the cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		if (!(old & NUD_VALID))
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;
	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
		if (lladdr == neigh->ha && new == NUD_STALE &&
		    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
		     (old & NUD_CONNECTED))

		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh->timer.expires = jiffies +
				((new & NUD_REACHABLE) ?
				 neigh->parms->reachable_time : 0);
			add_timer(&neigh->timer);
		}
		neigh->nud_state = new;

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				(neigh->parms->base_reachable_time << 1);

	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(n1, skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			       (neigh->flags | NTF_ROUTER) :
			       (neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution is not made yet.
 */
int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;

	if (!dst || !(neigh = dst->neighbour))

	__skb_pull(skb, skb->nh.raw - skb->data);

	if (!neigh_event_send(neigh, skb)) {
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);

	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb->nh.raw - skb->data);

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = back->stamp.tv_usec - now;

		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);

		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
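
/*
 * Added note: pneigh_enqueue() below reuses the skb timestamp as scratch
 * space -- stamp.tv_sec flags the skb as locally enqueued and
 * stamp.tv_usec carries the jiffies value at which neigh_proxy_process()
 * above should hand the skb to proxy_redo().
 */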
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {

	skb->stamp.tv_sec = LOCALLY_ENQUEUED;
	skb->stamp.tv_usec = sched_next;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);

	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		memcpy(p, &tbl->parms, sizeof(*p));

		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
			neigh_rand_reach_time(p->base_reachable_time);
		if (dev && dev->neigh_setup && dev->neigh_setup(dev, p)) {

		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)

	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			write_unlock_bh(&tbl->lock);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
void neigh_parms_destroy(struct neigh_parms *parms)
void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
		neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);
	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	memset(tbl->phash_buckets, 0, phsize);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next = neigh_tables;
	neigh_tables = tbl;
	write_unlock(&neigh_tbl_lock);
}
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;
	/* It is not clean... Fix it so the IPv6 module can be unloaded safely. */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (!nda[NDA_DST - 1] ||
		    nda[NDA_DST - 1]->rta_len != RTA_LENGTH(tbl->key_len))

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl,
					    RTA_DATA(nda[NDA_DST - 1]), dev);

		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST - 1]), dev);
		if (n) {
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE |
					   NEIGH_UPDATE_F_ADMIN);
		}
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (!nda[NDA_DST - 1] ||
		    nda[NDA_DST - 1]->rta_len != RTA_LENGTH(tbl->key_len))

		if (ndm->ndm_flags & NTF_PROXY) {
			if (pneigh_lookup(tbl,
					  RTA_DATA(nda[NDA_DST - 1]), dev, 1))
				err = 0;
		}

		if (nda[NDA_LLADDR - 1] &&
		    nda[NDA_LLADDR - 1]->rta_len != RTA_LENGTH(dev->addr_len))

		n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST - 1]), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL)

			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE))

		n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST - 1]),
					 dev);

		err = neigh_update(n, nda[NDA_LLADDR - 1] ?
				      RTA_DATA(nda[NDA_LLADDR - 1]) :
				      NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, event,
					 sizeof(struct ndmsg));
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family = n->ops->family;
	ndm->ndm_flags = n->flags;
	ndm->ndm_type = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	read_lock_bh(&n->lock);

	ndm->ndm_state = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated = now - n->updated;
	ci.ndm_refcnt = atomic_read(&n->refcnt) - 1;
	read_unlock_bh(&n->lock);

	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	nlh->nlmsg_len = skb->tail - b;

	read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH) <= 0) {
				read_unlock_bh(&tbl->lock);
		read_unlock_bh(&tbl->lock);
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		memset(&cb->args[1], 0, sizeof(cb->args) -
					sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);
void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie)
{
	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			write_lock(&n->lock);
			write_unlock(&n->lock);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;

	state->bucket = bucket;
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);

	if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))

	if (n->nud_state & ~NUD_NOARP)

	if (++state->bucket > tbl->hash_mask)

	n = tbl->hash_buckets[state->bucket];

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

		n = neigh_get_next(seq, n, pos);

	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (++state->bucket > PNEIGH_HASHMASK)
		break;
	pn = tbl->phash_buckets[state->bucket];

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

		pn = pneigh_get_next(seq, pn, pos);

	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos,
		      struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
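
/*
 * Illustrative caller (an assumption, modelled on the IPv4 ARP proc
 * code): a protocol wraps these helpers in its own seq_file start op.
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 */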
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}

EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs forced_gc_goal_miss\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,

static struct seq_operations neigh_stat_seq_ops = {
	.start = neigh_stat_seq_start,
	.next  = neigh_stat_seq_next,
	.stop  = neigh_stat_seq_stop,
	.show  = neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[17];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.proc_handler	= &proc_dointvec,
		},
	},
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;

	if (!t)
		return -ENOBUFS;

	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data = &p->mcast_probes;
	t->neigh_vars[1].data = &p->ucast_probes;
	t->neigh_vars[2].data = &p->app_probes;
	t->neigh_vars[3].data = &p->retrans_time;
	if (handler) {
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].extra1 = dev;
	}
	t->neigh_vars[4].data = &p->base_reachable_time;
	t->neigh_vars[5].data = &p->delay_probe_time;
	t->neigh_vars[6].data = &p->gc_staletime;
	t->neigh_vars[7].data = &p->queue_len;
	t->neigh_vars[8].data = &p->proxy_qlen;
	t->neigh_vars[9].data = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
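
	/*
	 * Added note: vars 0-11 map straight onto neigh_parms fields.  A
	 * per-device table ends here (entry 12 is zeroed as a terminator),
	 * while the "default" table keeps entries 12-15, whose data
	 * pointers -- the (p + 1) arithmetic below -- assume the four
	 * gc_* ints sit immediately after the neigh_parms in memory.
	 */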
	dev_name_source = t->neigh_dev[0].procname;
	if (dev) {
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
	} else {
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	dev_name = net_sysctl_strdup(dev_name_source);

	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child	    = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child  = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {

	p->sysctl_table = t;
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->neigh_dev[0].procname);
		kfree(t);
	}
}

#endif /* CONFIG_SYSCTL */
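
/*
 * Illustrative registration (an assumption, modelled on IPv4 ARP's
 * init path):
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4,
 *			      NET_IPV4_NEIGH, "ipv4", NULL);
 */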
EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_add);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_delete);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_dump_info);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(neigh_update_hhs);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif