2 * Generic address resolution entity
5 * Pedro Roque <pedro_m@yahoo.com>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 * Harald Welte port neighbour cache rework from 2.6.9-rcX
17 * Pradeep Vincent Move neighbour cache entry to stale state
20 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/sched.h>
26 #include <linux/netdevice.h>
27 #include <linux/proc_fs.h>
29 #include <linux/sysctl.h>
31 #include <net/neighbour.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/module.h>
/*
 * Debug printk macros: NEIGH_PRINTK always prints; NEIGH_PRINTK1/2 are
 * compiled out by default and re-defined to print at higher debug levels.
 * NOTE(review): the #if NEIGH_DEBUG guards around the re-definitions are
 * missing from this extraction -- verify against the full source.
 */
40 #define NEIGH_PRINTK(x...) printk(x)
41 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
42 #define NEIGH_PRINTK0 NEIGH_PRINTK
43 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
44 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
48 #define NEIGH_PRINTK1 NEIGH_PRINTK
52 #define NEIGH_PRINTK2 NEIGH_PRINTK
/* The proxy-neighbour hash has PNEIGH_HASHMASK + 1 (= 16) buckets. */
55 #define PNEIGH_HASHMASK 0xF
/* Forward declarations and file-scope state for the neighbour subsystem. */
57 static void neigh_timer_handler(unsigned long arg);
59 static void neigh_app_notify(struct neighbour *n);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
64 static int neigh_glbl_allocs;
65 static struct neigh_table *neigh_tables;
66 static struct file_operations neigh_stat_seq_fops;
69 Neighbour hash table buckets are protected with rwlock tbl->lock.
71 - All the scans/updates to hash buckets MUST be made under this lock.
72 - NOTHING clever should be made under this lock: no callbacks
73 to protocol backends, no attempts to send something to network.
74 It will result in deadlocks, if backend/driver wants to use neighbour
76 - If the entry requires some non-trivial actions, increase
77 its reference count and release table lock.
79 Neighbour entries are protected:
80 - with reference count.
81 - with rwlock neigh->lock
83 Reference count prevents destruction.
85 neigh->lock mainly serializes ll address data and its validity state.
86 However, the same lock is used to protect another entry fields:
90 Again, nothing clever shall be made under neigh->lock,
91 the most complicated procedure, which we allow is dev->hard_header.
92 It is supposed, that dev->hard_header is simplistic and does
93 not make callbacks to neighbour tables.
95 The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
96 list of neighbour tables. This list is used only in process context,
/* Protects the neigh_tables list; per the note above, process context only. */
99 static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;
/*
 * Output stub installed on dead/unresolvable entries (see neigh_alloc and
 * neigh_ifdown, which assign it to n->output).
 * NOTE(review): the function body is missing from this extraction.
 */
101 static int neigh_blackhole(struct sk_buff *skb)
/*
 * Pick a randomized ReachableTime uniformly distributed over
 * (1/2)*base ... (3/2)*base, as in the default IPv6 settings.  The
 * distribution is not overridable because it is a genuinely reasonable
 * choice.  A base of 0 yields 0.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	if (!base)
		return 0;

	return (base >> 1) + (net_random() % base);
}
/*
 * Synchronous garbage collection: walk every hash bucket under the table
 * write lock (BH disabled) and release entries that are unreferenced
 * (refcnt == 1, i.e. only the table holds them) and not NUD_PERMANENT.
 * NOTE(review): the unlink/free path, the shrink accounting and the
 * return statement are missing from this extraction.
 */
119 static int neigh_forced_gc(struct neigh_table *tbl)
124 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
126 write_lock_bh(&tbl->lock);
127 for (i = 0; i <= tbl->hash_mask; i++) {
128 struct neighbour *n, **np;
130 np = &tbl->hash_buckets[i];
131 while ((n = *np) != NULL) {
132 /* Neighbour record may be discarded if:
133 * - nobody refers to it.
134 * - it is not permanent
136 write_lock(&n->lock);
137 if (atomic_read(&n->refcnt) == 1 &&
138 !(n->nud_state&NUD_PERMANENT)) {
142 write_unlock(&n->lock);
146 write_unlock(&n->lock);
151 tbl->last_flush = jiffies;
153 write_unlock_bh(&tbl->lock);
/*
 * Cancel a pending state-machine timer for an entry in a NUD_IN_TIMER
 * state.  NOTE(review): the reference-drop on successful del_timer and
 * the return value are missing from this extraction.
 */
158 static int neigh_del_timer(struct neighbour *n)
160 if (n->nud_state & NUD_IN_TIMER) {
161 if (del_timer(&n->timer)) {
/*
 * Drain a proxy skb queue, freeing every queued packet.
 * NOTE(review): the per-skb release calls inside the loop are missing
 * from this extraction.
 */
169 static void pneigh_queue_purge(struct sk_buff_head *list)
173 while ((skb = skb_dequeue(list)) != NULL) {
/*
 * Walk the cache after a device hardware-address change; entries for
 * 'dev' (or all entries when dev == NULL) are processed under the table
 * write lock.
 * NOTE(review): n->lock is taken with write_lock_bh() here although
 * tbl->lock is already held with BH disabled; neigh_ifdown() below uses
 * plain write_lock() in the same situation -- the _bh variant looks
 * redundant (and historically was fixed upstream).  Verify.
 */
179 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
183 write_lock_bh(&tbl->lock);
185 for (i=0; i <= tbl->hash_mask; i++) {
186 struct neighbour *n, **np;
188 np = &tbl->hash_buckets[i];
189 while ((n = *np) != NULL) {
190 if (dev && n->dev != dev) {
195 write_lock_bh(&n->lock);
198 write_unlock_bh(&n->lock);
203 write_unlock_bh(&tbl->lock);
/*
 * Tear down all cache state for a device going away (dev == NULL means
 * every device).  Still-referenced entries cannot be freed immediately:
 * they are made harmless -- timers killed, queue purged, output routed
 * to neigh_blackhole, parms detached to the table defaults -- and left
 * to die when the last reference drops.  Proxy entries and the proxy
 * timer/queue are flushed as well (proxy timer sync'ed after dropping
 * tbl->lock, since del_timer_sync may wait).
 */
206 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
210 write_lock_bh(&tbl->lock);
212 for (i = 0; i <= tbl->hash_mask; i++) {
213 struct neighbour *n, **np;
215 np = &tbl->hash_buckets[i];
216 while ((n = *np) != NULL) {
217 if (dev && n->dev != dev) {
222 write_lock(&n->lock);
226 if (atomic_read(&n->refcnt) != 1) {
227 /* The most unpleasant situation.
228 We must destroy neighbour entry,
229 but someone still uses it.
231 The destroy will be delayed until
232 the last user releases us, but
233 we must kill timers etc. and move
236 n->parms = &tbl->parms;
237 skb_queue_purge(&n->arp_queue);
238 n->output = neigh_blackhole;
239 if (n->nud_state&NUD_VALID)
240 n->nud_state = NUD_NOARP;
242 n->nud_state = NUD_NONE;
243 NEIGH_PRINTK2("neigh %p is stray.\n", n);
245 write_unlock(&n->lock);
250 pneigh_ifdown(tbl, dev);
251 write_unlock_bh(&tbl->lock);
253 del_timer_sync(&tbl->proxy_timer);
254 pneigh_queue_purge(&tbl->proxy_queue);
/*
 * Allocate and zero-init a neighbour entry from the per-table slab.
 * When the table is over gc_thresh3 (or over gc_thresh2 and the last
 * flush was more than 5s ago), a forced GC is attempted first; if that
 * frees nothing and we are still over gc_thresh3 the allocation is
 * refused (refusal path missing from this extraction).  New entries
 * start in NUD_NONE with output = neigh_blackhole and refcnt 1.
 */
258 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
261 unsigned long now = jiffies;
263 if (atomic_read(&tbl->entries) > tbl->gc_thresh3 ||
264 (atomic_read(&tbl->entries) > tbl->gc_thresh2 &&
265 now - tbl->last_flush > 5*HZ)) {
266 if (neigh_forced_gc(tbl) == 0 &&
267 atomic_read(&tbl->entries) > tbl->gc_thresh3)
271 n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
275 memset(n, 0, tbl->entry_size);
277 skb_queue_head_init(&n->arp_queue);
278 n->lock = RW_LOCK_UNLOCKED;
279 n->updated = n->used = now;
280 n->nud_state = NUD_NONE;
281 n->output = neigh_blackhole;
282 n->parms = &tbl->parms;
283 init_timer(&n->timer);
284 n->timer.function = neigh_timer_handler;
285 n->timer.data = (unsigned long)n;
286 NEIGH_CACHE_STAT_INC(tbl, allocs);
288 atomic_inc(&tbl->entries);
290 atomic_set(&n->refcnt, 1);
/*
 * Allocate a zeroed bucket array for 'entries' chains: kmalloc when it
 * fits in a page, raw pages otherwise.  GFP_ATOMIC because callers may
 * hold tbl->lock.
 * NOTE(review): the NULL-check guarding the memset is missing from this
 * extraction -- presumably 'if (ret)' precedes it; verify.
 */
295 static struct neighbour **neigh_hash_alloc(unsigned int entries)
297 unsigned long size = entries * sizeof(struct neighbour *);
298 struct neighbour **ret;
300 if (size <= PAGE_SIZE) {
301 ret = kmalloc(size, GFP_ATOMIC);
303 ret = (struct neighbour **)
304 __get_free_pages(GFP_ATOMIC, get_order(size));
307 memset(ret, 0, size);
/*
 * Free a bucket array allocated by neigh_hash_alloc, using the matching
 * release primitive for the size-dependent allocation strategy.
 */
312 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
314 unsigned long size = entries * sizeof(struct neighbour *);
316 if (size <= PAGE_SIZE)
319 free_pages((unsigned long)hash, get_order(size));
/*
 * Rehash the table into a larger power-of-two bucket array.  Caller
 * holds tbl->lock as writer (called from neigh_create).  hash_rnd is
 * re-randomized so every entry's bucket is recomputed.
 * NOTE(review): the allocation-failure bail-out and the unlink of n from
 * the old chain are missing from this extraction.
 */
322 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
324 struct neighbour **new_hash, **old_hash;
325 unsigned int i, new_hash_mask, old_entries;
327 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
329 BUG_ON(new_entries & (new_entries - 1));
330 new_hash = neigh_hash_alloc(new_entries);
334 old_entries = tbl->hash_mask + 1;
335 new_hash_mask = new_entries - 1;
336 old_hash = tbl->hash_buckets;
338 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
339 for (i = 0; i < old_entries; i++) {
340 struct neighbour *n, *next;
342 for (n = old_hash[i]; n; n = next) {
343 unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
345 hash_val &= new_hash_mask;
348 n->next = new_hash[hash_val];
349 new_hash[hash_val] = n;
352 tbl->hash_buckets = new_hash;
353 tbl->hash_mask = new_hash_mask;
355 neigh_hash_free(old_hash, old_entries);
/*
 * Find the entry for (pkey, dev) in the cache.  Read-locks tbl->lock;
 * on a hit the refcount is taken before the lock is dropped so the
 * returned entry stays valid (ref-taking lines missing from this
 * extraction).  Returns NULL when not found.
 */
358 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
359 struct net_device *dev)
362 int key_len = tbl->key_len;
363 u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
365 NEIGH_CACHE_STAT_INC(tbl, lookups);
367 read_lock_bh(&tbl->lock);
368 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
370 memcmp(n->primary_key, pkey, key_len) == 0) {
372 NEIGH_CACHE_STAT_INC(tbl, hits);
376 read_unlock_bh(&tbl->lock);
/*
 * As neigh_lookup, but matches on the key alone, ignoring the device
 * (used by protocols whose keys are globally unique, e.g. DECnet).
 */
380 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
383 int key_len = tbl->key_len;
384 u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
386 NEIGH_CACHE_STAT_INC(tbl, lookups);
388 read_lock_bh(&tbl->lock);
389 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
390 if (!memcmp(n->primary_key, pkey, key_len)) {
392 NEIGH_CACHE_STAT_INC(tbl, hits);
396 read_unlock_bh(&tbl->lock);
/*
 * Allocate, construct and hash-insert a new entry for (pkey, dev).
 * Runs protocol (tbl->constructor) then device (parms->neigh_setup)
 * setup outside the table lock; on failure the fresh entry is released
 * and an ERR_PTR returned.  Under tbl->lock the table may be grown, and
 * the chain is re-scanned: if a concurrent creator inserted the same
 * key first, that entry wins and ours is released (duplicate-handling
 * and device refcount lines missing from this extraction).
 */
400 struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey,
401 struct net_device *dev)
403 struct neighbour *n, *n1;
405 int key_len = tbl->key_len;
408 n = neigh_alloc(tbl);
410 return ERR_PTR(-ENOBUFS);
412 memcpy(n->primary_key, pkey, key_len);
416 /* Protocol specific setup. */
417 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
419 return ERR_PTR(error);
422 /* Device specific setup. */
423 if (n->parms->neigh_setup &&
424 (error = n->parms->neigh_setup(n)) < 0) {
426 return ERR_PTR(error);
429 n->confirmed = jiffies - (n->parms->base_reachable_time<<1);
431 write_lock_bh(&tbl->lock);
432 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
433 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
435 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
437 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
438 if (dev == n1->dev &&
439 memcmp(n1->primary_key, pkey, key_len) == 0) {
441 write_unlock_bh(&tbl->lock);
447 n->next = tbl->hash_buckets[hash_val];
448 tbl->hash_buckets[hash_val] = n;
451 write_unlock_bh(&tbl->lock);
452 NEIGH_PRINTK2("neigh %p is created.\n", n);
/*
 * Look up a proxy-neighbour entry; with creat != 0, allocate and insert
 * one when absent (GFP_KERNEL -- process context only).  The hash folds
 * the last 4 key bytes down to PNEIGH_HASHMASK.  A stored dev of NULL
 * acts as a wildcard matching any device.
 * NOTE(review): the creat test after the failed lookup and the
 * pconstructor-failure cleanup are missing from this extraction.
 */
456 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
457 struct net_device *dev, int creat)
459 struct pneigh_entry *n;
461 int key_len = tbl->key_len;
463 hash_val = *(u32*)(pkey + key_len - 4);
464 hash_val ^= (hash_val>>16);
465 hash_val ^= hash_val>>8;
466 hash_val ^= hash_val>>4;
467 hash_val &= PNEIGH_HASHMASK;
469 read_lock_bh(&tbl->lock);
471 for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
472 if (memcmp(n->key, pkey, key_len) == 0 &&
473 (n->dev == dev || !n->dev)) {
474 read_unlock_bh(&tbl->lock);
478 read_unlock_bh(&tbl->lock);
482 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
486 memcpy(n->key, pkey, key_len);
489 if (tbl->pconstructor && tbl->pconstructor(n)) {
494 write_lock_bh(&tbl->lock);
495 n->next = tbl->phash_buckets[hash_val];
496 tbl->phash_buckets[hash_val] = n;
497 write_unlock_bh(&tbl->lock);
/*
 * Remove the proxy entry matching (pkey, dev) exactly.  Unlinks under
 * tbl->lock, then runs pdestructor and frees outside it.  Same 4-byte
 * key-fold hash as pneigh_lookup.  Falls through to the not-found
 * return (missing from this extraction) when no entry matches.
 */
502 int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct net_device *dev)
504 struct pneigh_entry *n, **np;
506 int key_len = tbl->key_len;
508 hash_val = *(u32*)(pkey + key_len - 4);
509 hash_val ^= (hash_val>>16);
510 hash_val ^= hash_val>>8;
511 hash_val ^= hash_val>>4;
512 hash_val &= PNEIGH_HASHMASK;
514 write_lock_bh(&tbl->lock);
515 for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) {
516 if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) {
518 write_unlock_bh(&tbl->lock);
519 if (tbl->pdestructor)
525 write_unlock_bh(&tbl->lock);
/*
 * Drop every proxy entry bound to 'dev' (all entries when dev == NULL).
 * Caller (neigh_ifdown) already holds tbl->lock as writer.
 */
529 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
531 struct pneigh_entry *n, **np;
534 for (h=0; h<=PNEIGH_HASHMASK; h++) {
535 np = &tbl->phash_buckets[h];
536 while ((n=*np) != NULL) {
537 if (n->dev == dev || dev == NULL) {
539 if (tbl->pdestructor)
/*
 * Final teardown when the last reference is dropped.
 * neighbour must already be out of the table;
 * detaches and releases the cached hh entries (pointing their output at
 * neigh_blackhole first, since packets may still hold hh references),
 * runs the protocol destructor, purges the arp_queue and frees the slab
 * object.  An entry still alive (NUD state set) or with a live timer
 * here indicates a refcounting bug, hence the printks.
 */
555 void neigh_destroy(struct neighbour *neigh)
559 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
562 printk("Destroying alive neighbour %p\n", neigh);
567 if (neigh_del_timer(neigh))
568 printk("Impossible event.\n");
570 while ((hh = neigh->hh) != NULL) {
571 neigh->hh = hh->hh_next;
573 write_lock_bh(&hh->hh_lock);
574 hh->hh_output = neigh_blackhole;
575 write_unlock_bh(&hh->hh_lock);
576 if (atomic_dec_and_test(&hh->hh_refcnt))
580 if (neigh->ops && neigh->ops->destructor)
581 (neigh->ops->destructor)(neigh);
583 skb_queue_purge(&neigh->arp_queue);
587 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
590 atomic_dec(&neigh->tbl->entries);
591 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
/* Neighbour state is suspicious;
   route packets through the slow (resolving) output path by pointing
   the entry and all of its cached hh entries at ops->output.
   Called with write_locked neigh.
 */
599 static void neigh_suspect(struct neighbour *neigh)
603 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
605 neigh->output = neigh->ops->output;
607 for (hh = neigh->hh; hh; hh = hh->hh_next)
608 hh->hh_output = neigh->ops->output;
/* Neighbour state is OK;
   switch the entry and its cached hh entries onto the fast (connected)
   output paths.
   Called with write_locked neigh.
 */
616 static void neigh_connect(struct neighbour *neigh)
620 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
622 neigh->output = neigh->ops->connected_output;
624 for (hh = neigh->hh; hh; hh = hh->hh_next)
625 hh->hh_output = neigh->ops->hh_output;
/*
   Transitions NUD_STALE <-> NUD_REACHABLE do not occur
   when fast path is built: we have no timers associated with
   these states, we do not have time to check state when sending.
   neigh_periodic_timer check periodically neigh->confirmed
   time and moves NUD_REACHABLE -> NUD_STALE.

   If a routine wants to know TRUE entry state, it calls
   neigh_sync before checking state.

   Called with write_locked neigh.
 */
641 static void neigh_sync(struct neighbour *n)
643 unsigned long now = jiffies;
644 u8 state = n->nud_state;
646 if (state&(NUD_NOARP|NUD_PERMANENT))
648 if (state&NUD_REACHABLE) {
649 if (now - n->confirmed > n->parms->reachable_time) {
650 n->nud_state = NUD_STALE;
653 } else if (state&NUD_VALID) {
654 if (now - n->confirmed < n->parms->reachable_time) {
656 n->nud_state = NUD_REACHABLE;
/*
 * Periodic GC worker (runs from the gc tasklet on SMP; see the
 * neigh_periodic_timer wrapper below).  Each run: re-randomizes every
 * parms' reachable_time every 300s, then scans ONE hash chain
 * (round-robin via hash_chain_gc), freeing unreferenced FAILED/expired
 * entries and demoting over-age NUD_REACHABLE entries to NUD_STALE.
 * Re-arms itself so all buckets are covered every
 * base_reachable_time/2 ticks.
 * NOTE(review): the unlink/release lines and the minimum-expire clamp
 * are missing from this extraction.
 */
662 static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
664 struct neigh_table *tbl = (struct neigh_table*)arg;
665 struct neighbour *n, **np;
666 unsigned long expire, now = jiffies;
668 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
670 write_lock(&tbl->lock);
673 * periodicly recompute ReachableTime from random function
676 if (now - tbl->last_rand > 300*HZ) {
677 struct neigh_parms *p;
678 tbl->last_rand = now;
679 for (p=&tbl->parms; p; p = p->next)
680 p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
683 np = &tbl->hash_buckets[tbl->hash_chain_gc];
684 tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
686 while ((n = *np) != NULL) {
689 write_lock(&n->lock);
691 state = n->nud_state;
692 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
693 write_unlock(&n->lock);
697 if (time_before(n->used, n->confirmed))
698 n->used = n->confirmed;
700 if (atomic_read(&n->refcnt) == 1 &&
701 (state == NUD_FAILED ||
702 time_after(now, n->used + n->parms->gc_staletime))) {
705 write_unlock(&n->lock);
710 /* Mark it stale - To be reconfirmed later when used */
711 if (n->nud_state & NUD_REACHABLE &&
712 now - n->confirmed > n->parms->reachable_time) {
713 n->nud_state = NUD_STALE;
717 write_unlock(&n->lock);
723 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
724 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
725 * base_reachable_time.
727 expire = tbl->parms.base_reachable_time >> 1;
728 expire /= (tbl->hash_mask + 1);
732 mod_timer(&tbl->gc_timer, now + expire);
734 write_unlock(&tbl->lock);
/*
 * SMP timer entry point: defer the actual GC scan to the tasklet so it
 * can run on the appropriate CPU (see SMP_TIMER_NAME worker above).
 */
738 static void neigh_periodic_timer(unsigned long arg)
740 struct neigh_table *tbl = (struct neigh_table*)arg;
742 tasklet_schedule(&tbl->gc_task);
746 static __inline__ int neigh_max_probes(struct neighbour *n)
748 struct neigh_parms *p = n->parms;
749 return p->ucast_probes + p->app_probes + p->mcast_probes;
/* Called when a timer expires for a neighbour entry. */
/*
 * State-machine driver: confirms a still-fresh VALID entry back to
 * REACHABLE, advances DELAY -> PROBE, and when the probe budget
 * (neigh_max_probes) is exhausted marks the entry NUD_FAILED, reporting
 * each queued skb unreachable -- dropping neigh->lock around the
 * error_report callback (see the deadlock note).  Otherwise it re-arms
 * the timer and re-solicits, again unlocking around ops->solicit.
 * NOTE(review): several transition/unlock lines are missing from this
 * extraction; 'notify' is set on paths not visible here.
 */
755 static void neigh_timer_handler(unsigned long arg)
757 unsigned long now = jiffies;
758 struct neighbour *neigh = (struct neighbour*)arg;
763 write_lock(&neigh->lock);
765 state = neigh->nud_state;
767 if (!(state&NUD_IN_TIMER)) {
769 printk("neigh: timer & !nud_in_timer\n");
774 if ((state&NUD_VALID) &&
775 now - neigh->confirmed < neigh->parms->reachable_time) {
776 neigh->nud_state = NUD_REACHABLE;
777 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
778 neigh_connect(neigh);
781 if (state == NUD_DELAY) {
782 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
783 neigh->nud_state = NUD_PROBE;
784 atomic_set(&neigh->probes, 0);
787 if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
790 neigh->nud_state = NUD_FAILED;
792 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
793 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
795 /* It is very thin place. report_unreachable is very complicated
796 routine. Particularly, it can hit the same neighbour entry!
798 So that, we try to be accurate and avoid dead loop. --ANK
800 while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
801 write_unlock(&neigh->lock);
802 neigh->ops->error_report(neigh, skb);
803 write_lock(&neigh->lock);
805 skb_queue_purge(&neigh->arp_queue);
809 neigh->timer.expires = now + neigh->parms->retrans_time;
810 add_timer(&neigh->timer);
812 /* keep skb alive even if arp_queue overflows */
813 skb = skb_peek(&neigh->arp_queue);
817 write_unlock(&neigh->lock);
819 neigh->ops->solicit(neigh, skb);
820 atomic_inc(&neigh->probes);
828 write_unlock(&neigh->lock);
830 if (notify && neigh->parms->app_probes)
831 neigh_app_notify(neigh);
833 neigh_release(neigh);
/*
 * Slow path of neigh_event_send: kick resolution for an entry not in a
 * CONNECTED/DELAY/PROBE state.  NONE/FAILED entries either start
 * NUD_INCOMPLETE with a first solicit (when any probe type is
 * configured) or go straight to NUD_FAILED.  While INCOMPLETE, the skb
 * is queued on arp_queue -- evicting the oldest when queue_len is
 * exceeded -- and 1 is returned (return missing from this extraction);
 * a STALE entry moves to DELAY with the delay-probe timer armed.
 */
836 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
838 write_lock_bh(&neigh->lock);
839 if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
840 if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
841 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
842 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
843 neigh->nud_state = NUD_INCOMPLETE;
845 neigh->timer.expires = jiffies + neigh->parms->retrans_time;
846 add_timer(&neigh->timer);
847 write_unlock_bh(&neigh->lock);
848 neigh->ops->solicit(neigh, skb);
849 atomic_inc(&neigh->probes);
850 write_lock_bh(&neigh->lock);
852 neigh->nud_state = NUD_FAILED;
853 write_unlock_bh(&neigh->lock);
860 if (neigh->nud_state == NUD_INCOMPLETE) {
862 if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) {
863 struct sk_buff *buff;
864 buff = neigh->arp_queue.next;
865 __skb_unlink(buff, &neigh->arp_queue);
868 __skb_queue_tail(&neigh->arp_queue, skb);
870 write_unlock_bh(&neigh->lock);
873 if (neigh->nud_state == NUD_STALE) {
874 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
876 neigh->nud_state = NUD_DELAY;
877 neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
878 add_timer(&neigh->timer);
881 write_unlock_bh(&neigh->lock);
/*
 * Push a changed hardware address (neigh->ha) into every cached hh
 * entry via the device's header_cache_update hook, each under its own
 * hh_lock.  NOTE(review): the NULL-check on 'update' is presumably on a
 * line missing from this extraction -- verify.
 */
885 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
888 void (*update)(struct hh_cache*, struct net_device*, unsigned char*) =
889 neigh->dev->header_cache_update;
892 for (hh=neigh->hh; hh; hh=hh->hh_next) {
893 write_lock_bh(&hh->hh_lock);
894 update(hh, neigh->dev, neigh->ha);
895 write_unlock_bh(&hh->hh_lock);
/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new is the new NUD state to move to.
   -- override==1 allows to override existing lladdr, if it is different.
   -- arp==0 means that the change is administrative.

   Caller MUST hold reference count on the entry.
   Takes neigh->lock; applies the lladdr (propagating through hh caches
   via neigh_update_hhs), switches connected/suspect output paths, and
   on a !VALID -> VALID transition flushes the pending arp_queue,
   dropping the lock around each transmit to avoid the dead-loop noted
   below.  Finally notifies userspace via neigh_app_notify when
   app_probes are configured.
 */
911 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp)
916 struct net_device *dev = neigh->dev;
918 write_lock_bh(&neigh->lock);
919 old = neigh->nud_state;
922 if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
925 if (!(new&NUD_VALID)) {
926 neigh_del_timer(neigh);
927 if (old&NUD_CONNECTED)
928 neigh_suspect(neigh);
929 neigh->nud_state = new;
931 notify = old&NUD_VALID;
935 /* Compare new lladdr with cached one */
936 if (dev->addr_len == 0) {
937 /* First case: device needs no address. */
940 /* The second case: if something is already cached
941 and a new address is proposed:
943 - if they are different, check override flag
946 if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
952 /* No address is supplied; if we know something,
953 use it, otherwise discard the request.
956 if (!(old&NUD_VALID))
962 old = neigh->nud_state;
963 if (new&NUD_CONNECTED)
964 neigh->confirmed = jiffies;
965 neigh->updated = jiffies;
967 /* If entry was valid and address is not changed,
968 do not change entry state, if new one is STALE.
972 if (lladdr == neigh->ha)
973 if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
976 neigh_del_timer(neigh);
977 neigh->nud_state = new;
978 if (lladdr != neigh->ha) {
979 memcpy(&neigh->ha, lladdr, dev->addr_len);
980 neigh_update_hhs(neigh);
981 if (!(new&NUD_CONNECTED))
982 neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
989 if (new&NUD_CONNECTED)
990 neigh_connect(neigh);
992 neigh_suspect(neigh);
993 if (!(old&NUD_VALID)) {
996 /* Again: avoid dead loop if something went wrong */
998 while (neigh->nud_state&NUD_VALID &&
999 (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
1000 struct neighbour *n1 = neigh;
1001 write_unlock_bh(&neigh->lock);
1002 /* On shaper/eql skb->dst->neighbour != neigh :( */
1003 if (skb->dst && skb->dst->neighbour)
1004 n1 = skb->dst->neighbour;
1006 write_lock_bh(&neigh->lock);
1008 skb_queue_purge(&neigh->arp_queue);
1011 write_unlock_bh(&neigh->lock);
1013 if (notify && neigh->parms->app_probes)
1014 neigh_app_notify(neigh);
/*
 * Handle a received neighbour solicitation from 'saddr': look up (and,
 * per __neigh_lookup's creat flag, possibly create when an lladdr is
 * supplied or the device is addressless) the entry, then mark it STALE
 * with the sender's lladdr.  Returns the referenced entry or NULL.
 */
1019 struct neighbour * neigh_event_ns(struct neigh_table *tbl,
1020 u8 *lladdr, void *saddr,
1021 struct net_device *dev)
1023 struct neighbour *neigh;
1025 neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len);
1027 neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
/*
 * Attach a cached hardware-header (hh) entry for 'protocol' to both the
 * neighbour and the dst.  Reuses an existing entry of the same protocol
 * if present; otherwise allocates one, fills it via the device's
 * hard_header_cache hook (freed on failure -- free line missing from
 * this extraction), links it on n->hh, and points hh_output at the
 * connected or generic output path depending on NUD state.  Called with
 * neigh write-locked (see neigh_resolve_output).
 */
1031 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol)
1033 struct hh_cache *hh = NULL;
1034 struct net_device *dev = dst->dev;
1036 for (hh=n->hh; hh; hh = hh->hh_next)
1037 if (hh->hh_type == protocol)
1040 if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1041 memset(hh, 0, sizeof(struct hh_cache));
1042 hh->hh_lock = RW_LOCK_UNLOCKED;
1043 hh->hh_type = protocol;
1044 atomic_set(&hh->hh_refcnt, 0);
1046 if (dev->hard_header_cache(n, hh)) {
1050 atomic_inc(&hh->hh_refcnt);
1051 hh->hh_next = n->hh;
1053 if (n->nud_state&NUD_CONNECTED)
1054 hh->hh_output = n->ops->hh_output;
1056 hh->hh_output = n->ops->output;
1060 atomic_inc(&hh->hh_refcnt);
/* This function can be used in contexts, where only old dev_queue_xmit
   worked, f.e. if you want to override normal output path (eql, shaper),
   but resolution is not made yet.
   Builds the link-layer header (letting rebuild_header fix it up on
   failure) and hands the skb to dev_queue_xmit.
 */
1070 int neigh_compat_output(struct sk_buff *skb)
1072 struct net_device *dev = skb->dev;
1074 __skb_pull(skb, skb->nh.raw - skb->data);
1076 if (dev->hard_header &&
1077 dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 &&
1078 dev->rebuild_header(skb))
1081 return dev_queue_xmit(skb);
/* Slow and careful. */
/*
 * Resolving output path: neigh_event_send() may queue the skb and start
 * resolution; when the entry is usable we build the link header under
 * neigh->lock -- write-locked if an hh cache entry must first be
 * initialized (double-checked dst->hh test), read-locked otherwise --
 * and transmit via ops->queue_xmit.  The failure path (missing from
 * this extraction) frees the skb and returns an error, logging via
 * NEIGH_PRINTK1 below.
 */
1086 int neigh_resolve_output(struct sk_buff *skb)
1088 struct dst_entry *dst = skb->dst;
1089 struct neighbour *neigh;
1091 if (!dst || !(neigh = dst->neighbour))
1094 __skb_pull(skb, skb->nh.raw - skb->data);
1096 if (neigh_event_send(neigh, skb) == 0) {
1098 struct net_device *dev = neigh->dev;
1099 if (dev->hard_header_cache && dst->hh == NULL) {
1100 write_lock_bh(&neigh->lock);
1101 if (dst->hh == NULL)
1102 neigh_hh_init(neigh, dst, dst->ops->protocol);
1103 err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
1104 write_unlock_bh(&neigh->lock);
1106 read_lock_bh(&neigh->lock);
1107 err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
1108 read_unlock_bh(&neigh->lock);
1111 return neigh->ops->queue_xmit(skb);
1118 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL);
/* As fast as possible without hh cache */
/*
 * Connected output path: the entry is assumed resolved, so just build
 * the link header from neigh->ha under the read lock and transmit.
 */
1125 int neigh_connected_output(struct sk_buff *skb)
1128 struct dst_entry *dst = skb->dst;
1129 struct neighbour *neigh = dst->neighbour;
1130 struct net_device *dev = neigh->dev;
1132 __skb_pull(skb, skb->nh.raw - skb->data);
1134 read_lock_bh(&neigh->lock);
1135 err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
1136 read_unlock_bh(&neigh->lock);
1138 return neigh->ops->queue_xmit(skb);
/*
 * Proxy timer handler: walk the delayed proxy queue, re-processing (via
 * tbl->proxy_redo) every skb whose deadline -- stashed in
 * skb->stamp.tv_usec by pneigh_enqueue -- has passed, and re-arm the
 * timer for the earliest remaining deadline.  Skbs for devices that
 * went down are dropped (free path missing from this extraction).
 */
1143 static void neigh_proxy_process(unsigned long arg)
1145 struct neigh_table *tbl = (struct neigh_table *)arg;
1146 long sched_next = 0;
1147 unsigned long now = jiffies;
1148 struct sk_buff *skb;
1150 spin_lock(&tbl->proxy_queue.lock);
1152 skb = tbl->proxy_queue.next;
1154 while (skb != (struct sk_buff*)&tbl->proxy_queue) {
1155 struct sk_buff *back = skb;
1156 long tdif = back->stamp.tv_usec - now;
1160 struct net_device *dev = back->dev;
1161 __skb_unlink(back, &tbl->proxy_queue);
1162 if (tbl->proxy_redo && netif_running(dev))
1163 tbl->proxy_redo(back);
1168 } else if (!sched_next || tdif < sched_next)
1171 del_timer(&tbl->proxy_timer);
1173 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1174 spin_unlock(&tbl->proxy_queue.lock);
/*
 * Queue an skb for delayed proxy processing after a random delay up to
 * p->proxy_delay.  The deadline is stored in skb->stamp (tv_sec == 0
 * marks it as a proxy deadline, not a real timestamp -- consumed by
 * neigh_proxy_process above).  Over-length queues drop the packet (drop
 * path missing from this extraction); an already-armed timer that would
 * fire sooner keeps its earlier expiry.
 */
1177 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1178 struct sk_buff *skb)
1180 unsigned long now = jiffies;
1181 long sched_next = net_random()%p->proxy_delay;
1183 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1187 skb->stamp.tv_sec = 0;
1188 skb->stamp.tv_usec = now + sched_next;
1190 spin_lock(&tbl->proxy_queue.lock);
1191 if (del_timer(&tbl->proxy_timer)) {
1192 long tval = tbl->proxy_timer.expires - now;
1193 if (tval < sched_next)
1196 dst_release(skb->dst);
1199 __skb_queue_tail(&tbl->proxy_queue, skb);
1200 mod_timer(&tbl->proxy_timer, now + sched_next);
1201 spin_unlock(&tbl->proxy_queue.lock);
/*
 * Clone the table's default parms for a device, run the device's
 * neigh_setup hook (freeing and failing if it rejects -- cleanup lines
 * missing from this extraction), and link the new parms onto the
 * table's list under tbl->lock.  Returns NULL on allocation failure.
 */
1205 struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl)
1207 struct neigh_parms *p;
1208 p = kmalloc(sizeof(*p), GFP_KERNEL);
1210 memcpy(p, &tbl->parms, sizeof(*p));
1212 p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
1213 if (dev && dev->neigh_setup) {
1214 if (dev->neigh_setup(dev, p)) {
1219 p->sysctl_table = NULL;
1220 write_lock_bh(&tbl->lock);
1221 p->next = tbl->parms.next;
1222 tbl->parms.next = p;
1223 write_unlock_bh(&tbl->lock);
/*
 * Unlink and free a parms block allocated by neigh_parms_alloc.  The
 * table's built-in default parms (&tbl->parms) are never freed.  Falls
 * through to the "not found" printk when the parms are not on the list.
 * NOTE(review): the unlink/kfree inside the loop is missing from this
 * extraction.
 */
1228 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1230 struct neigh_parms **p;
1232 if (parms == NULL || parms == &tbl->parms)
1234 write_lock_bh(&tbl->lock);
1235 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1238 write_unlock_bh(&tbl->lock);
1239 #ifdef CONFIG_SYSCTL
1240 neigh_sysctl_unregister(parms);
1246 write_unlock_bh(&tbl->lock);
1247 NEIGH_PRINTK1("neigh_parms_release: not found\n");
/*
 * One-time initialization of a protocol's neighbour table: slab cache
 * (entry_size rounded up to 16), /proc/net/stat entry, main and proxy
 * hash arrays, GC tasklet + timer, proxy timer/queue, and registration
 * on the global neigh_tables list.  Allocation failures panic -- this
 * runs at protocol-init time.  last_rand is pushed into the future so
 * the first periodic run does not immediately re-randomize.
 */
1251 void neigh_table_init(struct neigh_table *tbl)
1253 unsigned long now = jiffies;
1254 unsigned long phsize;
1256 tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time);
1258 if (tbl->kmem_cachep == NULL)
1259 tbl->kmem_cachep = kmem_cache_create(tbl->id,
1260 (tbl->entry_size+15)&~15,
1261 0, SLAB_HWCACHE_ALIGN,
1264 if (!tbl->kmem_cachep)
1265 panic("cannot create neighbour cache");
1267 #ifdef CONFIG_PROC_FS
1268 tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
1270 panic("cannot create neighbour proc dir entry");
1271 tbl->pde->proc_fops = &neigh_stat_seq_fops;
1272 tbl->pde->data = tbl;
1276 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1278 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1279 tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
1281 if (!tbl->hash_buckets || !tbl->phash_buckets)
1282 panic("cannot allocate neighbour cache hashes");
1284 memset(tbl->phash_buckets, 0, phsize);
1286 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1289 tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), (unsigned long)tbl);
1291 init_timer(&tbl->gc_timer);
1292 tbl->lock = RW_LOCK_UNLOCKED;
1293 tbl->gc_timer.data = (unsigned long)tbl;
1294 tbl->gc_timer.function = neigh_periodic_timer;
1295 tbl->gc_timer.expires = now + 1;
1296 add_timer(&tbl->gc_timer);
1298 init_timer(&tbl->proxy_timer);
1299 tbl->proxy_timer.data = (unsigned long)tbl;
1300 tbl->proxy_timer.function = neigh_proxy_process;
1301 skb_queue_head_init(&tbl->proxy_queue);
1303 tbl->last_flush = now;
1304 tbl->last_rand = now + tbl->parms.reachable_time*20;
1305 write_lock(&neigh_tbl_lock);
1306 tbl->next = neigh_tables;
1308 write_unlock(&neigh_tbl_lock);
/*
 * Tear down a table on protocol-module unload: stop GC timer/tasklet
 * and proxy timer, purge the proxy queue, flush all entries via
 * neigh_ifdown(tbl, NULL) (leaks are only reported, not recovered),
 * unlink from neigh_tables, and free both hash arrays.
 */
1311 int neigh_table_clear(struct neigh_table *tbl)
1313 struct neigh_table **tp;
1315 /* It is not clean... Fix it to unload IPv6 module safely */
1316 del_timer_sync(&tbl->gc_timer);
1317 tasklet_kill(&tbl->gc_task);
1318 del_timer_sync(&tbl->proxy_timer);
1319 pneigh_queue_purge(&tbl->proxy_queue);
1320 neigh_ifdown(tbl, NULL);
1321 if (atomic_read(&tbl->entries))
1322 printk(KERN_CRIT "neighbour leakage\n");
1323 write_lock(&neigh_tbl_lock);
1324 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1330 write_unlock(&neigh_tbl_lock);
1332 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1333 tbl->hash_buckets = NULL;
1335 kfree(tbl->phash_buckets);
1336 tbl->phash_buckets = NULL;
1338 #ifdef CONFIG_SYSCTL
1339 neigh_sysctl_unregister(&tbl->parms);
/*
 * RTM_DELNEIGH netlink handler: locate the table matching the message
 * family, then delete either a proxy entry (NTF_PROXY) or a normal one
 * -- the latter by forcing it to NUD_FAILED via an administrative
 * neigh_update.  Returns -EADDRNOTAVAIL when no table matches the
 * family.  NOTE(review): error-exit and dev_put lines are missing from
 * this extraction.
 */
1344 int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1346 struct ndmsg *ndm = NLMSG_DATA(nlh);
1347 struct rtattr **nda = arg;
1348 struct neigh_table *tbl;
1349 struct net_device *dev = NULL;
1352 if (ndm->ndm_ifindex) {
1353 if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1357 read_lock(&neigh_tbl_lock);
1358 for (tbl=neigh_tables; tbl; tbl = tbl->next) {
1359 struct neighbour *n;
1361 if (tbl->family != ndm->ndm_family)
1363 read_unlock(&neigh_tbl_lock);
1366 if (nda[NDA_DST-1] == NULL ||
1367 nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
1370 if (ndm->ndm_flags&NTF_PROXY) {
1371 err = pneigh_delete(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1378 n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1380 err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
1388 read_unlock(&neigh_tbl_lock);
1393 return -EADDRNOTAVAIL;
/*
 * RTM_NEWNEIGH netlink handler: after validating the NDA_DST and
 * NDA_LLADDR attribute lengths against the table and device, either
 * creates a proxy entry (NTF_PROXY), or looks up / creates a normal
 * entry honoring NLM_F_EXCL / NLM_F_REPLACE / NLM_F_CREATE, and applies
 * the requested lladdr/state via an administrative neigh_update.
 * Returns -EADDRNOTAVAIL when no table matches the message family.
 * NOTE(review): several error-exit/release lines are missing from this
 * extraction.
 */
1396 int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1398 struct ndmsg *ndm = NLMSG_DATA(nlh);
1399 struct rtattr **nda = arg;
1400 struct neigh_table *tbl;
1401 struct net_device *dev = NULL;
1403 if (ndm->ndm_ifindex) {
1404 if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
1408 read_lock(&neigh_tbl_lock);
1409 for (tbl=neigh_tables; tbl; tbl = tbl->next) {
1412 struct neighbour *n;
1414 if (tbl->family != ndm->ndm_family)
1416 read_unlock(&neigh_tbl_lock);
1419 if (nda[NDA_DST-1] == NULL ||
1420 nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
1422 if (ndm->ndm_flags&NTF_PROXY) {
1424 if (pneigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 1))
1431 if (nda[NDA_LLADDR-1] != NULL &&
1432 nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
1435 n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1437 if (nlh->nlmsg_flags&NLM_F_EXCL)
1439 override = nlh->nlmsg_flags&NLM_F_REPLACE;
1440 } else if (!(nlh->nlmsg_flags&NLM_F_CREATE))
1443 n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
1450 err = neigh_update(n, nda[NDA_LLADDR-1] ? RTA_DATA(nda[NDA_LLADDR-1]) : NULL,
1461 read_unlock(&neigh_tbl_lock);
1465 return -EADDRNOTAVAIL;
/*
 * Serialize one neighbour entry into a netlink message: ndmsg header,
 * NDA_DST key, NDA_LLADDR (only while the entry is NUD_VALID, read
 * under n->lock), and NDA_CACHEINFO timing/refcount data.  On buffer
 * overflow the NLMSG_PUT/RTA_PUT macros jump to the failure labels,
 * which trim the partial message and return a negative value (label
 * lines missing from this extraction).
 */
1469 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
1470 u32 pid, u32 seq, int event)
1472 unsigned long now = jiffies;
1474 struct nlmsghdr *nlh;
1475 unsigned char *b = skb->tail;
1476 struct nda_cacheinfo ci;
1479 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ndm));
1480 ndm = NLMSG_DATA(nlh);
1481 nlh->nlmsg_flags = pid ? NLM_F_MULTI : 0;
1482 ndm->ndm_family = n->ops->family;
1483 ndm->ndm_flags = n->flags;
1484 ndm->ndm_type = n->type;
1485 ndm->ndm_ifindex = n->dev->ifindex;
1486 RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
1487 read_lock_bh(&n->lock);
1489 ndm->ndm_state = n->nud_state;
1490 if (n->nud_state&NUD_VALID)
1491 RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
1492 ci.ndm_used = now - n->used;
1493 ci.ndm_confirmed = now - n->confirmed;
1494 ci.ndm_updated = now - n->updated;
1495 ci.ndm_refcnt = atomic_read(&n->refcnt) - 1;
1496 read_unlock_bh(&n->lock);
1498 RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1499 nlh->nlmsg_len = skb->tail - b;
1505 read_unlock_bh(&n->lock);
1506 skb_trim(skb, b - skb->data);
/*
 * Dump one table's entries into a netlink dump skb, resuming from the
 * bucket/index saved in cb->args by a previous partial dump.  Stops
 * (saving position -- save lines missing from this extraction) when
 * neigh_fill_info reports the skb is full.
 */
1511 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb)
1513 struct neighbour *n;
1518 s_idx = idx = cb->args[2];
1519 for (h=0; h <= tbl->hash_mask; h++) {
1520 if (h < s_h) continue;
1523 read_lock_bh(&tbl->lock);
1524 for (n = tbl->hash_buckets[h], idx = 0; n;
1525 n = n->next, idx++) {
1528 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
1529 cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
1530 read_unlock_bh(&tbl->lock);
1536 read_unlock_bh(&tbl->lock);
/*
 * neigh_dump_info - top-level RTM_GETNEIGH dump callback.
 *
 * Iterates all registered neighbour tables under neigh_tbl_lock,
 * filtering by the requested address family (0 = all families), and
 * delegates each matching table to neigh_dump_table().  The table
 * index t is checked against the saved s_t so resumed dumps skip
 * tables that were already completed; per-table resume state in
 * cb->args[1..] is cleared when moving on to a new table.
 */
1544 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1548 struct neigh_table *tbl;
1549 int family = ((struct rtgenmsg*)NLMSG_DATA(cb->nlh))->rtgen_family;
1553 read_lock(&neigh_tbl_lock);
1554 for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) {
1555 if (t < s_t) continue;
1556 if (family && tbl->family != family)
/* reset per-table resume slots (args[1]..) when entering a new table */
1559 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1560 if (neigh_dump_table(tbl, skb, cb) < 0)
1563 read_unlock(&neigh_tbl_lock);
/*
 * neigh_for_each - invoke @cb(@n, @cookie) for every neighbour in @tbl.
 *
 * The whole walk runs under tbl->lock (read, BH-disabled), so @cb must
 * not sleep and must not take tbl->lock itself.
 */
1570 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1574 read_lock_bh(&tbl->lock);
1575 for (chain = 0; chain <= tbl->hash_mask; chain++) {
1576 struct neighbour *n;
1578 for (n = tbl->hash_buckets[chain]; n; n = n->next)
1581 read_unlock_bh(&tbl->lock);
1583 EXPORT_SYMBOL(neigh_for_each);
1585 /* The tbl->lock must be held as a writer and BH disabled. */
/*
 * __neigh_for_each_release - walk every hash chain, letting @cb decide
 * per entry whether it should be unlinked from the table.
 *
 * Each entry is examined under its own n->lock (write).  Entries that
 * @cb selects are removed via the **np back-pointer so the chain stays
 * consistent during unlink (the unlink/release statements themselves
 * are between the lock/unlock below but elided in this excerpt).
 */
1586 void __neigh_for_each_release(struct neigh_table *tbl,
1587 int (*cb)(struct neighbour *))
1591 for (chain = 0; chain <= tbl->hash_mask; chain++) {
1592 struct neighbour *n, **np;
/* np tracks the link to patch if the current entry is removed */
1594 np = &tbl->hash_buckets[chain];
1595 while ((n = *np) != NULL) {
1598 write_lock(&n->lock);
1605 write_unlock(&n->lock);
1611 EXPORT_SYMBOL(__neigh_for_each_release);
1613 #ifdef CONFIG_PROC_FS
/*
 * neigh_get_first - seq_file helper: find the first neighbour entry.
 *
 * Scans hash buckets from 0 looking for a non-empty chain, applying
 * the optional per-protocol sub-iterator (neigh_sub_iter) and, when
 * NEIGH_SEQ_SKIP_NOARP is set, skipping entries whose state is only
 * NUD_NOARP.  Records the bucket reached in state->bucket and clears
 * the IS_PNEIGH flag since we are in the main (non-proxy) table.
 */
1615 static struct neighbour *neigh_get_first(struct seq_file *seq)
1617 struct neigh_seq_state *state = seq->private;
1618 struct neigh_table *tbl = state->tbl;
1619 struct neighbour *n = NULL;
1620 int bucket = state->bucket;
/* starting a fresh walk of the main table, not the proxy table */
1622 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
1623 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
1624 n = tbl->hash_buckets[bucket];
1627 if (state->neigh_sub_iter) {
/* fakep: a throwaway loff_t for the sub-iterator's pos argument */
1631 v = state->neigh_sub_iter(state, n, &fakep);
1635 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
/* keep the entry if any state bit besides NUD_NOARP is set */
1637 if (n->nud_state & ~NUD_NOARP)
1646 state->bucket = bucket;
/*
 * neigh_get_next - seq_file helper: advance from neighbour @n to the
 * next entry, honouring the sub-iterator and SKIP_NOARP filtering as
 * in neigh_get_first(), and moving to the next hash bucket when the
 * current chain is exhausted.
 *
 * @pos may be NULL (used by neigh_seq_next when it only needs one
 * step); when non-NULL it is passed through to the sub-iterator.
 */
1651 static struct neighbour *neigh_get_next(struct seq_file *seq,
1652 struct neighbour *n,
1655 struct neigh_seq_state *state = seq->private;
1656 struct neigh_table *tbl = state->tbl;
/* first, let the protocol sub-iterator advance within the entry */
1658 if (state->neigh_sub_iter) {
1659 void *v = state->neigh_sub_iter(state, n, pos);
1667 if (state->neigh_sub_iter) {
1668 void *v = state->neigh_sub_iter(state, n, pos);
1673 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
/* same NOARP filter as neigh_get_first() */
1676 if (n->nud_state & ~NUD_NOARP)
/* chain exhausted: step to the next bucket, stop after the last one */
1685 if (++state->bucket > tbl->hash_mask)
1688 n = tbl->hash_buckets[state->bucket];
/*
 * neigh_get_idx - seq_file helper: return the neighbour at offset
 * *pos, counting down from the first entry.  Returns NULL when *pos
 * is beyond the end of the main table (the final test yields n only
 * when *pos reached zero exactly).
 */
1696 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
1698 struct neighbour *n = neigh_get_first(seq);
1702 n = neigh_get_next(seq, n, pos);
1707 return *pos ? NULL : n;
/*
 * pneigh_get_first - seq_file helper: first proxy-neighbour entry.
 *
 * Mirrors neigh_get_first() but walks the fixed-size proxy hash
 * (PNEIGH_HASHMASK buckets) and sets the IS_PNEIGH flag so that
 * neigh_seq_next() knows which iterator family to use.
 */
1710 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
1712 struct neigh_seq_state *state = seq->private;
1713 struct neigh_table *tbl = state->tbl;
1714 struct pneigh_entry *pn = NULL;
1715 int bucket = state->bucket;
/* mark that the iteration has switched to the proxy table */
1717 state->flags |= NEIGH_SEQ_IS_PNEIGH;
1718 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
1719 pn = tbl->phash_buckets[bucket];
1723 state->bucket = bucket;
/*
 * pneigh_get_next - seq_file helper: advance from proxy entry @pn to
 * the next one, moving across proxy hash buckets as chains run out.
 * Returns NULL past the last bucket.
 */
1728 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
1729 struct pneigh_entry *pn,
1732 struct neigh_seq_state *state = seq->private;
1733 struct neigh_table *tbl = state->tbl;
/* current chain done: step to the next proxy bucket */
1737 if (++state->bucket > PNEIGH_HASHMASK)
1739 pn = tbl->phash_buckets[state->bucket];
/*
 * pneigh_get_idx - seq_file helper: return the proxy entry at offset
 * *pos, analogous to neigh_get_idx() for the main table.  NULL when
 * *pos is past the end.
 */
1750 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
1752 struct pneigh_entry *pn = pneigh_get_first(seq);
1756 pn = pneigh_get_next(seq, pn, pos);
1761 return *pos ? NULL : pn;
/*
 * neigh_get_idx_any - return the entry at *pos treating the main
 * table and the proxy table as one concatenated sequence: main-table
 * entries first, then (unless NEIGH_SEQ_NEIGH_ONLY is set) proxy
 * entries, with *pos decremented by neigh_get_idx() along the way.
 */
1764 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
1766 struct neigh_seq_state *state = seq->private;
1769 rc = neigh_get_idx(seq, pos);
1770 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
1771 rc = pneigh_get_idx(seq, pos);
/*
 * neigh_seq_start - common seq_file ->start for protocol /proc views
 * (e.g. arp).  Takes tbl->lock (read, BH-disabled) for the duration
 * of the sequence; neigh_seq_stop() releases it.
 *
 * Position 0 is the SEQ_START_TOKEN header line, so real entries are
 * addressed at *pos - 1 via the pos_minus_one temporary.
 */
1776 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
1778 struct neigh_seq_state *state = seq->private;
1779 loff_t pos_minus_one;
/* caller must not pass the internal IS_PNEIGH flag; mask it out */
1783 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
1785 read_lock_bh(&tbl->lock);
1787 pos_minus_one = *pos - 1;
1788 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
/*
 * neigh_seq_next - common seq_file ->next.  After the start token it
 * yields the first main-table entry; while in the main table it steps
 * with neigh_get_next() and falls through to the proxy table when the
 * main table is exhausted (unless NEIGH_SEQ_NEIGH_ONLY); in the proxy
 * table it steps with pneigh_get_next().
 */
1791 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1793 struct neigh_seq_state *state;
1796 if (v == SEQ_START_TOKEN) {
1797 rc = neigh_get_idx(seq, pos);
1801 state = seq->private;
/* IS_PNEIGH tells us which table @v currently belongs to */
1802 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
1803 rc = neigh_get_next(seq, v, NULL);
1806 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
/* main table done: cross over into the proxy table */
1807 rc = pneigh_get_first(seq);
/* already iterating pneigh while NEIGH_ONLY set would be a bug */
1809 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
1810 rc = pneigh_get_next(seq, v, NULL);
/*
 * neigh_seq_stop - common seq_file ->stop: drops the table lock taken
 * in neigh_seq_start().
 */
1817 void neigh_seq_stop(struct seq_file *seq, void *v)
1819 struct neigh_seq_state *state = seq->private;
1820 struct neigh_table *tbl = state->tbl;
1822 read_unlock_bh(&tbl->lock);
1825 /* statistics via seq_file */
/*
 * neigh_stat_seq_start - seq_file ->start for the per-table statistics
 * file.  Position 0 yields SEQ_START_TOKEN (the column header);
 * subsequent positions map logical CPU numbers to the per-CPU
 * tbl->stats slot via cpu_logical_map() (2.4-era SMP API).
 */
1827 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
/* the table pointer was stashed in the proc_dir_entry's ->data */
1829 struct proc_dir_entry *pde = seq->private;
1830 struct neigh_table *tbl = pde->data;
1834 return SEQ_START_TOKEN;
/* *pos - 1 because position 0 is the header token */
1836 for (lcpu = *pos-1; lcpu < smp_num_cpus; ++lcpu) {
1837 int i = cpu_logical_map(lcpu);
1839 return &tbl->stats[i];
/*
 * neigh_stat_seq_next - seq_file ->next for the statistics file:
 * return the stats slot of the next online logical CPU, or NULL when
 * all CPUs have been emitted.
 */
1844 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1846 struct proc_dir_entry *pde = seq->private;
1847 struct neigh_table *tbl = pde->data;
1850 for (lcpu = *pos; lcpu < smp_num_cpus; ++lcpu) {
1851 int i = cpu_logical_map(lcpu);
1853 return &tbl->stats[i];
/*
 * neigh_stat_seq_stop - seq_file ->stop for the statistics file; no
 * lock was taken in ->start, so there is nothing to release.
 */
1858 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
/*
 * neigh_stat_seq_show - print one row of the statistics file: the
 * column header for SEQ_START_TOKEN, otherwise one per-CPU
 * neigh_statistics record (plus the global entry count) as
 * fixed-width hex fields.
 *
 * NOTE(review): the header names 12 columns but only 11 conversion
 * specifiers are visible in the format string below — verify the two
 * match in the full source.
 */
1863 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
1865 struct proc_dir_entry *pde = seq->private;
1866 struct neigh_table *tbl = pde->data;
1867 struct neigh_statistics *st = v;
1869 if (v == SEQ_START_TOKEN) {
1870 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs forced_gc_goal_miss\n");
1874 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
1875 "%08lx %08lx %08lx %08lx\n",
/* "entries" is table-wide, not per-CPU, hence tbl->entries here */
1876 atomic_read(&tbl->entries),
1887 st->rcv_probes_mcast,
1888 st->rcv_probes_ucast,
1890 st->periodic_gc_runs,
/* seq_file operations for the per-table /proc statistics file */
1897 static struct seq_operations neigh_stat_seq_ops = {
1898 .start = neigh_stat_seq_start,
1899 .next = neigh_stat_seq_next,
1900 .stop = neigh_stat_seq_stop,
1901 .show = neigh_stat_seq_show,
/*
 * neigh_stat_seq_open - open handler for the statistics file: start a
 * seq_file session and, on success, stash the proc_dir_entry in
 * seq->private so ->start/->show can reach the neigh_table via
 * pde->data.
 */
1904 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
1906 int ret = seq_open(file, &neigh_stat_seq_ops);
1909 struct seq_file *sf = file->private_data;
1910 sf->private = PDE(inode);
/*
 * file_operations for the statistics proc file.
 * NOTE(review): no .read handler is visible in this excerpt — confirm
 * .read = seq_read is present in the full initializer.
 */
1915 static struct file_operations neigh_stat_seq_fops = {
1916 .owner = THIS_MODULE,
1917 .open = neigh_stat_seq_open,
1919 .llseek = seq_lseek,
1920 .release = seq_release,
1923 #endif /* CONFIG_PROC_FS */
/*
 * neigh_app_ns - ask user space (ARPD) to resolve neighbour @n.
 *
 * Builds an RTM_GETNEIGH message describing @n, marks it NLM_F_REQUEST
 * so the daemon treats it as a solicitation, and broadcasts it to the
 * RTMGRP_NEIGH multicast group on the rtnetlink socket.  Best effort:
 * allocation or fill failures simply drop the notification.
 */
1926 void neigh_app_ns(struct neighbour *n)
1928 struct sk_buff *skb;
1929 struct nlmsghdr *nlh;
/* 256 bytes of slack for the address attributes */
1930 int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);
/* may run from softirq context, hence GFP_ATOMIC */
1932 skb = alloc_skb(size, GFP_ATOMIC);
1936 if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
1940 nlh = (struct nlmsghdr*)skb->data;
/* distinguish this solicitation from a plain state notification */
1941 nlh->nlmsg_flags = NLM_F_REQUEST;
1942 NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
1943 netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
/*
 * neigh_app_notify - broadcast an RTM_NEWNEIGH state-change
 * notification for @n to the RTMGRP_NEIGH group.  Same best-effort
 * construction as neigh_app_ns(), but without NLM_F_REQUEST since
 * this merely informs listeners rather than soliciting resolution.
 */
1946 static void neigh_app_notify(struct neighbour *n)
1948 struct sk_buff *skb;
1949 struct nlmsghdr *nlh;
1950 int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);
1952 skb = alloc_skb(size, GFP_ATOMIC);
1956 if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
1960 nlh = (struct nlmsghdr*)skb->data;
1961 NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
1962 netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
1965 #endif /* CONFIG_ARPD */
1967 #ifdef CONFIG_SYSCTL
/*
 * Template for the per-device neighbour sysctl tree
 * (net/<proto>/neigh/<dev>/...).  neigh_sysctl_register() copies this
 * template, points the .data fields at the caller's neigh_parms, and
 * links the four directory levels (root -> proto -> "neigh" -> dev)
 * before registering.  neigh_vars[16] leaves room for the 12 per-parms
 * knobs, the 4 table-wide gc_* knobs, and a terminator.
 */
1969 struct neigh_sysctl_table
1971 struct ctl_table_header *sysctl_header;
1972 ctl_table neigh_vars[17];
1973 ctl_table neigh_dev[2];
1974 ctl_table neigh_neigh_dir[2];
1975 ctl_table neigh_proto_dir[2];
1976 ctl_table neigh_root_dir[2];
1977 } neigh_sysctl_template = {
/* .data is NULL here; filled in per-instance by neigh_sysctl_register() */
1979 {{NET_NEIGH_MCAST_SOLICIT, "mcast_solicit",
1980 NULL, sizeof(int), 0644, NULL,
1982 {NET_NEIGH_UCAST_SOLICIT, "ucast_solicit",
1983 NULL, sizeof(int), 0644, NULL,
1985 {NET_NEIGH_APP_SOLICIT, "app_solicit",
1986 NULL, sizeof(int), 0644, NULL,
1988 {NET_NEIGH_RETRANS_TIME, "retrans_time",
1989 NULL, sizeof(int), 0644, NULL,
/* time-valued knobs use the jiffies conversion handler */
1991 {NET_NEIGH_REACHABLE_TIME, "base_reachable_time",
1992 NULL, sizeof(int), 0644, NULL,
1993 &proc_dointvec_jiffies},
1994 {NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time",
1995 NULL, sizeof(int), 0644, NULL,
1996 &proc_dointvec_jiffies},
1997 {NET_NEIGH_GC_STALE_TIME, "gc_stale_time",
1998 NULL, sizeof(int), 0644, NULL,
1999 &proc_dointvec_jiffies},
2000 {NET_NEIGH_UNRES_QLEN, "unres_qlen",
2001 NULL, sizeof(int), 0644, NULL,
2003 {NET_NEIGH_PROXY_QLEN, "proxy_qlen",
2004 NULL, sizeof(int), 0644, NULL,
2006 {NET_NEIGH_ANYCAST_DELAY, "anycast_delay",
2007 NULL, sizeof(int), 0644, NULL,
2009 {NET_NEIGH_PROXY_DELAY, "proxy_delay",
2010 NULL, sizeof(int), 0644, NULL,
2012 {NET_NEIGH_LOCKTIME, "locktime",
2013 NULL, sizeof(int), 0644, NULL,
/* entries 13-16: table-wide GC tunables, used for the "default" dir */
2015 {NET_NEIGH_GC_INTERVAL, "gc_interval",
2016 NULL, sizeof(int), 0644, NULL,
2017 &proc_dointvec_jiffies},
2018 {NET_NEIGH_GC_THRESH1, "gc_thresh1",
2019 NULL, sizeof(int), 0644, NULL,
2021 {NET_NEIGH_GC_THRESH2, "gc_thresh2",
2022 NULL, sizeof(int), 0644, NULL,
2024 {NET_NEIGH_GC_THRESH3, "gc_thresh3",
2025 NULL, sizeof(int), 0644, NULL,
/* directory skeletons; names/ctl_names are patched per-instance */
2029 {{NET_PROTO_CONF_DEFAULT, "default", NULL, 0, 0555, NULL},{0}},
2030 {{0, "neigh", NULL, 0, 0555, NULL},{0}},
2031 {{0, NULL, NULL, 0, 0555, NULL},{0}},
2032 {{CTL_NET, "net", NULL, 0, 0555, NULL},{0}}
/*
 * neigh_sysctl_register - instantiate the sysctl template for one
 * neigh_parms block @p.
 *
 * @dev:     device the parms belong to, or NULL for the per-table
 *           "default" entry
 * @p_id:    ctl_name of the protocol directory (e.g. NET_IPV4)
 * @pdev_id: ctl_name of the per-device "neigh" subdirectory
 * @p_name:  procname of the protocol directory (e.g. "ipv4")
 *
 * Copies neigh_sysctl_template, points neigh_vars[0..11].data at the
 * individual fields of @p, wires the directory hierarchy together and
 * registers it.  On success the table is remembered in
 * p->sysctl_table for later unregistration.  Returns 0 on success or
 * a negative errno (allocation/registration failure paths are elided
 * in this excerpt).
 */
2035 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2036 int p_id, int pdev_id, char *p_name)
2038 struct neigh_sysctl_table *t;
2040 t = kmalloc(sizeof(*t), GFP_KERNEL);
2043 memcpy(t, &neigh_sysctl_template, sizeof(*t));
/* bind each knob to the corresponding field of this parms instance */
2044 t->neigh_vars[0].data = &p->mcast_probes;
2045 t->neigh_vars[1].data = &p->ucast_probes;
2046 t->neigh_vars[2].data = &p->app_probes;
2047 t->neigh_vars[3].data = &p->retrans_time;
2048 t->neigh_vars[4].data = &p->base_reachable_time;
2049 t->neigh_vars[5].data = &p->delay_probe_time;
2050 t->neigh_vars[6].data = &p->gc_staletime;
2051 t->neigh_vars[7].data = &p->queue_len;
2052 t->neigh_vars[8].data = &p->proxy_qlen;
2053 t->neigh_vars[9].data = &p->anycast_delay;
2054 t->neigh_vars[10].data = &p->proxy_delay;
2055 t->neigh_vars[11].data = &p->locktime;
/* per-device case: name the dir after the device... */
2057 t->neigh_dev[0].procname = dev->name;
2058 t->neigh_dev[0].ctl_name = dev->ifindex;
/* ...and terminate neigh_vars at [12] (no table-wide gc knobs here) */
2059 memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
/*
 * Default (dev == NULL) case: expose the 4 gc_* ints stored directly
 * after the parms struct.  NOTE(review): the layout assumption that
 * the gc tunables live at (int *)(p + 1) is established by the caller
 * (neigh_table layout) — confirm against the full source.
 */
2061 t->neigh_vars[12].data = (int*)(p+1);
2062 t->neigh_vars[13].data = (int*)(p+1) + 1;
2063 t->neigh_vars[14].data = (int*)(p+1) + 2;
2064 t->neigh_vars[15].data = (int*)(p+1) + 3;
2066 t->neigh_neigh_dir[0].ctl_name = pdev_id;
2068 t->neigh_proto_dir[0].procname = p_name;
2069 t->neigh_proto_dir[0].ctl_name = p_id;
/* chain the four directory levels: net -> proto -> neigh -> dev */
2071 t->neigh_dev[0].child = t->neigh_vars;
2072 t->neigh_neigh_dir[0].child = t->neigh_dev;
2073 t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
2074 t->neigh_root_dir[0].child = t->neigh_proto_dir;
2076 t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
2077 if (t->sysctl_header == NULL) {
2081 p->sysctl_table = t;
/*
 * neigh_sysctl_unregister - tear down the sysctl tree created by
 * neigh_sysctl_register().  Clears p->sysctl_table before
 * unregistering so the parms no longer reference the table; the
 * kfree of @t follows the unregister (elided in this excerpt).
 * Safe to call when no table was ever registered.
 */
2085 void neigh_sysctl_unregister(struct neigh_parms *p)
2087 if (p->sysctl_table) {
2088 struct neigh_sysctl_table *t = p->sysctl_table;
2089 p->sysctl_table = NULL;
2090 unregister_sysctl_table(t->sysctl_header);
2095 #endif /* CONFIG_SYSCTL */