/*
 *      Generic address resolution entity
 *
 *      Authors:
 *      Pedro Roque             <pedro_m@yahoo.com>
 *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
 *      Harald Welte            Add neighbour cache statistics like rtstat
 *      Harald Welte            port neighbour cache rework from 2.6.9-rcX
 *      Pradeep Vincent         Move neighbour cache entry to stale state
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK         0xF

static void neigh_timer_handler(unsigned long arg);
#ifdef CONFIG_ARPD
static void neigh_app_notify(struct neighbour *n);
#endif
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static int neigh_glbl_allocs;
static struct neigh_table *neigh_tables;
static struct file_operations neigh_stat_seq_fops;

/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */

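/*
 * Illustrative sketch (not part of the original file): the canonical way
 * to do non-trivial work on an entry found during a table scan, per the
 * rules above, is to take a reference under tbl->lock, drop the lock and
 * only then act.  match() and do_slow_work() are hypothetical helpers.
 *
 *	read_lock_bh(&tbl->lock);
 *	for (n = tbl->hash_buckets[i]; n; n = n->next) {
 *		if (match(n)) {
 *			neigh_hold(n);			(pin the entry)
 *			break;
 *		}
 *	}
 *	read_unlock_bh(&tbl->lock);
 *	if (n) {
 *		do_slow_work(n);		(may talk to backends/network)
 *		neigh_release(n);		(drop the reference when done)
 *	}
 *
 * neigh_lookup() below follows exactly this hold-then-release pattern.
 */
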
static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;

static int neigh_blackhole(struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}

/*
 * This is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
        return (base ? (net_random() % base) + (base >> 1) : 0);
}

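/*
 * Worked example (illustrative): with base = 30*HZ, net_random() % base
 * is uniform in [0, 30*HZ) and base >> 1 adds 15*HZ, so the result is
 * uniform in [15*HZ, 45*HZ), i.e. (1/2)*base ... (3/2)*base as stated.
 */
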

static int neigh_forced_gc(struct neigh_table *tbl)
{
        int shrunk = 0;
        int i;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);
        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
                         * - it is not permanent
                         */
                        write_lock(&n->lock);
                        if (atomic_read(&n->refcnt) == 1 &&
                            !(n->nud_state&NUD_PERMANENT)) {
                                *np = n->next;
                                n->dead = 1;
                                shrunk = 1;
                                write_unlock(&n->lock);
                                neigh_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);
                        np = &n->next;
                }
        }

        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}

static int neigh_del_timer(struct neighbour *n)
{
        if (n->nud_state & NUD_IN_TIMER) {
                if (del_timer(&n->timer)) {
                        neigh_release(n);
                        return 1;
                }
        }
        return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        int i;

        write_lock_bh(&tbl->lock);

        for (i=0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        *np = n->next;
                        write_lock(&n->lock);
                        n->dead = 1;
                        neigh_del_timer(n);
                        write_unlock(&n->lock);
                        neigh_release(n);
                }
        }

        write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        int i;

        write_lock_bh(&tbl->lock);

        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        *np = n->next;
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        n->dead = 1;

                        if (atomic_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation.
                                   We must destroy the neighbour entry,
                                   but someone still uses it.

                                   The destroy will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to a safe state.
                                 */
                                n->parms = &tbl->parms;
                                skb_queue_purge(&n->arp_queue);
                                n->output = neigh_blackhole;
                                if (n->nud_state&NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                NEIGH_PRINTK2("neigh %p is stray.\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_release(n);
                }
        }

        pneigh_ifdown(tbl, dev);
        write_unlock_bh(&tbl->lock);

        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        return 0;
}

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
        struct neighbour *n;
        unsigned long now = jiffies;

        if (atomic_read(&tbl->entries) > tbl->gc_thresh3 ||
            (atomic_read(&tbl->entries) > tbl->gc_thresh2 &&
             now - tbl->last_flush > 5*HZ)) {
                if (neigh_forced_gc(tbl) == 0 &&
                    atomic_read(&tbl->entries) > tbl->gc_thresh3)
                        return NULL;
        }

        n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
        if (n == NULL)
                return NULL;

        memset(n, 0, tbl->entry_size);

        skb_queue_head_init(&n->arp_queue);
        n->lock = RW_LOCK_UNLOCKED;
        n->updated = n->used = now;
        n->nud_state = NUD_NONE;
        n->output = neigh_blackhole;
        n->parms = &tbl->parms;
        init_timer(&n->timer);
        n->timer.function = neigh_timer_handler;
        n->timer.data = (unsigned long)n;
        NEIGH_CACHE_STAT_INC(tbl, allocs);
        neigh_glbl_allocs++;
        atomic_inc(&tbl->entries);
        n->tbl = tbl;
        atomic_set(&n->refcnt, 1);
        n->dead = 1;
        return n;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);
        struct neighbour **ret;

        if (size <= PAGE_SIZE) {
                ret = kmalloc(size, GFP_ATOMIC);
        } else {
                ret = (struct neighbour **)
                        __get_free_pages(GFP_ATOMIC, get_order(size));
        }
        if (ret)
                memset(ret, 0, size);

        return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);

        if (size <= PAGE_SIZE)
                kfree(hash);
        else
                free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
        struct neighbour **new_hash, **old_hash;
        unsigned int i, new_hash_mask, old_entries;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        BUG_ON(new_entries & (new_entries - 1));
        new_hash = neigh_hash_alloc(new_entries);
        if (!new_hash)
                return;

        old_entries = tbl->hash_mask + 1;
        new_hash_mask = new_entries - 1;
        old_hash = tbl->hash_buckets;

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
        for (i = 0; i < old_entries; i++) {
                struct neighbour *n, *next;

                for (n = old_hash[i]; n; n = next) {
                        unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

                        hash_val &= new_hash_mask;
                        next = n->next;

                        n->next = new_hash[hash_val];
                        new_hash[hash_val] = n;
                }
        }
        tbl->hash_buckets = new_hash;
        tbl->hash_mask = new_hash_mask;

        neigh_hash_free(old_hash, old_entries);
}

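/*
 * Note (illustrative): new_entries must be a power of two -- the BUG_ON
 * in neigh_hash_grow() enforces this -- so new_hash_mask = new_entries - 1
 * is an all-ones mask and "hash_val & new_hash_mask" is a cheap modulo.
 * E.g. growing from 2 buckets (mask 0x1) to 4 buckets (mask 0x3)
 * re-spreads every chain across the four new buckets.
 */
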
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
                if (dev == n->dev &&
                    memcmp(n->primary_key, pkey, key_len) == 0) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->primary_key, pkey, key_len)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}

struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey,
                                struct net_device *dev)
{
        struct neighbour *n, *n1;
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;

        n = neigh_alloc(tbl);
        if (n == NULL)
                return ERR_PTR(-ENOBUFS);

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        dev_hold(dev);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                neigh_release(n);
                return ERR_PTR(error);
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                neigh_release(n);
                return ERR_PTR(error);
        }

        n->confirmed = jiffies - (n->parms->base_reachable_time<<1);

        write_lock_bh(&tbl->lock);
        if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
                neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

        hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
                if (dev == n1->dev &&
                    memcmp(n1->primary_key, pkey, key_len) == 0) {
                        neigh_hold(n1);
                        write_unlock_bh(&tbl->lock);
                        neigh_release(n);
                        return n1;
                }
        }

        n->next = tbl->hash_buckets[hash_val];
        tbl->hash_buckets[hash_val] = n;
        n->dead = 0;
        neigh_hold(n);
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK2("neigh %p is created.\n", n);
        return n;
}

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
                                    struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        u32 hash_val;
        int key_len = tbl->key_len;

        hash_val = *(u32*)(pkey + key_len - 4);
        hash_val ^= (hash_val>>16);
        hash_val ^= hash_val>>8;
        hash_val ^= hash_val>>4;
        hash_val &= PNEIGH_HASHMASK;

        read_lock_bh(&tbl->lock);

        for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
                if (memcmp(n->key, pkey, key_len) == 0 &&
                    (n->dev == dev || !n->dev)) {
                        read_unlock_bh(&tbl->lock);
                        return n;
                }
        }
        read_unlock_bh(&tbl->lock);
        if (!creat)
                return NULL;

        n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (n == NULL)
                return NULL;

        memcpy(n->key, pkey, key_len);
        n->dev = dev;

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                kfree(n);
                return NULL;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
        return n;
}

int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        u32 hash_val;
        int key_len = tbl->key_len;

        hash_val = *(u32*)(pkey + key_len - 4);
        hash_val ^= (hash_val>>16);
        hash_val ^= hash_val>>8;
        hash_val ^= hash_val>>4;
        hash_val &= PNEIGH_HASHMASK;

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) {
                if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) {
                        *np = n->next;
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        u32 h;

        for (h=0; h<=PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n=*np) != NULL) {
                        if (n->dev == dev || dev == NULL) {
                                *np = n->next;
                                if (tbl->pdestructor)
                                        tbl->pdestructor(n);
                                kfree(n);
                                continue;
                        }
                        np = &n->next;
                }
        }
        return -ENOENT;
}

/*
 *      The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                printk("Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                printk("Impossible event.\n");

        while ((hh = neigh->hh) != NULL) {
                neigh->hh = hh->hh_next;
                hh->hh_next = NULL;
                write_lock_bh(&hh->hh_lock);
                hh->hh_output = neigh_blackhole;
                write_unlock_bh(&hh->hh_lock);
                if (atomic_dec_and_test(&hh->hh_refcnt))
                        kfree(hh);
        }

        if (neigh->ops && neigh->ops->destructor)
                (neigh->ops->destructor)(neigh);

        skb_queue_purge(&neigh->arp_queue);

        dev_put(neigh->dev);

        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

        neigh_glbl_allocs--;
        atomic_dec(&neigh->tbl->entries);
        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

        neigh->output = neigh->ops->output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

        neigh->output = neigh->ops->connected_output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->hh_output;
}

/*
   Transitions NUD_STALE <-> NUD_REACHABLE do not occur
   while the fast path is built: we have no timers associated with
   these states, and we do not have time to check the state when sending.
   neigh_periodic_timer periodically checks the neigh->confirmed
   time and moves NUD_REACHABLE -> NUD_STALE.

   If a routine wants to know the TRUE entry state, it calls
   neigh_sync before checking the state.

   Called with write_locked neigh.
 */
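
/*
 * Illustrative sketch (not part of the original file): a caller that
 * needs the up-to-date state would therefore do something like
 *
 *	write_lock_bh(&n->lock);
 *	neigh_sync(n);			(lazily apply STALE/REACHABLE)
 *	if (n->nud_state & NUD_VALID)
 *		... use n->ha ...
 *	write_unlock_bh(&n->lock);
 *
 * rather than trusting a possibly stale nud_state snapshot.
 */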

static void neigh_sync(struct neighbour *n)
{
        unsigned long now = jiffies;
        u8 state = n->nud_state;

        if (state&(NUD_NOARP|NUD_PERMANENT))
                return;
        if (state&NUD_REACHABLE) {
                if (now - n->confirmed > n->parms->reachable_time) {
                        n->nud_state = NUD_STALE;
                        neigh_suspect(n);
                }
        } else if (state&NUD_VALID) {
                if (now - n->confirmed < n->parms->reachable_time) {
                        neigh_del_timer(n);
                        n->nud_state = NUD_REACHABLE;
                        neigh_connect(n);
                }
        }
}

static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table*)arg;
        struct neighbour *n, **np;
        unsigned long expire, now = jiffies;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock(&tbl->lock);

        /*
         *      periodically recompute ReachableTime from the random function
         */

        if (now - tbl->last_rand > 300*HZ) {
                struct neigh_parms *p;
                tbl->last_rand = now;
                for (p=&tbl->parms; p; p = p->next)
                        p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
        }

        np = &tbl->hash_buckets[tbl->hash_chain_gc];
        tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

        while ((n = *np) != NULL) {
                unsigned int state;

                write_lock(&n->lock);

                state = n->nud_state;
                if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
                        write_unlock(&n->lock);
                        goto next_elt;
                }

                if (time_before(n->used, n->confirmed))
                        n->used = n->confirmed;

                if (atomic_read(&n->refcnt) == 1 &&
                    (state == NUD_FAILED ||
                     time_after(now, n->used + n->parms->gc_staletime))) {
                        *np = n->next;
                        n->dead = 1;
                        write_unlock(&n->lock);
                        neigh_release(n);
                        continue;
                }

                /* Mark it stale - to be reconfirmed later when used */
                if (n->nud_state & NUD_REACHABLE &&
                    now - n->confirmed > n->parms->reachable_time) {
                        n->nud_state = NUD_STALE;
                        neigh_suspect(n);
                }

                write_unlock(&n->lock);

next_elt:
                np = &n->next;
        }

        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
         * base_reachable_time.
         */
        expire = tbl->parms.base_reachable_time >> 1;
        expire /= (tbl->hash_mask + 1);
        if (!expire)
                expire = 1;

        mod_timer(&tbl->gc_timer, now + expire);

        write_unlock(&tbl->lock);
}

#ifdef CONFIG_SMP
static void neigh_periodic_timer(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table*)arg;

        tasklet_schedule(&tbl->gc_task);
}
#endif

static __inline__ int neigh_max_probes(struct neighbour *n)
{
        struct neigh_parms *p = n->parms;
        return p->ucast_probes + p->app_probes + p->mcast_probes;
}

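/*
 * Worked example (illustrative, assuming the classic ARP defaults of
 * ucast_probes = 3, app_probes = 0 and mcast_probes = 3): an unanswered
 * resolution gives up after 3 + 0 + 3 = 6 probes.
 */
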

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
        unsigned long now = jiffies;
        struct neighbour *neigh = (struct neighbour*)arg;
        struct sk_buff *skb;
        unsigned state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;

        if (!(state&NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
                printk("neigh: timer & !nud_in_timer\n");
#endif
                goto out;
        }

        if ((state&NUD_VALID) &&
            now - neigh->confirmed < neigh->parms->reachable_time) {
                neigh->nud_state = NUD_REACHABLE;
                NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
                neigh_connect(neigh);
                goto out;
        }
        if (state == NUD_DELAY) {
                NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
                neigh->nud_state = NUD_PROBE;
                atomic_set(&neigh->probes, 0);
        }

        if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                struct sk_buff *skb;

                neigh->nud_state = NUD_FAILED;
                notify = 1;
                NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
                NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

                /* This is a very thin place. error_report is a very
                   complicated routine. In particular, it can hit the
                   same neighbour entry!

                   So we try to be careful here and avoid a dead loop. --ANK
                 */
                while (neigh->nud_state == NUD_FAILED &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        write_unlock(&neigh->lock);
                        neigh->ops->error_report(neigh, skb);
                        write_lock(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
                goto out;
        }

        neigh->timer.expires = now + neigh->parms->retrans_time;
        add_timer(&neigh->timer);

        /* keep skb alive even if arp_queue overflows */
        skb = skb_peek(&neigh->arp_queue);
        if (skb)
                skb_get(skb);

        write_unlock(&neigh->lock);

        neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);

        if (skb)
                kfree_skb(skb);

        return;

out:
        write_unlock(&neigh->lock);
#ifdef CONFIG_ARPD
        if (notify && neigh->parms->app_probes)
                neigh_app_notify(neigh);
#endif
        neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
        write_lock_bh(&neigh->lock);
        if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
                if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
                        if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
                                atomic_set(&neigh->probes, neigh->parms->ucast_probes);
                                neigh->nud_state = NUD_INCOMPLETE;
                                neigh_hold(neigh);
                                neigh->timer.expires = jiffies + neigh->parms->retrans_time;
                                add_timer(&neigh->timer);
                                write_unlock_bh(&neigh->lock);
                                neigh->ops->solicit(neigh, skb);
                                atomic_inc(&neigh->probes);
                                write_lock_bh(&neigh->lock);
                        } else {
                                neigh->nud_state = NUD_FAILED;
                                write_unlock_bh(&neigh->lock);

                                if (skb)
                                        kfree_skb(skb);
                                return 1;
                        }
                }
                if (neigh->nud_state == NUD_INCOMPLETE) {
                        if (skb) {
                                if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) {
                                        struct sk_buff *buff;
                                        buff = neigh->arp_queue.next;
                                        __skb_unlink(buff, &neigh->arp_queue);
                                        kfree_skb(buff);
                                }
                                __skb_queue_tail(&neigh->arp_queue, skb);
                        }
                        write_unlock_bh(&neigh->lock);
                        return 1;
                }
                if (neigh->nud_state == NUD_STALE) {
                        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                        neigh_hold(neigh);
                        neigh->nud_state = NUD_DELAY;
                        neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
                        add_timer(&neigh->timer);
                }
        }
        write_unlock_bh(&neigh->lock);
        return 0;
}

static __inline__ void neigh_update_hhs(struct neighbour *neigh)
{
        struct hh_cache *hh;
        void (*update)(struct hh_cache*, struct net_device*, unsigned char*) =
                neigh->dev->header_cache_update;

        if (update) {
                for (hh=neigh->hh; hh; hh=hh->hh_next) {
                        write_lock_bh(&hh->hh_lock);
                        update(hh, neigh->dev, neigh->ha);
                        write_unlock_bh(&hh->hh_lock);
                }
        }
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- override==1 allows overriding an existing lladdr if it differs.
   -- arp==0 means that the change is administrative.

   The caller MUST hold a reference count on the entry.
 */
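
/*
 * Illustrative sketch (assumed caller, not part of the original file):
 * a receive path confirming a neighbour from an ARP reply might do
 *
 *	n = neigh_lookup(&arp_tbl, &sip, dev);
 *	if (n) {
 *		neigh_update(n, sha, NUD_REACHABLE, 0, 1);	(arp==1: learned)
 *		neigh_release(n);
 *	}
 *
 * while administrative changes ("ip neigh replace" style) pass arp==0,
 * as neigh_add() below does, typically with override==1.
 */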

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp)
{
        u8 old;
        int err;
        int notify = 0;
        struct net_device *dev = neigh->dev;

        write_lock_bh(&neigh->lock);
        old = neigh->nud_state;

        err = -EPERM;
        if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
                goto out;

        if (!(new&NUD_VALID)) {
                neigh_del_timer(neigh);
                if (old&NUD_CONNECTED)
                        neigh_suspect(neigh);
                neigh->nud_state = new;
                err = 0;
                notify = old&NUD_VALID;
                goto out;
        }

        /* Compare the new lladdr with the cached one */
        if (dev->addr_len == 0) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: if something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check the override flag
                 */
                if (old&NUD_VALID) {
                        if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
                                lladdr = neigh->ha;
                        else if (!override)
                                goto out;
                }
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old&NUD_VALID))
                        goto out;
                lladdr = neigh->ha;
        }

        neigh_sync(neigh);
        old = neigh->nud_state;
        if (new&NUD_CONNECTED)
                neigh->confirmed = jiffies;
        neigh->updated = jiffies;

        /* If the entry was valid and the address has not changed,
           do not change the entry state if the new one is STALE.
         */
        err = 0;
        if (old&NUD_VALID) {
                if (lladdr == neigh->ha)
                        if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
                                goto out;
        }
        neigh_del_timer(neigh);
        neigh->nud_state = new;
        if (lladdr != neigh->ha) {
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                neigh_update_hhs(neigh);
                if (!(new&NUD_CONNECTED))
                        neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
                notify = 1;
#endif
        }
        if (new == old)
                goto out;
        if (new&NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old&NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid a dead loop if something went wrong */

                while (neigh->nud_state&NUD_VALID &&
                       (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct neighbour *n1 = neigh;
                        write_unlock_bh(&neigh->lock);
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
                        if (skb->dst && skb->dst->neighbour)
                                n1 = skb->dst->neighbour;
                        n1->output(skb);
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }
out:
        write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
        if (notify && neigh->parms->app_probes)
                neigh_app_notify(neigh);
#endif
        return err;
}

struct neighbour * neigh_event_ns(struct neigh_table *tbl,
                                  u8 *lladdr, void *saddr,
                                  struct net_device *dev)
{
        struct neighbour *neigh;

        neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
        return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol)
{
        struct hh_cache *hh = NULL;
        struct net_device *dev = dst->dev;

        for (hh=n->hh; hh; hh = hh->hh_next)
                if (hh->hh_type == protocol)
                        break;

        if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                memset(hh, 0, sizeof(struct hh_cache));
                hh->hh_lock = RW_LOCK_UNLOCKED;
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);
                hh->hh_next = NULL;
                if (dev->hard_header_cache(n, hh)) {
                        kfree(hh);
                        hh = NULL;
                } else {
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_next = n->hh;
                        n->hh = hh;
                        if (n->nud_state&NUD_CONNECTED)
                                hh->hh_output = n->ops->hh_output;
                        else
                                hh->hh_output = n->ops->output;
                }
        }
        if (hh) {
                atomic_inc(&hh->hh_refcnt);
                dst->hh = hh;
        }
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper)
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        __skb_pull(skb, skb->nh.raw - skb->data);

        if (dev->hard_header &&
            dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 &&
            dev->rebuild_header(skb))
                return 0;

        return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh;

        if (!dst || !(neigh = dst->neighbour))
                goto discard;

        __skb_pull(skb, skb->nh.raw - skb->data);

        if (neigh_event_send(neigh, skb) == 0) {
                int err;
                struct net_device *dev = neigh->dev;
                if (dev->hard_header_cache && dst->hh == NULL) {
                        write_lock_bh(&neigh->lock);
                        if (dst->hh == NULL)
                                neigh_hh_init(neigh, dst, dst->ops->protocol);
                        err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
                        write_unlock_bh(&neigh->lock);
                } else {
                        read_lock_bh(&neigh->lock);
                        err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
                        read_unlock_bh(&neigh->lock);
                }
                if (err >= 0)
                        return neigh->ops->queue_xmit(skb);
                kfree_skb(skb);
                return -EINVAL;
        }
        return 0;

discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL);
        kfree_skb(skb);
        return -EINVAL;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
        int err;
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh = dst->neighbour;
        struct net_device *dev = neigh->dev;

        __skb_pull(skb, skb->nh.raw - skb->data);

        read_lock_bh(&neigh->lock);
        err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
        read_unlock_bh(&neigh->lock);
        if (err >= 0)
                return neigh->ops->queue_xmit(skb);
        kfree_skb(skb);
        return -EINVAL;
}

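/*
 * Summary (illustrative): the three output hooks above trade safety for
 * speed.  neigh_resolve_output() copes with unresolved entries and builds
 * the hh cache; neigh_connected_output() assumes a valid lladdr and only
 * copies neigh->ha under the read lock; hh->hh_output (typically
 * dev_queue_xmit behind a prebuilt cached header) is the fastest path.
 * neigh_suspect() and neigh_connect() switch neigh->output between them.
 */
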
static void neigh_proxy_process(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb;

        spin_lock(&tbl->proxy_queue.lock);

        skb = tbl->proxy_queue.next;

        while (skb != (struct sk_buff*)&tbl->proxy_queue) {
                struct sk_buff *back = skb;
                long tdif = back->stamp.tv_usec - now;

                skb = skb->next;
                if (tdif <= 0) {
                        struct net_device *dev = back->dev;
                        __skb_unlink(back, &tbl->proxy_queue);
                        if (tbl->proxy_redo && netif_running(dev))
                                tbl->proxy_redo(back);
                        else
                                kfree_skb(back);

                        dev_put(dev);
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long now = jiffies;
        long sched_next = net_random()%p->proxy_delay;

        if (tbl->proxy_queue.qlen > p->proxy_qlen) {
                kfree_skb(skb);
                return;
        }
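        /* The skb timestamp is reused here to carry the deadline, in
         * jiffies, at which neigh_proxy_process() above should handle
         * this skb (it compares stamp.tv_usec against jiffies).
         */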
        skb->stamp.tv_sec = 0;
        skb->stamp.tv_usec = now + sched_next;

        spin_lock(&tbl->proxy_queue.lock);
        if (del_timer(&tbl->proxy_timer)) {
                long tval = tbl->proxy_timer.expires - now;
                if (tval < sched_next)
                        sched_next = tval;
        }
        dst_release(skb->dst);
        skb->dst = NULL;
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, now + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl)
{
        struct neigh_parms *p;
        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p) {
                memcpy(p, &tbl->parms, sizeof(*p));
                p->tbl = tbl;
                p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
                if (dev && dev->neigh_setup) {
                        if (dev->neigh_setup(dev, p)) {
                                kfree(p);
                                return NULL;
                        }
                }
                p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next = tbl->parms.next;
                tbl->parms.next = p;
                write_unlock_bh(&tbl->lock);
        }
        return p;
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
        struct neigh_parms **p;

        if (parms == NULL || parms == &tbl->parms)
                return;
        write_lock_bh(&tbl->lock);
        for (p = &tbl->parms.next; *p; p = &(*p)->next) {
                if (*p == parms) {
                        *p = parms->next;
                        write_unlock_bh(&tbl->lock);
#ifdef CONFIG_SYSCTL
                        neigh_sysctl_unregister(parms);
#endif
                        kfree(parms);
                        return;
                }
        }
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_table_init(struct neigh_table *tbl)
{
        unsigned long now = jiffies;
        unsigned long phsize;

        tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time);

        if (tbl->kmem_cachep == NULL)
                tbl->kmem_cachep = kmem_cache_create(tbl->id,
                                                     (tbl->entry_size+15)&~15,
                                                     0, SLAB_HWCACHE_ALIGN,
                                                     NULL, NULL);

        if (!tbl->kmem_cachep)
                panic("cannot create neighbour cache");

#ifdef CONFIG_PROC_FS
        tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
        if (!tbl->pde)
                panic("cannot create neighbour proc dir entry");
        tbl->pde->proc_fops = &neigh_stat_seq_fops;
        tbl->pde->data = tbl;
#endif

        tbl->hash_mask = 1;
        tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

        if (!tbl->hash_buckets || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");

        memset(tbl->phash_buckets, 0, phsize);

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

#ifdef CONFIG_SMP
        tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), (unsigned long)tbl);
#endif
        init_timer(&tbl->gc_timer);
        tbl->lock = RW_LOCK_UNLOCKED;
        tbl->gc_timer.data = (unsigned long)tbl;
        tbl->gc_timer.function = neigh_periodic_timer;
        tbl->gc_timer.expires = now + 1;
        add_timer(&tbl->gc_timer);

        init_timer(&tbl->proxy_timer);
        tbl->proxy_timer.data = (unsigned long)tbl;
        tbl->proxy_timer.function = neigh_proxy_process;
        skb_queue_head_init(&tbl->proxy_queue);

        tbl->last_flush = now;
        tbl->last_rand = now + tbl->parms.reachable_time*20;
        write_lock(&neigh_tbl_lock);
        tbl->next = neigh_tables;
        neigh_tables = tbl;
        write_unlock(&neigh_tbl_lock);
}

int neigh_table_clear(struct neigh_table *tbl)
{
        struct neigh_table **tp;

        /* It is not clean... Fix it to unload the IPv6 module safely */
        del_timer_sync(&tbl->gc_timer);
        tasklet_kill(&tbl->gc_task);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                printk(KERN_CRIT "neighbour leakage\n");
        write_lock(&neigh_tbl_lock);
        for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
                if (*tp == tbl) {
                        *tp = tbl->next;
                        break;
                }
        }
        write_unlock(&neigh_tbl_lock);

        neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
        tbl->hash_buckets = NULL;

        kfree(tbl->phash_buckets);
        tbl->phash_buckets = NULL;

#ifdef CONFIG_SYSCTL
        neigh_sysctl_unregister(&tbl->parms);
#endif
        return 0;
}

int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ndmsg *ndm = NLMSG_DATA(nlh);
        struct rtattr **nda = arg;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err = 0;

        if (ndm->ndm_ifindex) {
                if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
                        return -ENODEV;
        }

        read_lock(&neigh_tbl_lock);
        for (tbl=neigh_tables; tbl; tbl = tbl->next) {
                struct neighbour *n;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                err = -EINVAL;
                if (nda[NDA_DST-1] == NULL ||
                    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
                        goto out;

                if (ndm->ndm_flags&NTF_PROXY) {
                        err = pneigh_delete(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                        goto out;
                }

                if (dev == NULL)
                        return -EINVAL;

                n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                if (n) {
                        err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
                        neigh_release(n);
                }
out:
                if (dev)
                        dev_put(dev);
                return err;
        }
        read_unlock(&neigh_tbl_lock);

        if (dev)
                dev_put(dev);

        return -EADDRNOTAVAIL;
}

int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ndmsg *ndm = NLMSG_DATA(nlh);
        struct rtattr **nda = arg;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;

        if (ndm->ndm_ifindex) {
                if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
                        return -ENODEV;
        }

        read_lock(&neigh_tbl_lock);
        for (tbl=neigh_tables; tbl; tbl = tbl->next) {
                int err = 0;
                int override = 1;
                struct neighbour *n;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                err = -EINVAL;
                if (nda[NDA_DST-1] == NULL ||
                    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
                        goto out;
                if (ndm->ndm_flags&NTF_PROXY) {
                        err = -ENOBUFS;
                        if (pneigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 1))
                                err = 0;
                        goto out;
                }
                if (dev == NULL)
                        return -EINVAL;
                err = -EINVAL;
                if (nda[NDA_LLADDR-1] != NULL &&
                    nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
                        goto out;
                err = 0;
                n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                if (n) {
                        if (nlh->nlmsg_flags&NLM_F_EXCL)
                                err = -EEXIST;
                        override = nlh->nlmsg_flags&NLM_F_REPLACE;
                } else if (!(nlh->nlmsg_flags&NLM_F_CREATE))
                        err = -ENOENT;
                else {
                        n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                        if (IS_ERR(n)) {
                                err = PTR_ERR(n);
                                n = NULL;
                        }
                }
                if (err == 0) {
                        err = neigh_update(n, nda[NDA_LLADDR-1] ? RTA_DATA(nda[NDA_LLADDR-1]) : NULL,
                                           ndm->ndm_state,
                                           override, 0);
                }
                if (n)
                        neigh_release(n);
out:
                if (dev)
                        dev_put(dev);
                return err;
        }
        read_unlock(&neigh_tbl_lock);

        if (dev)
                dev_put(dev);
        return -EADDRNOTAVAIL;
}

static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
                           u32 pid, u32 seq, int event)
{
        unsigned long now = jiffies;
        struct ndmsg *ndm;
        struct nlmsghdr  *nlh;
        unsigned char    *b = skb->tail;
        struct nda_cacheinfo ci;
        int locked = 0;

        nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ndm));
        ndm = NLMSG_DATA(nlh);
        nlh->nlmsg_flags = pid ? NLM_F_MULTI : 0;
        ndm->ndm_family = n->ops->family;
        ndm->ndm_flags = n->flags;
        ndm->ndm_type = n->type;
        ndm->ndm_ifindex = n->dev->ifindex;
        RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
        read_lock_bh(&n->lock);
        locked=1;
        ndm->ndm_state = n->nud_state;
        if (n->nud_state&NUD_VALID)
                RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
        ci.ndm_used = now - n->used;
        ci.ndm_confirmed = now - n->confirmed;
        ci.ndm_updated = now - n->updated;
        ci.ndm_refcnt = atomic_read(&n->refcnt) - 1;
        read_unlock_bh(&n->lock);
        locked=0;
        RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
        nlh->nlmsg_len = skb->tail - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        if (locked)
                read_unlock_bh(&n->lock);
        skb_trim(skb, b - skb->data);
        return -1;
}

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb)
{
        struct neighbour *n;
        int h, s_h;
        int idx, s_idx;

        s_h = cb->args[1];
        s_idx = idx = cb->args[2];
        for (h=0; h <= tbl->hash_mask; h++) {
                if (h < s_h) continue;
                if (h > s_h)
                        s_idx = 0;
                read_lock_bh(&tbl->lock);
                for (n = tbl->hash_buckets[h], idx = 0; n;
                     n = n->next, idx++) {
                        if (idx < s_idx)
                                continue;
                        if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
                                            cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
                                read_unlock_bh(&tbl->lock);
                                cb->args[1] = h;
                                cb->args[2] = idx;
                                return -1;
                        }
                }
                read_unlock_bh(&tbl->lock);
        }

        cb->args[1] = h;
        cb->args[2] = idx;
        return skb->len;
}

1544 int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1545 {
1546         int t;
1547         int s_t;
1548         struct neigh_table *tbl;
1549         int family = ((struct rtgenmsg*)NLMSG_DATA(cb->nlh))->rtgen_family;
1550
1551         s_t = cb->args[0];
1552
1553         read_lock(&neigh_tbl_lock);
1554         for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) {
1555                 if (t < s_t) continue;
1556                 if (family && tbl->family != family)
1557                         continue;
1558                 if (t > s_t)
1559                         memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1560                 if (neigh_dump_table(tbl, skb, cb) < 0) 
1561                         break;
1562         }
1563         read_unlock(&neigh_tbl_lock);
1564
1565         cb->args[0] = t;
1566
1567         return skb->len;
1568 }
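
/*
 * Example: neigh_dump_info() is reached when userspace sends
 * RTM_GETNEIGH with NLM_F_DUMP on an rtnetlink socket.  A minimal
 * userspace sketch that triggers such a dump (illustrative only, not
 * kernel code; error handling and reply parsing elided):
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int request_neigh_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg g;
	} req;
	struct sockaddr_nl nladdr;
	int ret, fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&nladdr, 0, sizeof(nladdr));
	nladdr.nl_family = AF_NETLINK;	/* destination: the kernel */

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtgenmsg));
	req.nlh.nlmsg_type  = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.g.rtgen_family  = AF_INET;	/* 0 would match every table */

	ret = sendto(fd, &req, req.nlh.nlmsg_len, 0,
		     (struct sockaddr *)&nladdr, sizeof(nladdr));
	/* a recv() loop for the multipart reply would go here */
	close(fd);
	return ret;
}
#endif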

void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
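
/*
 * Example: a minimal sketch of driving neigh_for_each() from protocol
 * code.  The callback runs with tbl->lock read-held and BHs disabled,
 * so it must not sleep and must not take tbl->lock again.  The names
 * struct dev_count and count_dev_neigh are hypothetical.
 */
#if 0
struct dev_count {
	struct net_device *dev;
	int count;
};

static void count_dev_neigh(struct neighbour *n, void *cookie)
{
	struct dev_count *dc = cookie;

	if (n->dev == dc->dev)
		dc->count++;
}

/* usage:
 *	struct dev_count dc = { dev, 0 };
 *	neigh_for_each(&arp_tbl, count_dev_neigh, &dc);
 */
#endif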

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
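
/*
 * Example: a sketch of a release callback for the helper above,
 * dropping every entry attached to one device (the device-shutdown
 * paths do essentially this).  Returning nonzero unlinks the entry and
 * releases the table's reference; the callback already runs with
 * tbl->lock write-held, BHs off and n->lock write-held.  flush_dev and
 * release_if_dev are hypothetical names.
 */
#if 0
static struct net_device *flush_dev;	/* set by the caller */

static int release_if_dev(struct neighbour *n)
{
	return n->dev == flush_dev;
}

/* caller, with tbl->lock write-held and BHs disabled:
 *	__neigh_for_each_release(tbl, release_if_dev);
 */
#endif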

#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos,
		      struct neigh_table *tbl, unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	/* Position 0 is the SEQ_START_TOKEN header; real records are
	 * numbered from 1, hence the off-by-one adjustment. */
	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}

void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
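
/*
 * Example: how a protocol wires these iterators into its own seq_file,
 * mirroring what net/ipv4/arp.c does for /proc/net/arp.  Only ->start
 * and ->show are protocol specific; ->next and ->stop can be the
 * generic helpers above.  my_seq_start and my_seq_show are
 * hypothetical names.
 */
#if 0
static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* flags 0: walk the neighbour entries, then the pneigh ones */
	return neigh_seq_start(seq, pos, &arp_tbl, 0);
}

static struct seq_operations my_seq_ops = {
	.start	= my_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= my_seq_show,	/* must handle SEQ_START_TOKEN */
};
#endif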

/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int lcpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (lcpu = *pos - 1; lcpu < smp_num_cpus; ++lcpu) {
		int i = cpu_logical_map(lcpu);
		*pos = lcpu + 1;
		return &tbl->stats[i];
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int lcpu;

	for (lcpu = *pos; lcpu < smp_num_cpus; ++lcpu) {
		int i = cpu_logical_map(lcpu);
		*pos = lcpu + 1;
		return &tbl->stats[i];
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}

static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static struct file_operations neigh_stat_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = neigh_stat_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
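
/*
 * Example: neigh_stat_seq_open() pulls the table out of the
 * proc_dir_entry (sf->private = PDE(inode)), so whoever registers the
 * stat file must point pde->data at the neigh_table.  A sketch of that
 * registration; the parent directory proc_net_stat and the function
 * name are assumptions, not necessarily what this tree's
 * neigh_table_init() does.
 */
#if 0
static int my_register_neigh_stats(struct neigh_table *tbl)
{
	struct proc_dir_entry *pde;

	pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!pde)
		return -ENOMEM;
	pde->data = tbl;			/* consumed by PDE(inode) */
	pde->proc_fops = &neigh_stat_seq_fops;
	return 0;
}
#endif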

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh = (struct nlmsghdr *)skb->data;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

static void neigh_app_notify(struct neighbour *n)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return;

	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
		kfree_skb(skb);
		return;
	}
	nlh = (struct nlmsghdr *)skb->data;
	NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
	netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
}

#endif /* CONFIG_ARPD */
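
/*
 * Example: neigh_app_ns() above multicasts an RTM_GETNEIGH request to
 * the RTMGRP_NEIGH group; a userspace arpd is expected to listen there
 * and answer with RTM_NEWNEIGH.  A minimal listener sketch (userspace,
 * illustrative only; error handling elided):
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

void arpd_listen(void)
{
	struct sockaddr_nl snl;
	char buf[8192];
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_groups = RTMGRP_NEIGH;	/* subscribe to neigh events */
	bind(fd, (struct sockaddr *)&snl, sizeof(snl));

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len))
			if (nlh->nlmsg_type == RTM_GETNEIGH)
				;	/* resolve, reply with RTM_NEWNEIGH */
	}
}
#endif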

#ifdef CONFIG_SYSCTL

struct neigh_sysctl_table
{
	struct ctl_table_header *sysctl_header;
	ctl_table neigh_vars[17];
	ctl_table neigh_dev[2];
	ctl_table neigh_neigh_dir[2];
	ctl_table neigh_proto_dir[2];
	ctl_table neigh_root_dir[2];
} neigh_sysctl_template = {
	NULL,
	{{NET_NEIGH_MCAST_SOLICIT, "mcast_solicit",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_UCAST_SOLICIT, "ucast_solicit",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_APP_SOLICIT, "app_solicit",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_RETRANS_TIME, "retrans_time",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_REACHABLE_TIME, "base_reachable_time",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec_jiffies},
	{NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec_jiffies},
	{NET_NEIGH_GC_STALE_TIME, "gc_stale_time",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec_jiffies},
	{NET_NEIGH_UNRES_QLEN, "unres_qlen",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_PROXY_QLEN, "proxy_qlen",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_ANYCAST_DELAY, "anycast_delay",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_PROXY_DELAY, "proxy_delay",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_LOCKTIME, "locktime",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_GC_INTERVAL, "gc_interval",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec_jiffies},
	{NET_NEIGH_GC_THRESH1, "gc_thresh1",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_GC_THRESH2, "gc_thresh2",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{NET_NEIGH_GC_THRESH3, "gc_thresh3",
	 NULL, sizeof(int), 0644, NULL,
	 &proc_dointvec},
	{0}},

	{{NET_PROTO_CONF_DEFAULT, "default", NULL, 0, 0555, NULL}, {0}},
	{{0, "neigh", NULL, 0, 0555, NULL}, {0}},
	{{0, NULL, NULL, 0, 0555, NULL}, {0}},
	{{CTL_NET, "net", NULL, 0, 0555, NULL}, {0}}
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name)
{
	struct neigh_sysctl_table *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL)
		return -ENOBUFS;
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	t->neigh_vars[0].data = &p->mcast_probes;
	t->neigh_vars[1].data = &p->ucast_probes;
	t->neigh_vars[2].data = &p->app_probes;
	t->neigh_vars[3].data = &p->retrans_time;
	t->neigh_vars[4].data = &p->base_reachable_time;
	t->neigh_vars[5].data = &p->delay_probe_time;
	t->neigh_vars[6].data = &p->gc_staletime;
	t->neigh_vars[7].data = &p->queue_len;
	t->neigh_vars[8].data = &p->proxy_qlen;
	t->neigh_vars[9].data = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
	if (dev) {
		t->neigh_dev[0].procname = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		/* terminate the array early: per-device directories do
		 * not carry the table-wide gc_* entries */
		memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
	} else {
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}
	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	t->neigh_dev[0].child = t->neigh_vars;
	t->neigh_neigh_dir[0].child = t->neigh_dev;
	t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (t->sysctl_header == NULL) {
		kfree(t);
		return -ENOBUFS;
	}
	p->sysctl_table = t;
	return 0;
}
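
/*
 * Example: how a protocol creates /proc/sys/net/<proto>/neigh/default/*
 * for its default parameters; this mirrors the registration done from
 * ARP initialization (see net/ipv4/arp.c):
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms,
 *			      NET_IPV4, NET_IPV4_NEIGH, "ipv4");
 *
 * Passing a device instead creates a per-interface directory keyed by
 * dev->ifindex.  Each registration is paired with
 * neigh_sysctl_unregister() (below) on teardown.
 */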

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}

#endif	/* CONFIG_SYSCTL */