xref: /openbmc/linux/net/core/neighbour.c (revision 87c2ce3b)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #include <linux/config.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/socket.h>
23 #include <linux/sched.h>
24 #include <linux/netdevice.h>
25 #include <linux/proc_fs.h>
26 #ifdef CONFIG_SYSCTL
27 #include <linux/sysctl.h>
28 #endif
29 #include <linux/times.h>
30 #include <net/neighbour.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 
/*
 * Debug printout plumbing: NEIGH_PRINTK always prints, NEIGH_NOPRINTK
 * swallows its arguments.  NEIGH_PRINTK{0,1,2} are then rebound to the
 * printing variant according to the compile-time NEIGH_DEBUG level.
 */
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

/* Proxy-neighbour hash has PNEIGH_HASHMASK + 1 == 16 buckets. */
#define PNEIGH_HASHMASK		0xF
55 
56 static void neigh_timer_handler(unsigned long arg);
57 #ifdef CONFIG_ARPD
58 static void neigh_app_notify(struct neighbour *n);
59 #endif
60 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
61 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
62 
63 static struct neigh_table *neigh_tables;
64 #ifdef CONFIG_PROC_FS
65 static struct file_operations neigh_stat_seq_fops;
66 #endif
67 
68 /*
69    Neighbour hash table buckets are protected with rwlock tbl->lock.
70 
71    - All the scans/updates to hash buckets MUST be made under this lock.
72    - NOTHING clever should be made under this lock: no callbacks
73      to protocol backends, no attempts to send something to network.
74      It will result in deadlocks, if backend/driver wants to use neighbour
75      cache.
76    - If the entry requires some non-trivial actions, increase
77      its reference count and release table lock.
78 
79    Neighbour entries are protected:
80    - with reference count.
81    - with rwlock neigh->lock
82 
83    Reference count prevents destruction.
84 
85    neigh->lock mainly serializes ll address data and its validity state.
 86    However, the same lock is used to protect other fields of the entry:
87     - timer
88     - resolution queue
89 
90    Again, nothing clever shall be made under neigh->lock,
91    the most complicated procedure, which we allow is dev->hard_header.
 92    It is assumed that dev->hard_header is simplistic and does
93    not make callbacks to neighbour tables.
94 
95    The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
 96    list of neighbour tables. This list is used only in process context.
97  */
98 
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100 
101 static int neigh_blackhole(struct sk_buff *skb)
102 {
103 	kfree_skb(skb);
104 	return -ENETDOWN;
105 }
106 
/*
 * Pick a randomized reachability interval uniformly distributed over
 * (base/2, 3*base/2).  This matches the default IPv6 behaviour and is
 * deliberately not tunable.  Returns 0 when base is 0.
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	unsigned long half;

	if (!base)
		return 0;

	half = base >> 1;
	return half + (net_random() % base);
}
117 
118 
/* Synchronous table shrink: walk every hash chain and free entries
 * that are unreferenced (refcnt == 1, i.e. only the table holds them)
 * and not NUD_PERMANENT.  Invoked from neigh_alloc() when the table is
 * over its gc thresholds.  Returns 1 if anything was freed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				/* Unlink; the table's reference is
				 * dropped via neigh_release() below.
				 */
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	/* Record the flush time so neigh_alloc() can rate-limit
	 * forced collections (5 s between runs below gc_thresh3).
	 */
	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
157 
158 static int neigh_del_timer(struct neighbour *n)
159 {
160 	if ((n->nud_state & NUD_IN_TIMER) &&
161 	    del_timer(&n->timer)) {
162 		neigh_release(n);
163 		return 1;
164 	}
165 	return 0;
166 }
167 
168 static void pneigh_queue_purge(struct sk_buff_head *list)
169 {
170 	struct sk_buff *skb;
171 
172 	while ((skb = skb_dequeue(list)) != NULL) {
173 		dev_put(skb->dev);
174 		kfree_skb(skb);
175 	}
176 }
177 
/* Unlink from @tbl every neighbour bound to @dev (all neighbours when
 * @dev is NULL), cancel their timers and mark them dead.  Entries that
 * are still referenced elsewhere are neutered (output routed through
 * neigh_blackhole) and destroyed later, when the last reference drops.
 * Caller must hold tbl->lock for writing.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}
}
218 
/* Flush every neighbour entry that uses @dev after the device changed
 * its hardware address; takes the table lock itself.
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
225 
/* Device is going down: flush its neighbour and proxy-neighbour
 * entries, then stop the proxy timer and free any packets it had
 * queued.  Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	/* The proxy timer may be running on another CPU; wait for it
	 * to finish before purging its queue.
	 */
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
237 
/* Allocate and minimally initialize a new neighbour entry for @tbl.
 * May run a synchronous forced GC when the table is above its
 * gc_thresh2/gc_thresh3 watermarks.  Returns NULL if the table is full
 * or the slab allocation fails.  The new entry starts with refcnt 1
 * and dead == 1; neigh_create() clears 'dead' once it is hashed.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	/* Optimistically account the new entry; undone on failure. */
	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		/* Give up only if GC freed nothing and we are at the
		 * hard gc_thresh3 limit.
		 */
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (!n)
		goto out_entries;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data	  = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
280 
281 static struct neighbour **neigh_hash_alloc(unsigned int entries)
282 {
283 	unsigned long size = entries * sizeof(struct neighbour *);
284 	struct neighbour **ret;
285 
286 	if (size <= PAGE_SIZE) {
287 		ret = kmalloc(size, GFP_ATOMIC);
288 	} else {
289 		ret = (struct neighbour **)
290 			__get_free_pages(GFP_ATOMIC, get_order(size));
291 	}
292 	if (ret)
293 		memset(ret, 0, size);
294 
295 	return ret;
296 }
297 
298 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
299 {
300 	unsigned long size = entries * sizeof(struct neighbour *);
301 
302 	if (size <= PAGE_SIZE)
303 		kfree(hash);
304 	else
305 		free_pages((unsigned long)hash, get_order(size));
306 }
307 
/* Resize the neighbour hash to @new_entries buckets (must be a power
 * of two) and rehash every entry under a fresh random hash seed.
 * Caller holds tbl->lock for writing; on allocation failure the old
 * table is kept unchanged.
 */
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(new_entries & (new_entries - 1));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	/* Re-seed so chain placement is not externally predictable. */
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
343 
344 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
345 			       struct net_device *dev)
346 {
347 	struct neighbour *n;
348 	int key_len = tbl->key_len;
349 	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
350 
351 	NEIGH_CACHE_STAT_INC(tbl, lookups);
352 
353 	read_lock_bh(&tbl->lock);
354 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
355 		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
356 			neigh_hold(n);
357 			NEIGH_CACHE_STAT_INC(tbl, hits);
358 			break;
359 		}
360 	}
361 	read_unlock_bh(&tbl->lock);
362 	return n;
363 }
364 
365 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
366 {
367 	struct neighbour *n;
368 	int key_len = tbl->key_len;
369 	u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask;
370 
371 	NEIGH_CACHE_STAT_INC(tbl, lookups);
372 
373 	read_lock_bh(&tbl->lock);
374 	for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
375 		if (!memcmp(n->primary_key, pkey, key_len)) {
376 			neigh_hold(n);
377 			NEIGH_CACHE_STAT_INC(tbl, hits);
378 			break;
379 		}
380 	}
381 	read_unlock_bh(&tbl->lock);
382 	return n;
383 }
384 
/* Create the neighbour entry for @pkey/@dev in @tbl, running the
 * protocol constructor and the device's neigh_setup hook before
 * hashing it.  If another CPU inserted the same entry while the table
 * lock was dropped, that entry is returned instead and ours discarded.
 * Returns the entry with an extra reference held, or ERR_PTR() on
 * failure.
 */
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Backdate 'confirmed' so the fresh entry is immediately
	 * considered stale and gets verified on first use.
	 */
	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	/* Hash after a possible grow: the mask/seed may have changed. */
	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* Lost a race?  Return the concurrently-inserted entry. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
452 
/* Look up the proxy-neighbour entry for @pkey/@dev; when @creat is
 * non-zero and nothing matches, allocate and insert a new entry
 * (GFP_KERNEL, so this may sleep).  Returns NULL on a miss with
 * !creat, or on allocation/constructor failure.  The bucket index is
 * derived from the last four bytes of the key.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		/* A wildcard entry (n->dev == NULL) matches any device. */
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	/* Miss: build a new entry outside the lock. */
	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
503 
504 
/* Remove the proxy-neighbour entry matching @pkey and @dev exactly
 * (no wildcard match here, unlike pneigh_lookup()).  Returns 0 on
 * success, -ENOENT when no such entry exists.
 */
int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			/* Destructor and free run outside the lock. */
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
534 
/* Free every proxy-neighbour entry bound to @dev (all entries when
 * @dev is NULL).  Caller holds tbl->lock for writing.  The return
 * value is always -ENOENT and is ignored by the callers in this file.
 */
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
557 
558 
559 /*
560  *	neighbour must already be out of the table;
561  *
562  */
563 void neigh_destroy(struct neighbour *neigh)
564 {
565 	struct hh_cache *hh;
566 
567 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
568 
569 	if (!neigh->dead) {
570 		printk(KERN_WARNING
571 		       "Destroying alive neighbour %p\n", neigh);
572 		dump_stack();
573 		return;
574 	}
575 
576 	if (neigh_del_timer(neigh))
577 		printk(KERN_WARNING "Impossible event.\n");
578 
579 	while ((hh = neigh->hh) != NULL) {
580 		neigh->hh = hh->hh_next;
581 		hh->hh_next = NULL;
582 		write_lock_bh(&hh->hh_lock);
583 		hh->hh_output = neigh_blackhole;
584 		write_unlock_bh(&hh->hh_lock);
585 		if (atomic_dec_and_test(&hh->hh_refcnt))
586 			kfree(hh);
587 	}
588 
589 	if (neigh->ops && neigh->ops->destructor)
590 		(neigh->ops->destructor)(neigh);
591 
592 	skb_queue_purge(&neigh->arp_queue);
593 
594 	dev_put(neigh->dev);
595 	neigh_parms_put(neigh->parms);
596 
597 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
598 
599 	atomic_dec(&neigh->tbl->entries);
600 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
601 }
602 
603 /* Neighbour state is suspicious;
604    disable fast path.
605 
606    Called with write_locked neigh.
607  */
608 static void neigh_suspect(struct neighbour *neigh)
609 {
610 	struct hh_cache *hh;
611 
612 	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
613 
614 	neigh->output = neigh->ops->output;
615 
616 	for (hh = neigh->hh; hh; hh = hh->hh_next)
617 		hh->hh_output = neigh->ops->output;
618 }
619 
620 /* Neighbour state is OK;
621    enable fast path.
622 
623    Called with write_locked neigh.
624  */
625 static void neigh_connect(struct neighbour *neigh)
626 {
627 	struct hh_cache *hh;
628 
629 	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
630 
631 	neigh->output = neigh->ops->connected_output;
632 
633 	for (hh = neigh->hh; hh; hh = hh->hh_next)
634 		hh->hh_output = neigh->ops->hh_output;
635 }
636 
/* Table-wide garbage-collection timer, run in softirq context.  Every
 * 300 s it re-randomizes reachable_time for all parms; each run scans
 * one hash chain (round-robin via hash_chain_gc) and evicts
 * unreferenced entries that failed or exceeded gc_staletime, then
 * reschedules itself so the whole table is covered every
 * base_reachable_time/2 ticks.
 */
static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		/* Permanent entries and entries with a live state timer
		 * are never collected here.
		 */
		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		/* Evict if only the table references it and it either
		 * failed or went unused past gc_staletime.
		 */
		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

 	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
 	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
 	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

 	mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
704 
705 static __inline__ int neigh_max_probes(struct neighbour *n)
706 {
707 	struct neigh_parms *p = n->parms;
708 	return (n->nud_state & NUD_PROBE ?
709 		p->ucast_probes :
710 		p->ucast_probes + p->app_probes + p->mcast_probes);
711 }
712 
713 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
714 {
715 	if (unlikely(mod_timer(&n->timer, when))) {
716 		printk("NEIGH: BUG, double timer add, state is %x\n",
717 		       n->nud_state);
718 		dump_stack();
719 	}
720 }
721 
/* Per-neighbour NUD state-machine timer, run in softirq context.
 * Drives the transitions REACHABLE -> DELAY -> PROBE -> FAILED,
 * re-arms itself while the entry remains in a timed state, emits
 * solicitations while probing, and drops the timer's reference on
 * exit.
 */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh_suspect(neigh);
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			/* Confirmed by upper layer during DELAY. */
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh_connect(neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	/* Probe budget exhausted: declare the neighbour failed and
	 * error-report every queued packet.
	 */
	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	/* Re-arm while still timed; mod_timer() returning 0 means the
	 * timer was inactive, so take a fresh reference for it.
	 */
	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
828 
/* Slow path of neigh_event_send(): kick address resolution for an
 * entry that is not verified.  Starts INCOMPLETE probing or a DELAY
 * confirmation timer as appropriate and, while resolving, parks @skb
 * on arp_queue (bounded by parms->queue_len; the oldest packet is
 * dropped on overflow).  Returns non-zero when the caller must NOT
 * transmit (packet queued or dropped), 0 when the entry is usable.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* Already usable or already being verified: nothing to do. */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			/* No way to probe: fail immediately and drop. */
			neigh->nud_state = NUD_FAILED;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* Queue full: make room by dropping the head. */
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
881 
882 static __inline__ void neigh_update_hhs(struct neighbour *neigh)
883 {
884 	struct hh_cache *hh;
885 	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
886 		neigh->dev->header_cache_update;
887 
888 	if (update) {
889 		for (hh = neigh->hh; hh; hh = hh->hh_next) {
890 			write_lock_bh(&hh->hh_lock);
891 			update(hh, neigh->dev, neigh->ha);
892 			write_unlock_bh(&hh->hh_lock);
893 		}
894 	}
895 }
896 
897 
898 
899 /* Generic update routine.
900    -- lladdr is new lladdr or NULL, if it is not supplied.
901    -- new    is new state.
902    -- flags
903 	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
904 				if it is different.
905 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
906 				lladdr instead of overriding it
907 				if it is different.
908 				It also allows to retain current state
909 				if lladdr is unchanged.
910 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
911 
912 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
913 				NTF_ROUTER flag.
914 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
915 				a router.
916 
917    Caller MUST hold reference count on the entry.
918  */
919 
/* Generic NUD state/address update; see the block comment above for
 * the flag semantics.  Caller must hold a reference on @neigh.
 * Returns 0 on success, -EPERM when a non-admin update hits a
 * NOARP/PERMANENT entry, -EINVAL when no address is known at all.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
#ifdef CONFIG_ARPD
	int notify = 0;
#endif
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	/* Only administrative updates may touch static entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	/* Transition to an invalid state: stop the timer, fall back to
	 * the slow output path, done.
	 */
	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
#ifdef CONFIG_ARPD
		notify = old & NUD_VALID;
#endif
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Keep the old address but demote the
				 * entry so it gets re-verified.
				 */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		/* Refresh cached hardware headers with the new address. */
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
1061 
1062 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1063 				 u8 *lladdr, void *saddr,
1064 				 struct net_device *dev)
1065 {
1066 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1067 						 lladdr || !dev->addr_len);
1068 	if (neigh)
1069 		neigh_update(neigh, lladdr, NUD_STALE,
1070 			     NEIGH_UPDATE_F_OVERRIDE);
1071 	return neigh;
1072 }
1073 
/* Attach the cached hardware header for @protocol to @dst, creating
 * and filling it via dev->hard_header_cache() if the neighbour does
 * not have one for this protocol yet.  The new header's output hook is
 * chosen from the current NUD state.
 * Called with neigh->lock write-held.
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  u16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		memset(hh, 0, sizeof(struct hh_cache));
		rwlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			/* Reference for the neighbour's hh list. */
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh)	{
		/* Second reference for the dst entry. */
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
1108 
1109 /* This function can be used in contexts, where only old dev_queue_xmit
1110    worked, f.e. if you want to override normal output path (eql, shaper),
1111    but resolution is not made yet.
1112  */
1113 
/* Legacy output path (see comment above): build the link-layer header
 * synchronously, falling back to dev->rebuild_header() when the
 * initial attempt fails, then hand the packet to the device queue.
 * Returns 0 when the header could not be rebuilt.
 */
int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	/* Trim the packet back to the network header. */
	__skb_pull(skb, skb->nh.raw - skb->data);

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
		    	     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
1128 
1129 /* Slow and careful. */
1130 
/* Resolving output path: trigger/await neighbour resolution via
 * neigh_event_send(); once the entry is usable, build the link-layer
 * header (initializing the hh cache on first use for devices that
 * support it) and transmit.  Drops the packet (-EINVAL) when no dst or
 * neighbour is attached or header construction fails.
 */
int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb->nh.raw - skb->data);

	/* Zero means the entry is usable now; non-zero means the skb
	 * was queued or dropped by the resolution machinery.
	 */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			/* Re-check dst->hh under the write lock. */
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			/* Read lock suffices: only neigh->ha is read. */
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
1173 
1174 /* As fast as possible without hh cache */
1175 
1176 int neigh_connected_output(struct sk_buff *skb)
1177 {
1178 	int err;
1179 	struct dst_entry *dst = skb->dst;
1180 	struct neighbour *neigh = dst->neighbour;
1181 	struct net_device *dev = neigh->dev;
1182 
1183 	__skb_pull(skb, skb->nh.raw - skb->data);
1184 
1185 	read_lock_bh(&neigh->lock);
1186 	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
1187 			       neigh->ha, NULL, skb->len);
1188 	read_unlock_bh(&neigh->lock);
1189 	if (err >= 0)
1190 		err = neigh->ops->queue_xmit(skb);
1191 	else {
1192 		err = -EINVAL;
1193 		kfree_skb(skb);
1194 	}
1195 	return err;
1196 }
1197 
/*
 * Proxy-queue timer handler: deliver every queued skb whose scheduled
 * time has arrived via tbl->proxy_redo(), drop the rest-of-deadline
 * ones, and re-arm the timer for the earliest remaining entry.
 */
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	/* manual walk: the queue head itself marks the end of the list */
	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		/* advance before possibly unlinking 'back' */
		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);	/* ref taken in pneigh_enqueue() */
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1231 
/*
 * Queue an skb for delayed proxy processing; neigh_proxy_process() will
 * hand it to tbl->proxy_redo after a random delay of up to
 * p->proxy_delay jiffies.  The skb is dropped if the queue is full.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	/* NOTE(review): the modulo divides by zero if p->proxy_delay is 0;
	 * presumably callers only enqueue when proxy_delay != 0 -- confirm
	 * against the callers. */
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* keep the timer armed for the earliest outstanding deadline */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);	/* dropped in neigh_proxy_process() */
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1258 
1259 
1260 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1261 				      struct neigh_table *tbl)
1262 {
1263 	struct neigh_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);
1264 
1265 	if (p) {
1266 		memcpy(p, &tbl->parms, sizeof(*p));
1267 		p->tbl		  = tbl;
1268 		atomic_set(&p->refcnt, 1);
1269 		INIT_RCU_HEAD(&p->rcu_head);
1270 		p->reachable_time =
1271 				neigh_rand_reach_time(p->base_reachable_time);
1272 		if (dev) {
1273 			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1274 				kfree(p);
1275 				return NULL;
1276 			}
1277 
1278 			dev_hold(dev);
1279 			p->dev = dev;
1280 		}
1281 		p->sysctl_table = NULL;
1282 		write_lock_bh(&tbl->lock);
1283 		p->next		= tbl->parms.next;
1284 		tbl->parms.next = p;
1285 		write_unlock_bh(&tbl->lock);
1286 	}
1287 	return p;
1288 }
1289 
1290 static void neigh_rcu_free_parms(struct rcu_head *head)
1291 {
1292 	struct neigh_parms *parms =
1293 		container_of(head, struct neigh_parms, rcu_head);
1294 
1295 	neigh_parms_put(parms);
1296 }
1297 
/*
 * Unlink a per-device neigh_parms from the table's list and schedule
 * the refcount drop after an RCU grace period.  The table's built-in
 * default parms (&tbl->parms) are never released this way.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			/* defer neigh_parms_put() until readers are done */
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
1319 
/* Final teardown once the last reference to a neigh_parms is gone. */
void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1324 
1325 
/*
 * One-time initialisation of a protocol's neighbour table: slab cache,
 * per-CPU statistics, /proc entry, hash tables, GC and proxy timers,
 * and registration on the global neigh_tables list.  Panics on
 * allocation failure since it runs at boot/module-init time.
 */
void neigh_table_init(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep = kmem_cache_create(tbl->id,
						     tbl->entry_size,
						     0, SLAB_HWCACHE_ALIGN,
						     NULL, NULL);

	if (!tbl->kmem_cachep)
		panic("cannot create neighbour cache");

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	/* /proc/net/stat/<id>, served by neigh_stat_seq_fops */
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	/* start with two buckets; the hash grows on demand */
	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	memset(tbl->phash_buckets, 0, phsize);

	/* randomise the hash to make chain collisions unpredictable */
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data     = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	/* proxy timer is armed on demand by pneigh_enqueue() */
	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data	  = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init(&tbl->proxy_queue);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
	write_lock(&neigh_tbl_lock);
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);
}
1389 
/*
 * Tear down a neighbour table at protocol-unregister time: stop the
 * timers, purge the proxy queue, flush all entries, unlink from the
 * global table list and free the hash tables.  Always returns 0.
 */
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	return 0;
}
1418 
/*
 * RTM_DELNEIGH handler: delete a neighbour (or proxy entry) identified
 * by family, NDA_DST address and optional ifindex.
 */
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* one table per family: safe to drop the list lock here */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, RTA_DATA(dst_attr), dev);
			goto out_dev_put;
		}

		if (!dev)
			goto out;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			/* "delete" == administratively force NUD_FAILED */
			err = neigh_update(n, NULL, NUD_FAILED,
					   NEIGH_UPDATE_F_OVERRIDE|
					   NEIGH_UPDATE_F_ADMIN);
			neigh_release(n);
		}
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1469 
/*
 * RTM_NEWNEIGH handler: create or update a neighbour (or proxy) entry
 * according to the NLM_F_CREATE / NLM_F_REPLACE / NLM_F_EXCL flags in
 * the request.
 */
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm = NLMSG_DATA(nlh);
	struct rtattr **nda = arg;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -ENODEV;

	if (ndm->ndm_ifindex &&
	    (dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
		goto out;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct rtattr *lladdr_attr = nda[NDA_LLADDR - 1];
		struct rtattr *dst_attr = nda[NDA_DST - 1];
		int override = 1;
		struct neighbour *n;

		if (tbl->family != ndm->ndm_family)
			continue;
		/* one table per family: safe to drop the list lock here */
		read_unlock(&neigh_tbl_lock);

		err = -EINVAL;
		if (!dst_attr || RTA_PAYLOAD(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = -ENOBUFS;
			/* create-if-missing lookup for proxy entries */
			if (pneigh_lookup(tbl, RTA_DATA(dst_attr), dev, 1))
				err = 0;
			goto out_dev_put;
		}

		err = -EINVAL;
		if (!dev)
			goto out;
		if (lladdr_attr && RTA_PAYLOAD(lladdr_attr) < dev->addr_len)
			goto out_dev_put;

		n = neigh_lookup(tbl, RTA_DATA(dst_attr), dev);
		if (n) {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(n);
				goto out_dev_put;
			}

			/* existing entry: only override lladdr on REPLACE */
			override = nlh->nlmsg_flags & NLM_F_REPLACE;
		} else if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out_dev_put;
		} else {
			n = __neigh_lookup_errno(tbl, RTA_DATA(dst_attr), dev);
			if (IS_ERR(n)) {
				err = PTR_ERR(n);
				goto out_dev_put;
			}
		}

		err = neigh_update(n,
				   lladdr_attr ? RTA_DATA(lladdr_attr) : NULL,
				   ndm->ndm_state,
				   (override ? NEIGH_UPDATE_F_OVERRIDE : 0) |
				   NEIGH_UPDATE_F_ADMIN);

		neigh_release(n);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EADDRNOTAVAIL;
out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}
1548 
/*
 * Dump one neigh_parms as a nested NDTA_PARMS attribute.  On skb
 * overflow the RTA_PUT* macros jump to rtattr_failure and the whole
 * nest is cancelled; a negative value is returned in that case.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);

	/* ifindex only present for per-device parms */
	if (parms->dev)
		RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return RTA_NEST_END(skb, nest);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);
}
1579 
/*
 * Dump one neighbour table (config, aggregated stats and its default
 * parms) as an RTM_NEWNEIGHTBL message.  Returns the skb length on
 * success, or a negative value when the message did not fit.
 */
static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	/* hold the table lock so config and stats are self-consistent */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		/* sum the per-CPU counters into a single ndt_stats */
		for_each_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	/* a table's built-in default parms are never device-bound */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1661 
/*
 * Like neightbl_fill_info() but dumps a single (typically per-device)
 * neigh_parms set instead of the whole table configuration.
 */
static int neightbl_fill_param_info(struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
			       NLM_F_MULTI);

	ndtmsg = NLMSG_DATA(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
	RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);

	if (neightbl_fill_parms(skb, parms) < 0)
		goto rtattr_failure;

	read_unlock_bh(&tbl->lock);
	return NLMSG_END(skb, nlh);

rtattr_failure:
	read_unlock_bh(&tbl->lock);
	return NLMSG_CANCEL(skb, nlh);

nlmsg_failure:
	return -1;
}
1694 
1695 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1696 						      int ifindex)
1697 {
1698 	struct neigh_parms *p;
1699 
1700 	for (p = &tbl->parms; p; p = p->next)
1701 		if ((p->dev && p->dev->ifindex == ifindex) ||
1702 		    (!p->dev && !ifindex))
1703 			return p;
1704 
1705 	return NULL;
1706 }
1707 
/*
 * RTM_SETNEIGHTBL handler: update a neighbour table's GC thresholds
 * and interval, and optionally one nested neigh_parms set selected by
 * NDTPA_IFINDEX (0 = the table default).  The table is chosen by
 * NDTA_NAME (and family, if given).
 */
int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
	struct rtattr **tb = arg;
	int err = -EINVAL;

	if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
		return -EINVAL;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	/* NB: the RTA_GET_* macros jump to rtattr_failure on malformed
	 * attributes, so a partial update is possible on bad input. */
	if (tb[NDTA_THRESH1 - 1])
		tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);

	if (tb[NDTA_THRESH2 - 1])
		tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);

	if (tb[NDTA_THRESH3 - 1])
		tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);

	if (tb[NDTA_GC_INTERVAL - 1])
		tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);

	if (tb[NDTA_PARMS - 1]) {
		struct rtattr *tbp[NDTPA_MAX];
		struct neigh_parms *p;
		u32 ifindex = 0;

		if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
			goto rtattr_failure;

		if (tbp[NDTPA_IFINDEX - 1])
			ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto rtattr_failure;
		}

		if (tbp[NDTPA_QUEUE_LEN - 1])
			p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);

		if (tbp[NDTPA_PROXY_QLEN - 1])
			p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);

		if (tbp[NDTPA_APP_PROBES - 1])
			p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);

		if (tbp[NDTPA_UCAST_PROBES - 1])
			p->ucast_probes =
			   RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);

		if (tbp[NDTPA_MCAST_PROBES - 1])
			p->mcast_probes =
			   RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);

		if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
			p->base_reachable_time =
			   RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);

		if (tbp[NDTPA_GC_STALETIME - 1])
			p->gc_staletime =
			   RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);

		if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
			p->delay_probe_time =
			   RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);

		if (tbp[NDTPA_RETRANS_TIME - 1])
			p->retrans_time =
			   RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);

		if (tbp[NDTPA_ANYCAST_DELAY - 1])
			p->anycast_delay =
			   RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);

		if (tbp[NDTPA_PROXY_DELAY - 1])
			p->proxy_delay =
			   RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);

		if (tbp[NDTPA_LOCKTIME - 1])
			p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
	}

	err = 0;

rtattr_failure:
	write_unlock_bh(&tbl->lock);
errout:
	read_unlock(&neigh_tbl_lock);
	return err;
}
1820 
/*
 * Dump all neighbour tables (and their per-device parms) matching the
 * requested family; cb->args[0] carries the resume index across calls.
 */
int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, family;
	int s_idx = cb->args[0];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
		struct neigh_parms *p;

		/* NOTE(review): idx only advances for dumped entries, so the
		 * resume index assumes the table/parms list order is stable
		 * between dump invocations -- confirm. */
		if (idx < s_idx || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(tbl, skb, cb) <= 0)
			break;

		/* the table itself occupies one index; parms follow */
		for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
			if (idx < s_idx)
				continue;

			if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
				goto out;
		}

	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = idx;

	return skb->len;
}
1854 
/*
 * Fill one RTM_*NEIGH message for a neighbour entry.  Returns the skb
 * length on success, or -1 with the partial message trimmed away if
 * the entry did not fit.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
			   u32 pid, u32 seq, int event, unsigned int flags)
{
	unsigned long now = jiffies;
	unsigned char *b = skb->tail;
	struct nda_cacheinfo ci;
	int locked = 0;	/* is n->lock held if we hit the failure labels? */
	u32 probes;
	struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
					 sizeof(struct ndmsg), flags);
	struct ndmsg *ndm = NLMSG_DATA(nlh);

	ndm->ndm_family	 = n->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = n->flags;
	ndm->ndm_type	 = n->type;
	ndm->ndm_ifindex = n->dev->ifindex;
	RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
	/* state, lladdr and cacheinfo must be read under n->lock */
	read_lock_bh(&n->lock);
	locked		 = 1;
	ndm->ndm_state	 = n->nud_state;
	if (n->nud_state & NUD_VALID)
		RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
	ci.ndm_used	 = now - n->used;
	ci.ndm_confirmed = now - n->confirmed;
	ci.ndm_updated	 = now - n->updated;
	ci.ndm_refcnt	 = atomic_read(&n->refcnt) - 1;
	probes = atomic_read(&n->probes);
	read_unlock_bh(&n->lock);
	locked		 = 0;
	RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
	RTA_PUT(skb, NDA_PROBES, sizeof(probes), &probes);
	nlh->nlmsg_len	 = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	/* the RTA_PUT inside the locked section can jump here */
	if (locked)
		read_unlock_bh(&n->lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
1898 
1899 
/*
 * Dump the neighbours of one table, resuming from hash bucket
 * cb->args[1] and chain position cb->args[2].  Returns the skb length,
 * or -1 when the skb filled up mid-dump.
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;	/* only skip within the resume bucket */
		read_lock_bh(&tbl->lock);
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
		read_unlock_bh(&tbl->lock);
	}
	rc = skb->len;
out:
	/* record where to resume on the next dump call */
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
1933 
/*
 * RTM_GETNEIGH dump handler: iterate all tables of the requested
 * family; cb->args[0] is the resume table index, args[1]/args[2] the
 * per-table position.
 */
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		/* entering a fresh table: reset the per-table position */
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
1957 
1958 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
1959 {
1960 	int chain;
1961 
1962 	read_lock_bh(&tbl->lock);
1963 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
1964 		struct neighbour *n;
1965 
1966 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
1967 			cb(n, cookie);
1968 	}
1969 	read_unlock_bh(&tbl->lock);
1970 }
1971 EXPORT_SYMBOL(neigh_for_each);
1972 
/* The tbl->lock must be held as a writer and BH disabled. */
/*
 * Walk every hash chain and unlink (and release) each neighbour for
 * which cb() returns non-zero; entries are marked dead before the
 * final neigh_release().
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				/* unlink; np stays put so the next entry
				 * slides into *np */
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			/* release only after dropping n->lock */
			if (release)
				neigh_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2000 
2001 #ifdef CONFIG_PROC_FS
2002 
/*
 * seq_file helper: find the first displayable neighbour, honouring the
 * optional per-protocol sub-iterator and the SKIP_NOARP filter.
 * Updates state->bucket to where the entry was found.
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				/* protocol filter: skip entries the
				 * sub-iterator rejects */
				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			/* with SKIP_NOARP, only show resolvable states */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
2038 
/*
 * seq_file helper: advance to the next displayable neighbour after n,
 * crossing hash buckets as needed.  When pos is non-NULL it is
 * decremented for each entry returned (used by neigh_get_idx()).
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		/* let the protocol iterator advance within this entry first */
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			/* with SKIP_NOARP, only show resolvable states */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}
2083 
2084 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2085 {
2086 	struct neighbour *n = neigh_get_first(seq);
2087 
2088 	if (n) {
2089 		while (*pos) {
2090 			n = neigh_get_next(seq, n, pos);
2091 			if (!n)
2092 				break;
2093 		}
2094 	}
2095 	return *pos ? NULL : n;
2096 }
2097 
2098 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2099 {
2100 	struct neigh_seq_state *state = seq->private;
2101 	struct neigh_table *tbl = state->tbl;
2102 	struct pneigh_entry *pn = NULL;
2103 	int bucket = state->bucket;
2104 
2105 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2106 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2107 		pn = tbl->phash_buckets[bucket];
2108 		if (pn)
2109 			break;
2110 	}
2111 	state->bucket = bucket;
2112 
2113 	return pn;
2114 }
2115 
2116 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2117 					    struct pneigh_entry *pn,
2118 					    loff_t *pos)
2119 {
2120 	struct neigh_seq_state *state = seq->private;
2121 	struct neigh_table *tbl = state->tbl;
2122 
2123 	pn = pn->next;
2124 	while (!pn) {
2125 		if (++state->bucket > PNEIGH_HASHMASK)
2126 			break;
2127 		pn = tbl->phash_buckets[state->bucket];
2128 		if (pn)
2129 			break;
2130 	}
2131 
2132 	if (pn && pos)
2133 		--(*pos);
2134 
2135 	return pn;
2136 }
2137 
2138 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2139 {
2140 	struct pneigh_entry *pn = pneigh_get_first(seq);
2141 
2142 	if (pn) {
2143 		while (*pos) {
2144 			pn = pneigh_get_next(seq, pn, pos);
2145 			if (!pn)
2146 				break;
2147 		}
2148 	}
2149 	return *pos ? NULL : pn;
2150 }
2151 
2152 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2153 {
2154 	struct neigh_seq_state *state = seq->private;
2155 	void *rc;
2156 
2157 	rc = neigh_get_idx(seq, pos);
2158 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2159 		rc = pneigh_get_idx(seq, pos);
2160 
2161 	return rc;
2162 }
2163 
2164 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2165 {
2166 	struct neigh_seq_state *state = seq->private;
2167 	loff_t pos_minus_one;
2168 
2169 	state->tbl = tbl;
2170 	state->bucket = 0;
2171 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2172 
2173 	read_lock_bh(&tbl->lock);
2174 
2175 	pos_minus_one = *pos - 1;
2176 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2177 }
2178 EXPORT_SYMBOL(neigh_seq_start);
2179 
/*
 * Common seq_file ->next: advance within the current phase (neighbours
 * or proxy entries) and fall over from the neighbour phase to the
 * proxy phase when the former is exhausted.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		/* neighbour phase exhausted: switch to proxy entries */
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		/* we can only be in the pneigh phase if it was enabled */
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
2206 
/* Common seq_file ->stop: drop the table lock taken in neigh_seq_start(). */
void neigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
2215 
2216 /* statistics via seq_file */
2217 
2218 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2219 {
2220 	struct proc_dir_entry *pde = seq->private;
2221 	struct neigh_table *tbl = pde->data;
2222 	int cpu;
2223 
2224 	if (*pos == 0)
2225 		return SEQ_START_TOKEN;
2226 
2227 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2228 		if (!cpu_possible(cpu))
2229 			continue;
2230 		*pos = cpu+1;
2231 		return per_cpu_ptr(tbl->stats, cpu);
2232 	}
2233 	return NULL;
2234 }
2235 
2236 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2237 {
2238 	struct proc_dir_entry *pde = seq->private;
2239 	struct neigh_table *tbl = pde->data;
2240 	int cpu;
2241 
2242 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2243 		if (!cpu_possible(cpu))
2244 			continue;
2245 		*pos = cpu+1;
2246 		return per_cpu_ptr(tbl->stats, cpu);
2247 	}
2248 	return NULL;
2249 }
2250 
/* Nothing to release: start/next take no locks and allocate nothing. */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}
2255 
/*
 * Stats seq_file ->show: print the column header for the start token,
 * otherwise one line of hex counters for the per-CPU stats in v.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	/* 'entries' is table-global; the rest are this CPU's counters */
	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
			"%08lx %08lx  %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}
2289 
/* Iterator callbacks for the /proc/net/stat/<table> statistics file. */
static struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
2296 
2297 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2298 {
2299 	int ret = seq_open(file, &neigh_stat_seq_ops);
2300 
2301 	if (!ret) {
2302 		struct seq_file *sf = file->private_data;
2303 		sf->private = PDE(inode);
2304 	}
2305 	return ret;
2306 };
2307 
/* File operations for /proc/net/stat/<table>; reads are serviced by
 * the generic seq_file helpers driven by neigh_stat_seq_ops. */
static struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open 	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
2315 
2316 #endif /* CONFIG_PROC_FS */
2317 
2318 #ifdef CONFIG_ARPD
2319 void neigh_app_ns(struct neighbour *n)
2320 {
2321 	struct nlmsghdr  *nlh;
2322 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2323 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2324 
2325 	if (!skb)
2326 		return;
2327 
2328 	if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
2329 		kfree_skb(skb);
2330 		return;
2331 	}
2332 	nlh			   = (struct nlmsghdr *)skb->data;
2333 	nlh->nlmsg_flags	   = NLM_F_REQUEST;
2334 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2335 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2336 }
2337 
2338 static void neigh_app_notify(struct neighbour *n)
2339 {
2340 	struct nlmsghdr *nlh;
2341 	int size = NLMSG_SPACE(sizeof(struct ndmsg) + 256);
2342 	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
2343 
2344 	if (!skb)
2345 		return;
2346 
2347 	if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
2348 		kfree_skb(skb);
2349 		return;
2350 	}
2351 	nlh			   = (struct nlmsghdr *)skb->data;
2352 	NETLINK_CB(skb).dst_group  = RTNLGRP_NEIGH;
2353 	netlink_broadcast(rtnl, skb, 0, RTNLGRP_NEIGH, GFP_ATOMIC);
2354 }
2355 
2356 #endif /* CONFIG_ARPD */
2357 
2358 #ifdef CONFIG_SYSCTL
2359 
/*
 * Template for the per-device/per-protocol neighbour sysctl tree
 * (net/<proto>/neigh/<dev>/...).  neigh_sysctl_register() copies this
 * template and patches it by INDEX into neigh_vars[], so the order of
 * entries below is part of the contract:
 *
 *   [0] mcast_solicit  [1] ucast_solicit  [2] app_solicit
 *   [3] retrans_time   [4] base_reachable_time
 *   [5] delay_first_probe_time            [6] gc_stale_time
 *   [7] unres_qlen     [8] proxy_qlen
 *   [9] anycast_delay  [10] proxy_delay   [11] locktime
 *   [12] gc_interval   [13..15] gc_thresh1..3  (table-wide; removed
 *        from per-device directories)
 *   [16] retrans_time_ms  [17] base_reachable_time_ms
 *
 * The *_dir members build the directory chain
 * net -> <proto> -> neigh -> <dev>; each is a one-entry table plus a
 * NULL terminator provided by the zeroed second element.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table		neigh_vars[__NET_NEIGH_MAX];
	ctl_table		neigh_dev[2];
	ctl_table		neigh_neigh_dir[2];
	ctl_table		neigh_proto_dir[2];
	ctl_table		neigh_root_dir[2];
} neigh_sysctl_template = {
	.neigh_vars = {
		/* [0] */
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [1] */
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [2] */
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [3] value stored in jiffies, shown in USER_HZ ticks */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME,
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [4] */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [5] */
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [6] */
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [7] */
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [8] */
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [9] */
		{
			.ctl_name	= NET_NEIGH_ANYCAST_DELAY,
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [10] */
		{
			.ctl_name	= NET_NEIGH_PROXY_DELAY,
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [11] */
		{
			.ctl_name	= NET_NEIGH_LOCKTIME,
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		/* [12] table-wide; dropped for per-device subtrees */
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		/* [13] */
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [14] */
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [15] */
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		/* [16] millisecond view of [3] */
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		/* [17] millisecond view of [4] */
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
	},
	/* Directory entry: either "default" or the device name. */
	.neigh_dev = {
		{
			.ctl_name	= NET_PROTO_CONF_DEFAULT,
			.procname	= "default",
			.mode		= 0555,
		},
	},
	.neigh_neigh_dir = {
		{
			.procname	= "neigh",
			.mode		= 0555,
		},
	},
	/* Protocol directory: name/id filled in at registration time. */
	.neigh_proto_dir = {
		{
			.mode		= 0555,
		},
	},
	.neigh_root_dir = {
		{
			.ctl_name	= CTL_NET,
			.procname	= "net",
			.mode		= 0555,
		},
	},
};
2528 
/*
 * Register the sysctl subtree net/<p_name>/neigh/<dev|default>/ for the
 * given neigh_parms.
 *
 * @dev:     device the parms belong to, or NULL for the protocol-wide
 *           defaults.
 * @p:       neighbour parameters the sysctl entries point into.
 * @p_id:    ctl_name for the protocol directory (e.g. NET_IPV4).
 * @pdev_id: ctl_name for the "neigh" directory.
 * @p_name:  protocol directory name (e.g. "ipv4").
 * @handler/@strategy: optional overrides for the retrans/reachable time
 *           entries (used by protocols that need extra processing).
 *
 * Returns 0 on success or -ENOBUFS on allocation/registration failure.
 * On success the table is remembered in p->sysctl_table for later
 * neigh_sysctl_unregister().
 *
 * NOTE: neigh_vars[] is patched by index; the numbering must match the
 * entry order in neigh_sysctl_template.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
	const char *dev_name_source = NULL;
	char *dev_name = NULL;
	int err = 0;

	if (!t)
		return -ENOBUFS;
	/* Work on a private copy of the template. */
	memcpy(t, &neigh_sysctl_template, sizeof(*t));
	/* Per-parms knobs: indices [0..11]. */
	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;

	if (dev) {
		/* Per-device subtree: named after the device, and the
		 * table-wide GC knobs [12..15] are hidden by NULLing
		 * their procnames (a NULL procname terminates/skips the
		 * entry). */
		dev_name_source = dev->name;
		t->neigh_dev[0].ctl_name = dev->ifindex;
		t->neigh_vars[12].procname = NULL;
		t->neigh_vars[13].procname = NULL;
		t->neigh_vars[14].procname = NULL;
		t->neigh_vars[15].procname = NULL;
	} else {
		/* "default" subtree: the GC knobs point at the four
		 * ints the caller allocated directly after the
		 * neigh_parms (gc_interval, gc_thresh1..3). */
 		dev_name_source = t->neigh_dev[0].procname;
		t->neigh_vars[12].data = (int *)(p + 1);
		t->neigh_vars[13].data = (int *)(p + 1) + 1;
		t->neigh_vars[14].data = (int *)(p + 1) + 2;
		t->neigh_vars[15].data = (int *)(p + 1) + 3;
	}

	/* Millisecond aliases of retrans/base_reachable time. */
	t->neigh_vars[16].data  = &p->retrans_time;
	t->neigh_vars[17].data  = &p->base_reachable_time;

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[16].proc_handler = handler;
		t->neigh_vars[16].strategy = strategy;
		t->neigh_vars[16].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[17].proc_handler = handler;
		t->neigh_vars[17].strategy = strategy;
		t->neigh_vars[17].extra1 = dev;
	}

	/* Own copy of the directory name; freed in unregister (and on
	 * the error path below). */
	dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!dev_name) {
		err = -ENOBUFS;
		goto free;
	}

 	t->neigh_dev[0].procname = dev_name;

	t->neigh_neigh_dir[0].ctl_name = pdev_id;

	t->neigh_proto_dir[0].procname = p_name;
	t->neigh_proto_dir[0].ctl_name = p_id;

	/* Chain the directories: net -> proto -> neigh -> dev -> vars. */
	t->neigh_dev[0].child	       = t->neigh_vars;
	t->neigh_neigh_dir[0].child    = t->neigh_dev;
	t->neigh_proto_dir[0].child    = t->neigh_neigh_dir;
	t->neigh_root_dir[0].child     = t->neigh_proto_dir;

	t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
	if (!t->sysctl_header) {
		err = -ENOBUFS;
		goto free_procname;
	}
	p->sysctl_table = t;
	return 0;

	/* error path */
 free_procname:
	kfree(dev_name);
 free:
	kfree(t);

	return err;
}
2625 
2626 void neigh_sysctl_unregister(struct neigh_parms *p)
2627 {
2628 	if (p->sysctl_table) {
2629 		struct neigh_sysctl_table *t = p->sysctl_table;
2630 		p->sysctl_table = NULL;
2631 		unregister_sysctl_table(t->sysctl_header);
2632 		kfree(t->neigh_dev[0].procname);
2633 		kfree(t);
2634 	}
2635 }
2636 
2637 #endif	/* CONFIG_SYSCTL */
2638 
2639 EXPORT_SYMBOL(__neigh_event_send);
2640 EXPORT_SYMBOL(neigh_add);
2641 EXPORT_SYMBOL(neigh_changeaddr);
2642 EXPORT_SYMBOL(neigh_compat_output);
2643 EXPORT_SYMBOL(neigh_connected_output);
2644 EXPORT_SYMBOL(neigh_create);
2645 EXPORT_SYMBOL(neigh_delete);
2646 EXPORT_SYMBOL(neigh_destroy);
2647 EXPORT_SYMBOL(neigh_dump_info);
2648 EXPORT_SYMBOL(neigh_event_ns);
2649 EXPORT_SYMBOL(neigh_ifdown);
2650 EXPORT_SYMBOL(neigh_lookup);
2651 EXPORT_SYMBOL(neigh_lookup_nodev);
2652 EXPORT_SYMBOL(neigh_parms_alloc);
2653 EXPORT_SYMBOL(neigh_parms_release);
2654 EXPORT_SYMBOL(neigh_rand_reach_time);
2655 EXPORT_SYMBOL(neigh_resolve_output);
2656 EXPORT_SYMBOL(neigh_table_clear);
2657 EXPORT_SYMBOL(neigh_table_init);
2658 EXPORT_SYMBOL(neigh_update);
2659 EXPORT_SYMBOL(neigh_update_hhs);
2660 EXPORT_SYMBOL(pneigh_enqueue);
2661 EXPORT_SYMBOL(pneigh_lookup);
2662 EXPORT_SYMBOL(neightbl_dump_info);
2663 EXPORT_SYMBOL(neightbl_set);
2664 
2665 #ifdef CONFIG_ARPD
2666 EXPORT_SYMBOL(neigh_app_ns);
2667 #endif
2668 #ifdef CONFIG_SYSCTL
2669 EXPORT_SYMBOL(neigh_sysctl_register);
2670 EXPORT_SYMBOL(neigh_sysctl_unregister);
2671 #endif
2672