/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while (0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected by the rwlock tbl->lock.

   - All scans/updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the
     network.  Doing so will deadlock if the backend/driver wants to
     use the neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state.  However, the same lock is also used to protect
   other entry fields:
    - the timer
    - the resolution queue

   Again, nothing clever should be done under neigh->lock; the most
   complicated operation we allow there is dev->hard_header.  It is
   assumed that dev->hard_header is simple and does not call back
   into the neighbour tables.

   The last lock is neigh_tbl_lock.  It is a pure SMP lock protecting
   the list of neighbour tables.  This list is used only in process
   context.
 */
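
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the "take a reference, then drop the table lock" rule from
 * the comment above.  example_slow_work() is a hypothetical helper
 * standing in for any non-trivial action.
 */
#if 0
static void example_nontrivial_action(struct neigh_table *tbl,
				      struct neighbour *n)
{
	read_lock_bh(&tbl->lock);
	neigh_hold(n);			/* pin the entry ...          */
	read_unlock_bh(&tbl->lock);	/* ... so the lock can go     */

	example_slow_work(n);		/* may re-enter the cache     */

	neigh_release(n);		/* drop the pin when finished */
}
#endif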

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * It is a uniform random distribution over the interval
 * (1/2)*base...(3/2)*base.  It corresponds to the default IPv6
 * settings and is not overridable, because it is a really reasonable
 * choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
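
/*
 * Editor's worked example (assuming HZ == 100): with base = 30 * HZ,
 * net_random() % base is uniform over [0, 3000) jiffies and base >> 1
 * adds 1500, so the result is uniform over [1500, 4500) jiffies,
 * i.e. 15..45 seconds -- exactly the (1/2)*base..(3/2)*base interval
 * described above.
 */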


static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
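
/*
 * Illustrative sketch (editor's addition): the invariant maintained by
 * the two helpers above -- a pending timer always owns one reference
 * on the entry.
 */
#if 0
static void example_timer_refcount(struct neighbour *n)
{
	neigh_add_timer(n, jiffies + HZ);	/* takes a reference */
	if (neigh_del_timer(n)) {
		/* The timer was still pending; neigh_del_timer() has
		 * already dropped the reference the timer owned. */
	}
}
#endif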

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
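
/*
 * Editor's note on the thresholds in neigh_alloc(), using the common
 * ARP defaults (gc_thresh2 = 512, gc_thresh3 = 1024) as an example:
 * below 512 entries allocation is unconditional; from 512 up to 1023
 * a forced GC is attempted first, but only if the table has not been
 * flushed within the last 5 seconds; at 1024 and above the allocation
 * fails unless the forced GC manages to free at least one entry.
 */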

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, dev);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, NULL);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(neigh_create);
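
/*
 * Illustrative sketch (editor's addition): the usual lookup-or-create
 * pattern built from the two functions above.  This is essentially
 * what the __neigh_lookup_errno() helper in <net/neighbour.h> does.
 */
#if 0
static struct neighbour *example_resolve(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)				/* not cached yet: create it */
		n = neigh_create(tbl, pkey, dev);
	return n;			/* held entry or ERR_PTR()   */
}
#endif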

static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
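
/*
 * Editor's worked example: pneigh_hash() folds the last four key bytes
 * into one of 16 buckets.  For an input word of 0xc0a80001, the value
 * becomes 0xc0a8c0a9 after ^= >> 16, then 0xc0686869 after ^= >> 8,
 * then 0xcc6eeeef after ^= >> 4; masking with PNEIGH_HASHMASK selects
 * bucket 0xf.
 */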

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

#ifdef CONFIG_NET_NS
	n->net = hold_net(net);
#endif
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
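
/*
 * Illustrative sketch (editor's addition): the two modes of
 * pneigh_lookup().  With creat == 0 it is a pure lookup; with
 * creat == 1 it may allocate, which is why the RTNL lock must be held
 * in that case (see the ASSERT_RTNL() above).
 */
#if 0
static void example_pneigh_modes(struct neigh_table *tbl, struct net *net,
				 const void *pkey, struct net_device *dev)
{
	struct pneigh_entry *pn;

	pn = pneigh_lookup(tbl, net, pkey, dev, 0);	/* lookup only */
	if (!pn) {
		rtnl_lock();
		pn = pneigh_lookup(tbl, net, pkey, dev, 1); /* may create */
		rtnl_unlock();
	}
}
#endif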


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be unlinked from the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable the fast path.

   Called with the neigh write lock held.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable the fast path.

   Called with the neigh write lock held.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
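
/*
 * Editor's worked example, using the usual ARP defaults
 * (ucast_probes = 3, app_probes = 0, mcast_probes = 3): an entry in
 * NUD_PROBE gives up after 3 unicast probes, while NUD_INCOMPLETE
 * resolution is allowed 3 + 0 + 3 = 6 probes in total.
 */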

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot: the error_report() callback
		   is a complicated routine and, in particular, it can
		   hit this same neighbour entry!  So we are careful
		   here to avoid an endless loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb = skb_copy(skb, GFP_ATOMIC);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
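
/*
 * Editor's summary of the transitions driven above: NUD_NONE moves to
 * NUD_INCOMPLETE and starts probing (or straight to NUD_FAILED when no
 * multicast/application probes are configured), and NUD_STALE moves to
 * NUD_DELAY; entries already in NUD_CONNECTED, NUD_DELAY or NUD_PROBE
 * are left untouched.  The function returns 1 when the skb was queued
 * pending resolution or dropped on failure, and 0 when the caller may
 * transmit immediately.
 */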

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}



/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if none is supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
				lladdr as suspect instead of overriding
				it if the two differ.  It also allows
				the current state to be retained if the
				lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is
				known to be a router.

   The caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare the new lladdr with the cached one. */
	if (!dev->addr_len) {
		/* First case: the device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: something is already cached
		   and a new address is proposed:
		   - compare the new and old addresses
		   - if they differ, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
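
/*
 * Illustrative sketch (editor's addition): an administrative update,
 * matching what the RTM_DELNEIGH handler below does -- forcing an
 * entry to NUD_FAILED regardless of its current state or lladdr.
 */
#if 0
static void example_admin_invalidate(struct neighbour *neigh)
{
	neigh_update(neigh, NULL, NUD_FAILED,
		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}
#endif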

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old
   dev_queue_xmit path worked, e.g. if you want to override the normal
   output path (eql, shaper), but resolution has not been done yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net;

	net = dev_net(dev);
	ref = lookup_neigh_params(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
#ifdef CONFIG_NET_NS
		p->net = hold_net(net);
#endif
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

#ifdef CONFIG_NET_NS
	tbl->parms.net = &init_net;
#endif
	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = proc_create_data(tbl->id, 0, init_net.proc_net_stat,
				    &neigh_stat_seq_fops, tbl);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
EXPORT_SYMBOL(neigh_table_init_no_netlink);

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1491 	del_timer_sync(&tbl->gc_timer);
1492 	del_timer_sync(&tbl->proxy_timer);
1493 	pneigh_queue_purge(&tbl->proxy_queue);
1494 	neigh_ifdown(tbl, NULL);
1495 	if (atomic_read(&tbl->entries))
1496 		printk(KERN_CRIT "neighbour leakage\n");
1497 	write_lock(&neigh_tbl_lock);
1498 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1499 		if (*tp == tbl) {
1500 			*tp = tbl->next;
1501 			break;
1502 		}
1503 	}
1504 	write_unlock(&neigh_tbl_lock);
1505 
1506 	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1507 	tbl->hash_buckets = NULL;
1508 
1509 	kfree(tbl->phash_buckets);
1510 	tbl->phash_buckets = NULL;
1511 
1512 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1513 
1514 	free_percpu(tbl->stats);
1515 	tbl->stats = NULL;
1516 
1517 	kmem_cache_destroy(tbl->kmem_cachep);
1518 	tbl->kmem_cachep = NULL;
1519 
1520 	return 0;
1521 }
1522 EXPORT_SYMBOL(neigh_table_clear);
1523 
1524 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1525 {
1526 	struct net *net = sock_net(skb->sk);
1527 	struct ndmsg *ndm;
1528 	struct nlattr *dst_attr;
1529 	struct neigh_table *tbl;
1530 	struct net_device *dev = NULL;
1531 	int err = -EINVAL;
1532 
1533 	if (nlmsg_len(nlh) < sizeof(*ndm))
1534 		goto out;
1535 
1536 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1537 	if (dst_attr == NULL)
1538 		goto out;
1539 
1540 	ndm = nlmsg_data(nlh);
1541 	if (ndm->ndm_ifindex) {
1542 		dev = dev_get_by_index(net, ndm->ndm_ifindex);
1543 		if (dev == NULL) {
1544 			err = -ENODEV;
1545 			goto out;
1546 		}
1547 	}
1548 
1549 	read_lock(&neigh_tbl_lock);
1550 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1551 		struct neighbour *neigh;
1552 
1553 		if (tbl->family != ndm->ndm_family)
1554 			continue;
1555 		read_unlock(&neigh_tbl_lock);
1556 
1557 		if (nla_len(dst_attr) < tbl->key_len)
1558 			goto out_dev_put;
1559 
1560 		if (ndm->ndm_flags & NTF_PROXY) {
1561 			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1562 			goto out_dev_put;
1563 		}
1564 
1565 		if (dev == NULL)
1566 			goto out_dev_put;
1567 
1568 		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1569 		if (neigh == NULL) {
1570 			err = -ENOENT;
1571 			goto out_dev_put;
1572 		}
1573 
1574 		err = neigh_update(neigh, NULL, NUD_FAILED,
1575 				   NEIGH_UPDATE_F_OVERRIDE |
1576 				   NEIGH_UPDATE_F_ADMIN);
1577 		neigh_release(neigh);
1578 		goto out_dev_put;
1579 	}
1580 	read_unlock(&neigh_tbl_lock);
1581 	err = -EAFNOSUPPORT;
1582 
1583 out_dev_put:
1584 	if (dev)
1585 		dev_put(dev);
1586 out:
1587 	return err;
1588 }
1589 
1590 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1591 {
1592 	struct net *net = sock_net(skb->sk);
1593 	struct ndmsg *ndm;
1594 	struct nlattr *tb[NDA_MAX+1];
1595 	struct neigh_table *tbl;
1596 	struct net_device *dev = NULL;
1597 	int err;
1598 
1599 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1600 	if (err < 0)
1601 		goto out;
1602 
1603 	err = -EINVAL;
1604 	if (tb[NDA_DST] == NULL)
1605 		goto out;
1606 
1607 	ndm = nlmsg_data(nlh);
1608 	if (ndm->ndm_ifindex) {
1609 		dev = dev_get_by_index(net, ndm->ndm_ifindex);
1610 		if (dev == NULL) {
1611 			err = -ENODEV;
1612 			goto out;
1613 		}
1614 
1615 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1616 			goto out_dev_put;
1617 	}
1618 
1619 	read_lock(&neigh_tbl_lock);
1620 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1621 		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1622 		struct neighbour *neigh;
1623 		void *dst, *lladdr;
1624 
1625 		if (tbl->family != ndm->ndm_family)
1626 			continue;
1627 		read_unlock(&neigh_tbl_lock);
1628 
1629 		if (nla_len(tb[NDA_DST]) < tbl->key_len)
1630 			goto out_dev_put;
1631 		dst = nla_data(tb[NDA_DST]);
1632 		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1633 
1634 		if (ndm->ndm_flags & NTF_PROXY) {
1635 			struct pneigh_entry *pn;
1636 
1637 			err = -ENOBUFS;
1638 			pn = pneigh_lookup(tbl, net, dst, dev, 1);
1639 			if (pn) {
1640 				pn->flags = ndm->ndm_flags;
1641 				err = 0;
1642 			}
1643 			goto out_dev_put;
1644 		}
1645 
1646 		if (dev == NULL)
1647 			goto out_dev_put;
1648 
1649 		neigh = neigh_lookup(tbl, dst, dev);
1650 		if (neigh == NULL) {
1651 			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1652 				err = -ENOENT;
1653 				goto out_dev_put;
1654 			}
1655 
1656 			neigh = __neigh_lookup_errno(tbl, dst, dev);
1657 			if (IS_ERR(neigh)) {
1658 				err = PTR_ERR(neigh);
1659 				goto out_dev_put;
1660 			}
1661 		} else {
1662 			if (nlh->nlmsg_flags & NLM_F_EXCL) {
1663 				err = -EEXIST;
1664 				neigh_release(neigh);
1665 				goto out_dev_put;
1666 			}
1667 
1668 			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1669 				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1670 		}
1671 
1672 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1673 		neigh_release(neigh);
1674 		goto out_dev_put;
1675 	}
1676 
1677 	read_unlock(&neigh_tbl_lock);
1678 	err = -EAFNOSUPPORT;
1679 
1680 out_dev_put:
1681 	if (dev)
1682 		dev_put(dev);
1683 out:
1684 	return err;
1685 }
1686 
1687 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1688 {
1689 	struct nlattr *nest;
1690 
1691 	nest = nla_nest_start(skb, NDTA_PARMS);
1692 	if (nest == NULL)
1693 		return -ENOBUFS;
1694 
1695 	if (parms->dev)
1696 		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1697 
1698 	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1699 	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1700 	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1701 	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1702 	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1703 	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1704 	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1705 	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1706 		      parms->base_reachable_time);
1707 	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1708 	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1709 	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1710 	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1711 	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1712 	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1713 
1714 	return nla_nest_end(skb, nest);
1715 
1716 nla_put_failure:
1717 	return nla_nest_cancel(skb, nest);
1718 }
1719 
1720 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1721 			      u32 pid, u32 seq, int type, int flags)
1722 {
1723 	struct nlmsghdr *nlh;
1724 	struct ndtmsg *ndtmsg;
1725 
1726 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1727 	if (nlh == NULL)
1728 		return -EMSGSIZE;
1729 
1730 	ndtmsg = nlmsg_data(nlh);
1731 
1732 	read_lock_bh(&tbl->lock);
1733 	ndtmsg->ndtm_family = tbl->family;
1734 	ndtmsg->ndtm_pad1   = 0;
1735 	ndtmsg->ndtm_pad2   = 0;
1736 
1737 	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1738 	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1739 	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1740 	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1741 	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1742 
1743 	{
1744 		unsigned long now = jiffies;
1745 		unsigned int flush_delta = now - tbl->last_flush;
1746 		unsigned int rand_delta = now - tbl->last_rand;
1747 
1748 		struct ndt_config ndc = {
1749 			.ndtc_key_len		= tbl->key_len,
1750 			.ndtc_entry_size	= tbl->entry_size,
1751 			.ndtc_entries		= atomic_read(&tbl->entries),
1752 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1753 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1754 			.ndtc_hash_rnd		= tbl->hash_rnd,
1755 			.ndtc_hash_mask		= tbl->hash_mask,
1756 			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
1757 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1758 		};
1759 
1760 		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1761 	}
1762 
1763 	{
1764 		int cpu;
1765 		struct ndt_stats ndst;
1766 
1767 		memset(&ndst, 0, sizeof(ndst));
1768 
1769 		for_each_possible_cpu(cpu) {
1770 			struct neigh_statistics	*st;
1771 
1772 			st = per_cpu_ptr(tbl->stats, cpu);
1773 			ndst.ndts_allocs		+= st->allocs;
1774 			ndst.ndts_destroys		+= st->destroys;
1775 			ndst.ndts_hash_grows		+= st->hash_grows;
1776 			ndst.ndts_res_failed		+= st->res_failed;
1777 			ndst.ndts_lookups		+= st->lookups;
1778 			ndst.ndts_hits			+= st->hits;
1779 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1780 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1781 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1782 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1783 		}
1784 
1785 		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1786 	}
1787 
1788 	BUG_ON(tbl->parms.dev);
1789 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1790 		goto nla_put_failure;
1791 
1792 	read_unlock_bh(&tbl->lock);
1793 	return nlmsg_end(skb, nlh);
1794 
1795 nla_put_failure:
1796 	read_unlock_bh(&tbl->lock);
1797 	nlmsg_cancel(skb, nlh);
1798 	return -EMSGSIZE;
1799 }
1800 
1801 static int neightbl_fill_param_info(struct sk_buff *skb,
1802 				    struct neigh_table *tbl,
1803 				    struct neigh_parms *parms,
1804 				    u32 pid, u32 seq, int type,
1805 				    unsigned int flags)
1806 {
1807 	struct ndtmsg *ndtmsg;
1808 	struct nlmsghdr *nlh;
1809 
1810 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1811 	if (nlh == NULL)
1812 		return -EMSGSIZE;
1813 
1814 	ndtmsg = nlmsg_data(nlh);
1815 
1816 	read_lock_bh(&tbl->lock);
1817 	ndtmsg->ndtm_family = tbl->family;
1818 	ndtmsg->ndtm_pad1   = 0;
1819 	ndtmsg->ndtm_pad2   = 0;
1820 
1821 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1822 	    neightbl_fill_parms(skb, parms) < 0)
1823 		goto errout;
1824 
1825 	read_unlock_bh(&tbl->lock);
1826 	return nlmsg_end(skb, nlh);
1827 errout:
1828 	read_unlock_bh(&tbl->lock);
1829 	nlmsg_cancel(skb, nlh);
1830 	return -EMSGSIZE;
1831 }
1832 
1833 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1834 	[NDTA_NAME]		= { .type = NLA_STRING },
1835 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1836 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1837 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1838 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1839 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1840 };
1841 
1842 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1843 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1844 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1845 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1846 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1847 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1848 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1849 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1850 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1851 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1852 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1853 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1854 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1855 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1856 };
1857 
1858 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1859 {
1860 	struct net *net = sock_net(skb->sk);
1861 	struct neigh_table *tbl;
1862 	struct ndtmsg *ndtmsg;
1863 	struct nlattr *tb[NDTA_MAX+1];
1864 	int err;
1865 
1866 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1867 			  nl_neightbl_policy);
1868 	if (err < 0)
1869 		goto errout;
1870 
1871 	if (tb[NDTA_NAME] == NULL) {
1872 		err = -EINVAL;
1873 		goto errout;
1874 	}
1875 
1876 	ndtmsg = nlmsg_data(nlh);
1877 	read_lock(&neigh_tbl_lock);
1878 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1879 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1880 			continue;
1881 
1882 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1883 			break;
1884 	}
1885 
1886 	if (tbl == NULL) {
1887 		err = -ENOENT;
1888 		goto errout_locked;
1889 	}
1890 
1891 	/*
1892 	 * We acquire tbl->lock to be nice to the periodic timers and
1893 	 * make sure they always see a consistent set of values.
1894 	 */
1895 	write_lock_bh(&tbl->lock);
1896 
1897 	if (tb[NDTA_PARMS]) {
1898 		struct nlattr *tbp[NDTPA_MAX+1];
1899 		struct neigh_parms *p;
1900 		int i, ifindex = 0;
1901 
1902 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1903 				       nl_ntbl_parm_policy);
1904 		if (err < 0)
1905 			goto errout_tbl_lock;
1906 
1907 		if (tbp[NDTPA_IFINDEX])
1908 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1909 
1910 		p = lookup_neigh_params(tbl, net, ifindex);
1911 		if (p == NULL) {
1912 			err = -ENOENT;
1913 			goto errout_tbl_lock;
1914 		}
1915 
1916 		for (i = 1; i <= NDTPA_MAX; i++) {
1917 			if (tbp[i] == NULL)
1918 				continue;
1919 
1920 			switch (i) {
1921 			case NDTPA_QUEUE_LEN:
1922 				p->queue_len = nla_get_u32(tbp[i]);
1923 				break;
1924 			case NDTPA_PROXY_QLEN:
1925 				p->proxy_qlen = nla_get_u32(tbp[i]);
1926 				break;
1927 			case NDTPA_APP_PROBES:
1928 				p->app_probes = nla_get_u32(tbp[i]);
1929 				break;
1930 			case NDTPA_UCAST_PROBES:
1931 				p->ucast_probes = nla_get_u32(tbp[i]);
1932 				break;
1933 			case NDTPA_MCAST_PROBES:
1934 				p->mcast_probes = nla_get_u32(tbp[i]);
1935 				break;
1936 			case NDTPA_BASE_REACHABLE_TIME:
1937 				p->base_reachable_time = nla_get_msecs(tbp[i]);
1938 				break;
1939 			case NDTPA_GC_STALETIME:
1940 				p->gc_staletime = nla_get_msecs(tbp[i]);
1941 				break;
1942 			case NDTPA_DELAY_PROBE_TIME:
1943 				p->delay_probe_time = nla_get_msecs(tbp[i]);
1944 				break;
1945 			case NDTPA_RETRANS_TIME:
1946 				p->retrans_time = nla_get_msecs(tbp[i]);
1947 				break;
1948 			case NDTPA_ANYCAST_DELAY:
1949 				p->anycast_delay = nla_get_msecs(tbp[i]);
1950 				break;
1951 			case NDTPA_PROXY_DELAY:
1952 				p->proxy_delay = nla_get_msecs(tbp[i]);
1953 				break;
1954 			case NDTPA_LOCKTIME:
1955 				p->locktime = nla_get_msecs(tbp[i]);
1956 				break;
1957 			}
1958 		}
1959 	}
1960 
1961 	if (tb[NDTA_THRESH1])
1962 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1963 
1964 	if (tb[NDTA_THRESH2])
1965 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1966 
1967 	if (tb[NDTA_THRESH3])
1968 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1969 
1970 	if (tb[NDTA_GC_INTERVAL])
1971 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1972 
1973 	err = 0;
1974 
1975 errout_tbl_lock:
1976 	write_unlock_bh(&tbl->lock);
1977 errout_locked:
1978 	read_unlock(&neigh_tbl_lock);
1979 errout:
1980 	return err;
1981 }
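
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * neightbl_set() above consumes RTM_SETNEIGHTBL requests.  A minimal
 * userspace message, assuming the usual rtnetlink layout from
 * <linux/neighbour.h>, would look roughly like:
 *
 *	struct {
 *		struct nlmsghdr	nlh;
 *		struct ndtmsg	ndtm;
 *		char		attrs[64];	// NDTA_NAME, NDTA_THRESH1, ...
 *	} req = {
 *		.nlh.nlmsg_type   = RTM_SETNEIGHTBL,
 *		.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_ACK,
 *		.ndtm.ndtm_family = AF_INET,
 *	};
 *	// append an NDTA_NAME attribute (e.g. "arp_cache") and, optionally,
 *	// a nested NDTA_PARMS block, then send over a NETLINK_ROUTE socket.
 *
 * NDTA_NAME is mandatory; the family in ndtmsg only narrows the match.
 */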
1982 
1983 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1984 {
1985 	struct net *net = sock_net(skb->sk);
1986 	int family, tidx, nidx = 0;
1987 	int tbl_skip = cb->args[0];
1988 	int neigh_skip = cb->args[1];
1989 	struct neigh_table *tbl;
1990 
1991 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1992 
1993 	read_lock(&neigh_tbl_lock);
1994 	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1995 		struct neigh_parms *p;
1996 
1997 		if (tidx < tbl_skip || (family && tbl->family != family))
1998 			continue;
1999 
2000 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2001 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2002 				       NLM_F_MULTI) <= 0)
2003 			break;
2004 
2005 		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2006 			if (!net_eq(neigh_parms_net(p), net))
2007 				continue;
2008 
2009 			if (nidx++ < neigh_skip)
2010 				continue;
2011 
2012 			if (neightbl_fill_param_info(skb, tbl, p,
2013 						     NETLINK_CB(cb->skb).pid,
2014 						     cb->nlh->nlmsg_seq,
2015 						     RTM_NEWNEIGHTBL,
2016 						     NLM_F_MULTI) <= 0)
2017 				goto out;
2018 		}
2019 
2020 		neigh_skip = 0;
2021 	}
2022 out:
2023 	read_unlock(&neigh_tbl_lock);
2024 	cb->args[0] = tidx;
2025 	cb->args[1] = nidx;
2026 
2027 	return skb->len;
2028 }
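
/*
 * Editor's note: like every rtnetlink dump callback, neightbl_dump_info()
 * is re-invoked until it adds nothing more to the skb.  cb->args[0] and
 * cb->args[1] persist across invocations and record how many tables and
 * per-device parms entries were already emitted, so a dump that fills one
 * skb resumes where it stopped.  The inner walk starts at tbl->parms.next
 * because the table's default parms are carried in the table-level message
 * itself (neightbl_fill_info).
 */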
2029 
2030 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2031 			   u32 pid, u32 seq, int type, unsigned int flags)
2032 {
2033 	unsigned long now = jiffies;
2034 	struct nda_cacheinfo ci;
2035 	struct nlmsghdr *nlh;
2036 	struct ndmsg *ndm;
2037 
2038 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2039 	if (nlh == NULL)
2040 		return -EMSGSIZE;
2041 
2042 	ndm = nlmsg_data(nlh);
2043 	ndm->ndm_family	 = neigh->ops->family;
2044 	ndm->ndm_pad1    = 0;
2045 	ndm->ndm_pad2    = 0;
2046 	ndm->ndm_flags	 = neigh->flags;
2047 	ndm->ndm_type	 = neigh->type;
2048 	ndm->ndm_ifindex = neigh->dev->ifindex;
2049 
2050 	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2051 
2052 	read_lock_bh(&neigh->lock);
2053 	ndm->ndm_state	 = neigh->nud_state;
2054 	if ((neigh->nud_state & NUD_VALID) &&
2055 	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2056 		read_unlock_bh(&neigh->lock);
2057 		goto nla_put_failure;
2058 	}
2059 
2060 	ci.ndm_used	 = now - neigh->used;
2061 	ci.ndm_confirmed = now - neigh->confirmed;
2062 	ci.ndm_updated	 = now - neigh->updated;
2063 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2064 	read_unlock_bh(&neigh->lock);
2065 
2066 	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2067 	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2068 
2069 	return nlmsg_end(skb, nlh);
2070 
2071 nla_put_failure:
2072 	nlmsg_cancel(skb, nlh);
2073 	return -EMSGSIZE;
2074 }
2075 
2076 static void neigh_update_notify(struct neighbour *neigh)
2077 {
2078 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2079 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2080 }
2081 
2082 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2083 			    struct netlink_callback *cb)
2084 {
2085 	struct net *net = sock_net(skb->sk);
2086 	struct neighbour *n;
2087 	int rc, h, s_h = cb->args[1];
2088 	int idx, s_idx = idx = cb->args[2];
2089 
2090 	read_lock_bh(&tbl->lock);
2091 	for (h = 0; h <= tbl->hash_mask; h++) {
2092 		if (h < s_h)
2093 			continue;
2094 		if (h > s_h)
2095 			s_idx = 0;
2096 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2097 			int lidx;
2098 			if (dev_net(n->dev) != net)
2099 				continue;
2100 			lidx = idx++;
2101 			if (lidx < s_idx)
2102 				continue;
2103 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2104 					    cb->nlh->nlmsg_seq,
2105 					    RTM_NEWNEIGH,
2106 					    NLM_F_MULTI) <= 0) {
2107 				read_unlock_bh(&tbl->lock);
2108 				rc = -1;
2109 				goto out;
2110 			}
2111 		}
2112 	}
2113 	read_unlock_bh(&tbl->lock);
2114 	rc = skb->len;
2115 out:
2116 	cb->args[1] = h;
2117 	cb->args[2] = idx;
2118 	return rc;
2119 }
2120 
2121 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2122 {
2123 	struct neigh_table *tbl;
2124 	int t, family, s_t;
2125 
2126 	read_lock(&neigh_tbl_lock);
2127 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2128 	s_t = cb->args[0];
2129 
2130 	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2131 		if (t < s_t || (family && tbl->family != family))
2132 			continue;
2133 		if (t > s_t)
2134 			memset(&cb->args[1], 0, sizeof(cb->args) -
2135 						sizeof(cb->args[0]));
2136 		if (neigh_dump_table(tbl, skb, cb) < 0)
2137 			break;
2138 	}
2139 	read_unlock(&neigh_tbl_lock);
2140 
2141 	cb->args[0] = t;
2142 	return skb->len;
2143 }
2144 
2145 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2146 {
2147 	int chain;
2148 
2149 	read_lock_bh(&tbl->lock);
2150 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
2151 		struct neighbour *n;
2152 
2153 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
2154 			cb(n, cookie);
2155 	}
2156 	read_unlock_bh(&tbl->lock);
2157 }
2158 EXPORT_SYMBOL(neigh_for_each);
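
/*
 * Editor's note -- hedged usage sketch; the caller below is hypothetical,
 * not from this file.  A protocol can visit every entry in its table:
 *
 *	static void neigh_count_valid(struct neighbour *n, void *cookie)
 *	{
 *		if (n->nud_state & NUD_VALID)
 *			(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int valid = 0;
 *	neigh_for_each(&arp_tbl, neigh_count_valid, &valid);
 *
 * The callback runs with tbl->lock read-held and BHs disabled, so it must
 * not sleep or attempt to take tbl->lock itself.
 */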
2159 
2160 /* The tbl->lock must be held as a writer and BH disabled. */
2161 void __neigh_for_each_release(struct neigh_table *tbl,
2162 			      int (*cb)(struct neighbour *))
2163 {
2164 	int chain;
2165 
2166 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
2167 		struct neighbour *n, **np;
2168 
2169 		np = &tbl->hash_buckets[chain];
2170 		while ((n = *np) != NULL) {
2171 			int release;
2172 
2173 			write_lock(&n->lock);
2174 			release = cb(n);
2175 			if (release) {
2176 				*np = n->next;
2177 				n->dead = 1;
2178 			} else
2179 				np = &n->next;
2180 			write_unlock(&n->lock);
2181 			if (release)
2182 				neigh_cleanup_and_release(n);
2183 		}
2184 	}
2185 }
2186 EXPORT_SYMBOL(__neigh_for_each_release);
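
/*
 * Editor's note -- hedged sketch of the calling pattern implied by the
 * comment above (the predicate shown is hypothetical):
 *
 *	static int release_if_failed(struct neighbour *n)
 *	{
 *		return n->nud_state == NUD_FAILED;
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_if_failed);
 *	write_unlock_bh(&tbl->lock);
 *
 * Entries for which the callback returns nonzero are unlinked, marked
 * dead and handed to neigh_cleanup_and_release(); the rest stay chained.
 */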
2187 
2188 #ifdef CONFIG_PROC_FS
2189 
2190 static struct neighbour *neigh_get_first(struct seq_file *seq)
2191 {
2192 	struct neigh_seq_state *state = seq->private;
2193 	struct net *net = seq_file_net(seq);
2194 	struct neigh_table *tbl = state->tbl;
2195 	struct neighbour *n = NULL;
2196 	int bucket = state->bucket;
2197 
2198 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2199 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2200 		n = tbl->hash_buckets[bucket];
2201 
2202 		while (n) {
2203 			if (!net_eq(dev_net(n->dev), net))
2204 				goto next;
2205 			if (state->neigh_sub_iter) {
2206 				loff_t fakep = 0;
2207 				void *v;
2208 
2209 				v = state->neigh_sub_iter(state, n, &fakep);
2210 				if (!v)
2211 					goto next;
2212 			}
2213 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2214 				break;
2215 			if (n->nud_state & ~NUD_NOARP)
2216 				break;
2217 		next:
2218 			n = n->next;
2219 		}
2220 
2221 		if (n)
2222 			break;
2223 	}
2224 	state->bucket = bucket;
2225 
2226 	return n;
2227 }
2228 
2229 static struct neighbour *neigh_get_next(struct seq_file *seq,
2230 					struct neighbour *n,
2231 					loff_t *pos)
2232 {
2233 	struct neigh_seq_state *state = seq->private;
2234 	struct net *net = seq_file_net(seq);
2235 	struct neigh_table *tbl = state->tbl;
2236 
2237 	if (state->neigh_sub_iter) {
2238 		void *v = state->neigh_sub_iter(state, n, pos);
2239 		if (v)
2240 			return n;
2241 	}
2242 	n = n->next;
2243 
2244 	while (1) {
2245 		while (n) {
2246 			if (!net_eq(dev_net(n->dev), net))
2247 				goto next;
2248 			if (state->neigh_sub_iter) {
2249 				void *v = state->neigh_sub_iter(state, n, pos);
2250 				if (v)
2251 					return n;
2252 				goto next;
2253 			}
2254 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2255 				break;
2256 
2257 			if (n->nud_state & ~NUD_NOARP)
2258 				break;
2259 		next:
2260 			n = n->next;
2261 		}
2262 
2263 		if (n)
2264 			break;
2265 
2266 		if (++state->bucket > tbl->hash_mask)
2267 			break;
2268 
2269 		n = tbl->hash_buckets[state->bucket];
2270 	}
2271 
2272 	if (n && pos)
2273 		--(*pos);
2274 	return n;
2275 }
2276 
2277 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2278 {
2279 	struct neighbour *n = neigh_get_first(seq);
2280 
2281 	if (n) {
2282 		while (*pos) {
2283 			n = neigh_get_next(seq, n, pos);
2284 			if (!n)
2285 				break;
2286 		}
2287 	}
2288 	return *pos ? NULL : n;
2289 }
2290 
2291 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2292 {
2293 	struct neigh_seq_state *state = seq->private;
2294 	struct net *net = seq_file_net(seq);
2295 	struct neigh_table *tbl = state->tbl;
2296 	struct pneigh_entry *pn = NULL;
2297 	int bucket = state->bucket;
2298 
2299 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2300 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2301 		pn = tbl->phash_buckets[bucket];
2302 		while (pn && !net_eq(pneigh_net(pn), net))
2303 			pn = pn->next;
2304 		if (pn)
2305 			break;
2306 	}
2307 	state->bucket = bucket;
2308 
2309 	return pn;
2310 }
2311 
2312 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2313 					    struct pneigh_entry *pn,
2314 					    loff_t *pos)
2315 {
2316 	struct neigh_seq_state *state = seq->private;
2317 	struct net *net = seq_file_net(seq);
2318 	struct neigh_table *tbl = state->tbl;
2319 
2320 	pn = pn->next;
2321 	while (!pn) {
2322 		if (++state->bucket > PNEIGH_HASHMASK)
2323 			break;
2324 		pn = tbl->phash_buckets[state->bucket];
2325 		while (pn && !net_eq(pneigh_net(pn), net))
2326 			pn = pn->next;
2327 		if (pn)
2328 			break;
2329 	}
2330 
2331 	if (pn && pos)
2332 		--(*pos);
2333 
2334 	return pn;
2335 }
2336 
2337 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2338 {
2339 	struct pneigh_entry *pn = pneigh_get_first(seq);
2340 
2341 	if (pn) {
2342 		while (*pos) {
2343 			pn = pneigh_get_next(seq, pn, pos);
2344 			if (!pn)
2345 				break;
2346 		}
2347 	}
2348 	return *pos ? NULL : pn;
2349 }
2350 
2351 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2352 {
2353 	struct neigh_seq_state *state = seq->private;
2354 	void *rc;
2355 
2356 	rc = neigh_get_idx(seq, pos);
2357 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2358 		rc = pneigh_get_idx(seq, pos);
2359 
2360 	return rc;
2361 }
2362 
2363 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2364 	__acquires(tbl->lock)
2365 {
2366 	struct neigh_seq_state *state = seq->private;
2367 	loff_t pos_minus_one;
2368 
2369 	state->tbl = tbl;
2370 	state->bucket = 0;
2371 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2372 
2373 	read_lock_bh(&tbl->lock);
2374 
2375 	pos_minus_one = *pos - 1;
2376 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2377 }
2378 EXPORT_SYMBOL(neigh_seq_start);
2379 
2380 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2381 {
2382 	struct neigh_seq_state *state;
2383 	void *rc;
2384 
2385 	if (v == SEQ_START_TOKEN) {
2386 		rc = neigh_get_idx(seq, pos);
2387 		goto out;
2388 	}
2389 
2390 	state = seq->private;
2391 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2392 		rc = neigh_get_next(seq, v, NULL);
2393 		if (rc)
2394 			goto out;
2395 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2396 			rc = pneigh_get_first(seq);
2397 	} else {
2398 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2399 		rc = pneigh_get_next(seq, v, NULL);
2400 	}
2401 out:
2402 	++(*pos);
2403 	return rc;
2404 }
2405 EXPORT_SYMBOL(neigh_seq_next);
2406 
2407 void neigh_seq_stop(struct seq_file *seq, void *v)
2408 	__releases(tbl->lock)
2409 {
2410 	struct neigh_seq_state *state = seq->private;
2411 	struct neigh_table *tbl = state->tbl;
2412 
2413 	read_unlock_bh(&tbl->lock);
2414 }
2415 EXPORT_SYMBOL(neigh_seq_stop);
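
/*
 * Editor's note -- illustrative wiring, loosely modelled on how IPv4 ARP
 * consumes these helpers (names outside this file are assumptions):
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations arp_seq_ops = {
 *		.start = arp_seq_start,
 *		.next  = neigh_seq_next,
 *		.stop  = neigh_seq_stop,
 *		.show  = arp_seq_show,	// protocol-specific formatting
 *	};
 *
 * start/stop bracket the whole iteration with tbl->lock read-held, so the
 * ->show callback must not block.
 */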
2416 
2417 /* statistics via seq_file */
2418 
2419 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2420 {
2421 	struct proc_dir_entry *pde = seq->private;
2422 	struct neigh_table *tbl = pde->data;
2423 	int cpu;
2424 
2425 	if (*pos == 0)
2426 		return SEQ_START_TOKEN;
2427 
2428 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2429 		if (!cpu_possible(cpu))
2430 			continue;
2431 		*pos = cpu+1;
2432 		return per_cpu_ptr(tbl->stats, cpu);
2433 	}
2434 	return NULL;
2435 }
2436 
2437 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2438 {
2439 	struct proc_dir_entry *pde = seq->private;
2440 	struct neigh_table *tbl = pde->data;
2441 	int cpu;
2442 
2443 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2444 		if (!cpu_possible(cpu))
2445 			continue;
2446 		*pos = cpu+1;
2447 		return per_cpu_ptr(tbl->stats, cpu);
2448 	}
2449 	return NULL;
2450 }
2451 
2452 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2453 {
2454 
2455 }
2456 
2457 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2458 {
2459 	struct proc_dir_entry *pde = seq->private;
2460 	struct neigh_table *tbl = pde->data;
2461 	struct neigh_statistics *st = v;
2462 
2463 	if (v == SEQ_START_TOKEN) {
2464 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2465 		return 0;
2466 	}
2467 
2468 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2469 			"%08lx %08lx  %08lx %08lx\n",
2470 		   atomic_read(&tbl->entries),
2471 
2472 		   st->allocs,
2473 		   st->destroys,
2474 		   st->hash_grows,
2475 
2476 		   st->lookups,
2477 		   st->hits,
2478 
2479 		   st->res_failed,
2480 
2481 		   st->rcv_probes_mcast,
2482 		   st->rcv_probes_ucast,
2483 
2484 		   st->periodic_gc_runs,
2485 		   st->forced_gc_runs
2486 		   );
2487 
2488 	return 0;
2489 }
2490 
2491 static const struct seq_operations neigh_stat_seq_ops = {
2492 	.start	= neigh_stat_seq_start,
2493 	.next	= neigh_stat_seq_next,
2494 	.stop	= neigh_stat_seq_stop,
2495 	.show	= neigh_stat_seq_show,
2496 };
2497 
2498 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2499 {
2500 	int ret = seq_open(file, &neigh_stat_seq_ops);
2501 
2502 	if (!ret) {
2503 		struct seq_file *sf = file->private_data;
2504 		sf->private = PDE(inode);
2505 	}
2506 	return ret;
2507 }
2508 
2509 static const struct file_operations neigh_stat_seq_fops = {
2510 	.owner	 = THIS_MODULE,
2511 	.open 	 = neigh_stat_seq_open,
2512 	.read	 = seq_read,
2513 	.llseek	 = seq_lseek,
2514 	.release = seq_release,
2515 };
2516 
2517 #endif /* CONFIG_PROC_FS */
2518 
2519 static inline size_t neigh_nlmsg_size(void)
2520 {
2521 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2522 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2523 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2524 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2525 	       + nla_total_size(4); /* NDA_PROBES */
2526 }
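
/*
 * Editor's note -- a worked instance of the estimate above, assuming
 * MAX_ADDR_LEN == 32 and the usual 4-byte netlink attribute alignment:
 *
 *	NLMSG_ALIGN(sizeof(struct ndmsg))		 12
 *	nla_total_size(MAX_ADDR_LEN)	(NDA_DST)	 36
 *	nla_total_size(MAX_ADDR_LEN)	(NDA_LLADDR)	 36
 *	nla_total_size(sizeof(struct nda_cacheinfo))	 20
 *	nla_total_size(4)		(NDA_PROBES)	  8
 *							----
 *							112 bytes
 *
 * Both address attributes are sized for the worst case, so the estimate
 * holds for every family; that is why __neigh_notify() below treats
 * -EMSGSIZE from neigh_fill_info() as a bug.
 */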
2527 
2528 static void __neigh_notify(struct neighbour *n, int type, int flags)
2529 {
2530 	struct net *net = dev_net(n->dev);
2531 	struct sk_buff *skb;
2532 	int err = -ENOBUFS;
2533 
2534 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2535 	if (skb == NULL)
2536 		goto errout;
2537 
2538 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2539 	if (err < 0) {
2540 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2541 		WARN_ON(err == -EMSGSIZE);
2542 		kfree_skb(skb);
2543 		goto errout;
2544 	}
2545 	err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2546 errout:
2547 	if (err < 0)
2548 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2549 }
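
/*
 * Editor's note -- hedged userspace sketch of a listener for these
 * notifications (standard rtnetlink usage, not from this file):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_NEIGH,	// legacy mask for RTNLGRP_NEIGH
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	// recv() now yields the RTM_NEWNEIGH messages built by
 *	// neigh_fill_info() above.
 */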
2550 
2551 #ifdef CONFIG_ARPD
2552 void neigh_app_ns(struct neighbour *n)
2553 {
2554 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2555 }
2556 EXPORT_SYMBOL(neigh_app_ns);
2557 #endif /* CONFIG_ARPD */
2558 
2559 #ifdef CONFIG_SYSCTL
2560 
2561 static struct neigh_sysctl_table {
2562 	struct ctl_table_header *sysctl_header;
2563 	struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2564 	char *dev_name;
2565 } neigh_sysctl_template __read_mostly = {
2566 	.neigh_vars = {
2567 		{
2568 			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
2569 			.procname	= "mcast_solicit",
2570 			.maxlen		= sizeof(int),
2571 			.mode		= 0644,
2572 			.proc_handler	= &proc_dointvec,
2573 		},
2574 		{
2575 			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
2576 			.procname	= "ucast_solicit",
2577 			.maxlen		= sizeof(int),
2578 			.mode		= 0644,
2579 			.proc_handler	= &proc_dointvec,
2580 		},
2581 		{
2582 			.ctl_name	= NET_NEIGH_APP_SOLICIT,
2583 			.procname	= "app_solicit",
2584 			.maxlen		= sizeof(int),
2585 			.mode		= 0644,
2586 			.proc_handler	= &proc_dointvec,
2587 		},
2588 		{
2589 			.procname	= "retrans_time",
2590 			.maxlen		= sizeof(int),
2591 			.mode		= 0644,
2592 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2593 		},
2594 		{
2595 			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
2596 			.procname	= "base_reachable_time",
2597 			.maxlen		= sizeof(int),
2598 			.mode		= 0644,
2599 			.proc_handler	= &proc_dointvec_jiffies,
2600 			.strategy	= &sysctl_jiffies,
2601 		},
2602 		{
2603 			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
2604 			.procname	= "delay_first_probe_time",
2605 			.maxlen		= sizeof(int),
2606 			.mode		= 0644,
2607 			.proc_handler	= &proc_dointvec_jiffies,
2608 			.strategy	= &sysctl_jiffies,
2609 		},
2610 		{
2611 			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
2612 			.procname	= "gc_stale_time",
2613 			.maxlen		= sizeof(int),
2614 			.mode		= 0644,
2615 			.proc_handler	= &proc_dointvec_jiffies,
2616 			.strategy	= &sysctl_jiffies,
2617 		},
2618 		{
2619 			.ctl_name	= NET_NEIGH_UNRES_QLEN,
2620 			.procname	= "unres_qlen",
2621 			.maxlen		= sizeof(int),
2622 			.mode		= 0644,
2623 			.proc_handler	= &proc_dointvec,
2624 		},
2625 		{
2626 			.ctl_name	= NET_NEIGH_PROXY_QLEN,
2627 			.procname	= "proxy_qlen",
2628 			.maxlen		= sizeof(int),
2629 			.mode		= 0644,
2630 			.proc_handler	= &proc_dointvec,
2631 		},
2632 		{
2633 			.procname	= "anycast_delay",
2634 			.maxlen		= sizeof(int),
2635 			.mode		= 0644,
2636 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2637 		},
2638 		{
2639 			.procname	= "proxy_delay",
2640 			.maxlen		= sizeof(int),
2641 			.mode		= 0644,
2642 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2643 		},
2644 		{
2645 			.procname	= "locktime",
2646 			.maxlen		= sizeof(int),
2647 			.mode		= 0644,
2648 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2649 		},
2650 		{
2651 			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
2652 			.procname	= "retrans_time_ms",
2653 			.maxlen		= sizeof(int),
2654 			.mode		= 0644,
2655 			.proc_handler	= &proc_dointvec_ms_jiffies,
2656 			.strategy	= &sysctl_ms_jiffies,
2657 		},
2658 		{
2659 			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
2660 			.procname	= "base_reachable_time_ms",
2661 			.maxlen		= sizeof(int),
2662 			.mode		= 0644,
2663 			.proc_handler	= &proc_dointvec_ms_jiffies,
2664 			.strategy	= &sysctl_ms_jiffies,
2665 		},
2666 		{
2667 			.ctl_name	= NET_NEIGH_GC_INTERVAL,
2668 			.procname	= "gc_interval",
2669 			.maxlen		= sizeof(int),
2670 			.mode		= 0644,
2671 			.proc_handler	= &proc_dointvec_jiffies,
2672 			.strategy	= &sysctl_jiffies,
2673 		},
2674 		{
2675 			.ctl_name	= NET_NEIGH_GC_THRESH1,
2676 			.procname	= "gc_thresh1",
2677 			.maxlen		= sizeof(int),
2678 			.mode		= 0644,
2679 			.proc_handler	= &proc_dointvec,
2680 		},
2681 		{
2682 			.ctl_name	= NET_NEIGH_GC_THRESH2,
2683 			.procname	= "gc_thresh2",
2684 			.maxlen		= sizeof(int),
2685 			.mode		= 0644,
2686 			.proc_handler	= &proc_dointvec,
2687 		},
2688 		{
2689 			.ctl_name	= NET_NEIGH_GC_THRESH3,
2690 			.procname	= "gc_thresh3",
2691 			.maxlen		= sizeof(int),
2692 			.mode		= 0644,
2693 			.proc_handler	= &proc_dointvec,
2694 		},
2695 		{},
2696 	},
2697 };
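
/*
 * Editor's note: once neigh_sysctl_register() below instantiates this
 * template, each entry surfaces as a file such as
 * /proc/sys/net/ipv4/neigh/eth0/mcast_solicit.  The "ipv4" and "eth0"
 * components come from the p_name and dev arguments; "default" is used
 * when no device is given.
 */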
2698 
2699 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2700 			  int p_id, int pdev_id, char *p_name,
2701 			  proc_handler *handler, ctl_handler *strategy)
2702 {
2703 	struct neigh_sysctl_table *t;
2704 	const char *dev_name_source = NULL;
2705 
2706 #define NEIGH_CTL_PATH_ROOT	0
2707 #define NEIGH_CTL_PATH_PROTO	1
2708 #define NEIGH_CTL_PATH_NEIGH	2
2709 #define NEIGH_CTL_PATH_DEV	3
2710 
2711 	struct ctl_path neigh_path[] = {
2712 		{ .procname = "net",	 .ctl_name = CTL_NET, },
2713 		{ .procname = "proto",	 .ctl_name = 0, },
2714 		{ .procname = "neigh",	 .ctl_name = 0, },
2715 		{ .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2716 		{ },
2717 	};
2718 
2719 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2720 	if (!t)
2721 		goto err;
2722 
2723 	t->neigh_vars[0].data  = &p->mcast_probes;
2724 	t->neigh_vars[1].data  = &p->ucast_probes;
2725 	t->neigh_vars[2].data  = &p->app_probes;
2726 	t->neigh_vars[3].data  = &p->retrans_time;
2727 	t->neigh_vars[4].data  = &p->base_reachable_time;
2728 	t->neigh_vars[5].data  = &p->delay_probe_time;
2729 	t->neigh_vars[6].data  = &p->gc_staletime;
2730 	t->neigh_vars[7].data  = &p->queue_len;
2731 	t->neigh_vars[8].data  = &p->proxy_qlen;
2732 	t->neigh_vars[9].data  = &p->anycast_delay;
2733 	t->neigh_vars[10].data = &p->proxy_delay;
2734 	t->neigh_vars[11].data = &p->locktime;
2735 	t->neigh_vars[12].data = &p->retrans_time;
2736 	t->neigh_vars[13].data = &p->base_reachable_time;
2737 
2738 	if (dev) {
2739 		dev_name_source = dev->name;
2740 		neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
2741 		/* Terminate the table early */
2742 		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2743 	} else {
2744 		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2745 		t->neigh_vars[14].data = (int *)(p + 1);
2746 		t->neigh_vars[15].data = (int *)(p + 1) + 1;
2747 		t->neigh_vars[16].data = (int *)(p + 1) + 2;
2748 		t->neigh_vars[17].data = (int *)(p + 1) + 3;
2749 	}
2750 
2751 
2752 	if (handler || strategy) {
2753 		/* RetransTime */
2754 		t->neigh_vars[3].proc_handler = handler;
2755 		t->neigh_vars[3].strategy = strategy;
2756 		t->neigh_vars[3].extra1 = dev;
2757 		if (!strategy)
2758 			t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2759 		/* ReachableTime */
2760 		t->neigh_vars[4].proc_handler = handler;
2761 		t->neigh_vars[4].strategy = strategy;
2762 		t->neigh_vars[4].extra1 = dev;
2763 		if (!strategy)
2764 			t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2765 		/* RetransTime (in milliseconds)*/
2766 		t->neigh_vars[12].proc_handler = handler;
2767 		t->neigh_vars[12].strategy = strategy;
2768 		t->neigh_vars[12].extra1 = dev;
2769 		if (!strategy)
2770 			t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2771 		/* ReachableTime (in milliseconds) */
2772 		t->neigh_vars[13].proc_handler = handler;
2773 		t->neigh_vars[13].strategy = strategy;
2774 		t->neigh_vars[13].extra1 = dev;
2775 		if (!strategy)
2776 			t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
2777 	}
2778 
2779 	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2780 	if (!t->dev_name)
2781 		goto free;
2782 
2783 	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2784 	neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2785 	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2786 	neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2787 
2788 	t->sysctl_header =
2789 		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2790 	if (!t->sysctl_header)
2791 		goto free_procname;
2792 
2793 	p->sysctl_table = t;
2794 	return 0;
2795 
2796 free_procname:
2797 	kfree(t->dev_name);
2798 free:
2799 	kfree(t);
2800 err:
2801 	return -ENOBUFS;
2802 }
2803 EXPORT_SYMBOL(neigh_sysctl_register);
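
/*
 * Editor's note -- hedged sketch of a typical registration, loosely based
 * on how IPv4 ARP hooks in (exact arguments are the caller's choice):
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 *
 * Passing NULL for handler and strategy keeps the template's default proc
 * handlers; a protocol that must react to writes supplies its own handler,
 * which this function then wires to the four *_time variables.
 */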
2804 
2805 void neigh_sysctl_unregister(struct neigh_parms *p)
2806 {
2807 	if (p->sysctl_table) {
2808 		struct neigh_sysctl_table *t = p->sysctl_table;
2809 		p->sysctl_table = NULL;
2810 		unregister_sysctl_table(t->sysctl_header);
2811 		kfree(t->dev_name);
2812 		kfree(t);
2813 	}
2814 }
2815 EXPORT_SYMBOL(neigh_sysctl_unregister);
2816 
2817 #endif	/* CONFIG_SYSCTL */
2818 
2819 static int __init neigh_init(void)
2820 {
2821 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2822 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2823 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2824 
2825 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2826 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2827 
2828 	return 0;
2829 }
2830 
2831 subsys_initcall(neigh_init);
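
/*
 * Editor's note: subsys_initcall runs before fs/device initcalls, so these
 * PF_UNSPEC rtnetlink handlers are in place before protocols such as IPv4
 * ARP or IPv6 NDISC bring up their neighbour tables.
 */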
2832 
2833