/* xref: /openbmc/linux/net/core/neighbour.c (revision 732a675a) */
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while (0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send anything to the network.
     It would result in deadlocks if the backend/driver wanted to use
     the neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure that we allow is dev->hard_header.
   dev->hard_header is supposed to be simplistic and must not
   make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
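
/*
 * A minimal sketch of the rule above (illustrative only; "my_tbl", "key"
 * and do_slow_work() are placeholders, and find_entry_in_bucket() is a
 * hypothetical helper): take a reference under the table lock, drop the
 * lock, and only then do anything non-trivial with the entry.
 *
 *	struct neighbour *n;
 *
 *	read_lock_bh(&my_tbl->lock);
 *	n = find_entry_in_bucket(my_tbl, key);
 *	if (n)
 *		neigh_hold(n);
 *	read_unlock_bh(&my_tbl->lock);
 *
 *	if (n) {
 *		do_slow_work(n);	(safe: no table lock held)
 *		neigh_release(n);
 *	}
 */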

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * Returns a random value in the interval [base/2, 3*base/2).
 * This corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
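
/*
 * Worked example (sketch): with base = 30 * HZ, net_random() % base is
 * uniform over [0, 30 * HZ), so the result is uniform over
 * [15 * HZ, 45 * HZ), i.e. (1/2)*base ... (3/2)*base as stated above.
 */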

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
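
/*
 * Sketch of the threshold logic above, assuming the classic ARP defaults
 * gc_thresh1=128, gc_thresh2=512, gc_thresh3=1024:
 *
 *	entries < 512		- allocate immediately
 *	512 <= entries < 1024	- run forced GC first if the last flush was
 *				  more than 5 seconds ago, then allocate
 *	entries >= 1024		- run forced GC; fail with NULL if it could
 *				  not shrink the table
 */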

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, dev);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
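
/*
 * Example usage (sketch; arp_tbl is the IPv4 ARP table, error handling
 * abbreviated). The entry comes back with an extra reference that the
 * caller must drop:
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
 *
 *	if (n) {
 *		... use n->ha under n->lock ...
 *		neigh_release(n);
 *	}
 */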

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, NULL);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(neigh_create);
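
/*
 * Example usage (sketch): neigh_create() returns the entry with a
 * reference held, or an ERR_PTR() on failure. If an entry with the same
 * key/device already exists, that entry is returned instead of a
 * duplicate:
 *
 *	struct neighbour *n = neigh_create(&arp_tbl, &ip, dev);
 *
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);
 *	...
 *	neigh_release(n);
 */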

static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

#ifdef CONFIG_NET_NS
	n->net = hold_net(net);
#endif
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
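
/*
 * Example (sketch): proxy entries are what "ip neigh add proxy <addr>
 * dev <iface>" creates; the RTM_NEWNEIGH handler below reaches this
 * function with creat=1, which is why creation asserts the RTNL lock:
 *
 *	ASSERT_RTNL();
 *	pn = pneigh_lookup(&arp_tbl, net, &ip, dev, 1);
 *	if (!pn)
 *		return -ENOBUFS;
 */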

int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be removed from the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable the fast path.

   Called with the neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable the fast path.

   Called with the neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
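
/*
 * In short, the two helpers above switch the dispatch like this (sketch):
 *
 *	entry state		neigh->output		hh->hh_output
 *	----------------------------------------------------------------
 *	suspect (e.g. STALE)	ops->output		ops->output
 *	connected (REACHABLE)	ops->connected_output	ops->hh_output
 *
 * i.e. a connected entry transmits via the cached-header fast path,
 * while a suspect one falls back to the slow, re-resolving path.
 */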

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
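
/*
 * Worked example (sketch, assuming the classic ARP defaults
 * ucast_probes=3, app_probes=0, mcast_probes=3): an entry already in
 * NUD_PROBE gives up after 3 unicast probes, while initial resolution
 * (NUD_INCOMPLETE) may send up to 3 + 0 + 3 = 6 probes before the entry
 * is moved to NUD_FAILED.
 */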

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very delicate place. The error report routine
		   is very complicated; in particular, it can hit the same
		   neighbour entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb = skb_copy(skb, GFP_ATOMIC);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
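
/*
 * Summary of the transitions driven by this timer (sketch, following
 * RFC 2461 neighbour unreachability detection):
 *
 *	REACHABLE --(confirmation too old, recently used)--> DELAY
 *	REACHABLE --(confirmation too old, idle)-----------> STALE
 *	DELAY     --(confirmed in time)--------------------> REACHABLE
 *	DELAY     --(no confirmation)----------------------> PROBE
 *	INCOMPLETE/PROBE --(max probes exhausted)----------> FAILED
 *
 * FAILED entries are later reaped by the periodic GC once unreferenced.
 */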

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if none is supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
				lladdr as suspect instead of overriding
				it if it differs.
				It also allows retaining the current state
				if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				as a router.

   The caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare the new lladdr with the cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
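
/*
 * Example (sketch): the RTM_NEWNEIGH handler below installs an
 * administratively configured entry roughly like this -- the ADMIN flag
 * bypasses the NUD_NOARP/NUD_PERMANENT protection, and OVERRIDE permits
 * replacing an existing lladdr:
 *
 *	err = neigh_update(neigh, lladdr, NUD_PERMANENT,
 *			   NEIGH_UPDATE_F_OVERRIDE |
 *			   NEIGH_UPDATE_F_ADMIN);
 */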

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache	*hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper)
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
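
/*
 * How the output routines relate (sketch): protocols point their
 * neigh_ops at these, and neigh_suspect()/neigh_connect() switch between
 * them as the entry changes state.
 *
 *	neigh_resolve_output()   - slow path; may queue the skb and kick
 *				   off resolution before the header can
 *				   be filled in
 *	neigh_connected_output() - fills the header from neigh->ha on
 *				   every packet; no hh cache
 *	hh->hh_output            - fastest; copies a prebuilt cached
 *				   header
 */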

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net;

	net = dev_net(dev);
	ref = lookup_neigh_params(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
#ifdef CONFIG_NET_NS
		p->net = hold_net(net);
#endif
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

#ifdef CONFIG_NET_NS
	tbl->parms.net = &init_net;
#endif
	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = proc_create_data(tbl->id, 0, init_net.proc_net_stat,
				    &neigh_stat_seq_fops, tbl);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
EXPORT_SYMBOL(neigh_table_init_no_netlink);

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
EXPORT_SYMBOL(neigh_table_init);
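
/*
 * Example (sketch): a protocol registers its table once at init time,
 * the way IPv4 ARP does with arp_tbl; fields abbreviated and names are
 * placeholders, see net/ipv4/arp.c for the real initializer:
 *
 *	static struct neigh_table my_tbl = {
 *		.family		= AF_INET,
 *		.entry_size	= sizeof(struct neighbour) + 4,
 *		.key_len	= 4,
 *		.hash		= my_hash_fn,
 *		.constructor	= my_constructor,
 *		.id		= "my_cache",
 *	};
 *
 *	neigh_table_init(&my_tbl);
 */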

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
1858 
1859 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1860 {
1861 	struct net *net = sock_net(skb->sk);
1862 	struct neigh_table *tbl;
1863 	struct ndtmsg *ndtmsg;
1864 	struct nlattr *tb[NDTA_MAX+1];
1865 	int err;
1866 
1867 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1868 			  nl_neightbl_policy);
1869 	if (err < 0)
1870 		goto errout;
1871 
1872 	if (tb[NDTA_NAME] == NULL) {
1873 		err = -EINVAL;
1874 		goto errout;
1875 	}
1876 
1877 	ndtmsg = nlmsg_data(nlh);
1878 	read_lock(&neigh_tbl_lock);
1879 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1880 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1881 			continue;
1882 
1883 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1884 			break;
1885 	}
1886 
1887 	if (tbl == NULL) {
1888 		err = -ENOENT;
1889 		goto errout_locked;
1890 	}
1891 
1892 	/*
1893 	 * We acquire tbl->lock to be nice to the periodic timers and
1894 	 * make sure they always see a consistent set of values.
1895 	 */
1896 	write_lock_bh(&tbl->lock);
1897 
1898 	if (tb[NDTA_PARMS]) {
1899 		struct nlattr *tbp[NDTPA_MAX+1];
1900 		struct neigh_parms *p;
1901 		int i, ifindex = 0;
1902 
1903 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1904 				       nl_ntbl_parm_policy);
1905 		if (err < 0)
1906 			goto errout_tbl_lock;
1907 
1908 		if (tbp[NDTPA_IFINDEX])
1909 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1910 
1911 		p = lookup_neigh_params(tbl, net, ifindex);
1912 		if (p == NULL) {
1913 			err = -ENOENT;
1914 			goto errout_tbl_lock;
1915 		}
1916 
1917 		for (i = 1; i <= NDTPA_MAX; i++) {
1918 			if (tbp[i] == NULL)
1919 				continue;
1920 
1921 			switch (i) {
1922 			case NDTPA_QUEUE_LEN:
1923 				p->queue_len = nla_get_u32(tbp[i]);
1924 				break;
1925 			case NDTPA_PROXY_QLEN:
1926 				p->proxy_qlen = nla_get_u32(tbp[i]);
1927 				break;
1928 			case NDTPA_APP_PROBES:
1929 				p->app_probes = nla_get_u32(tbp[i]);
1930 				break;
1931 			case NDTPA_UCAST_PROBES:
1932 				p->ucast_probes = nla_get_u32(tbp[i]);
1933 				break;
1934 			case NDTPA_MCAST_PROBES:
1935 				p->mcast_probes = nla_get_u32(tbp[i]);
1936 				break;
1937 			case NDTPA_BASE_REACHABLE_TIME:
1938 				p->base_reachable_time = nla_get_msecs(tbp[i]);
1939 				break;
1940 			case NDTPA_GC_STALETIME:
1941 				p->gc_staletime = nla_get_msecs(tbp[i]);
1942 				break;
1943 			case NDTPA_DELAY_PROBE_TIME:
1944 				p->delay_probe_time = nla_get_msecs(tbp[i]);
1945 				break;
1946 			case NDTPA_RETRANS_TIME:
1947 				p->retrans_time = nla_get_msecs(tbp[i]);
1948 				break;
1949 			case NDTPA_ANYCAST_DELAY:
1950 				p->anycast_delay = nla_get_msecs(tbp[i]);
1951 				break;
1952 			case NDTPA_PROXY_DELAY:
1953 				p->proxy_delay = nla_get_msecs(tbp[i]);
1954 				break;
1955 			case NDTPA_LOCKTIME:
1956 				p->locktime = nla_get_msecs(tbp[i]);
1957 				break;
1958 			}
1959 		}
1960 	}
1961 
1962 	if (tb[NDTA_THRESH1])
1963 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1964 
1965 	if (tb[NDTA_THRESH2])
1966 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1967 
1968 	if (tb[NDTA_THRESH3])
1969 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1970 
1971 	if (tb[NDTA_GC_INTERVAL])
1972 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1973 
1974 	err = 0;
1975 
1976 errout_tbl_lock:
1977 	write_unlock_bh(&tbl->lock);
1978 errout_locked:
1979 	read_unlock(&neigh_tbl_lock);
1980 errout:
1981 	return err;
1982 }
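
/*
 * Illustrative only (editorial, not from the original source): the
 * attribute layout neightbl_set() accepts in an RTM_SETNEIGHTBL
 * request.  The attribute names are the real UAPI constants; the
 * nesting sketch itself is just an aid to reading the parser above.
 *
 *	struct ndtmsg			ndtm_family filters by table family
 *	  NDTA_NAME	(string)	mandatory, e.g. "arp_cache"
 *	  NDTA_THRESH1..NDTA_THRESH3	(u32) table-wide gc thresholds
 *	  NDTA_GC_INTERVAL (u64)	milliseconds on the wire
 *	  NDTA_PARMS	(nested)
 *	    NDTPA_IFINDEX (u32)		0 selects the table's default parms
 *	    NDTPA_QUEUE_LEN ... NDTPA_LOCKTIME
 *					u32 counts, u64 millisecond times
 */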
1983 
1984 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1985 {
1986 	struct net *net = sock_net(skb->sk);
1987 	int family, tidx, nidx = 0;
1988 	int tbl_skip = cb->args[0];
1989 	int neigh_skip = cb->args[1];
1990 	struct neigh_table *tbl;
1991 
1992 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1993 
1994 	read_lock(&neigh_tbl_lock);
1995 	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1996 		struct neigh_parms *p;
1997 
1998 		if (tidx < tbl_skip || (family && tbl->family != family))
1999 			continue;
2000 
2001 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2002 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2003 				       NLM_F_MULTI) <= 0)
2004 			break;
2005 
2006 		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2007 			if (!net_eq(neigh_parms_net(p), net))
2008 				continue;
2009 
2010 			if (nidx++ < neigh_skip)
2011 				continue;
2012 
2013 			if (neightbl_fill_param_info(skb, tbl, p,
2014 						     NETLINK_CB(cb->skb).pid,
2015 						     cb->nlh->nlmsg_seq,
2016 						     RTM_NEWNEIGHTBL,
2017 						     NLM_F_MULTI) <= 0)
2018 				goto out;
2019 		}
2020 
2021 		neigh_skip = 0;
2022 	}
2023 out:
2024 	read_unlock(&neigh_tbl_lock);
2025 	cb->args[0] = tidx;
2026 	cb->args[1] = nidx;
2027 
2028 	return skb->len;
2029 }
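
/*
 * Editorial sketch of the resume protocol assumed above.  Netlink
 * dumps are restartable and cb->args[] survives between calls:
 * args[0] (tbl_skip) is the next table index and args[1] (neigh_skip)
 * the next parms entry within it.  E.g. if the skb fills up after
 * parms 0..3 of table 1:
 *
 *	1st call returns with cb->args = { 1, 4 }
 *	2nd call: tidx 0 is skipped (tidx < tbl_skip), table 1 resumes
 *	at nidx 4, and neigh_skip is reset to 0 for later tables.
 */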
2030 
2031 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2032 			   u32 pid, u32 seq, int type, unsigned int flags)
2033 {
2034 	unsigned long now = jiffies;
2035 	struct nda_cacheinfo ci;
2036 	struct nlmsghdr *nlh;
2037 	struct ndmsg *ndm;
2038 
2039 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2040 	if (nlh == NULL)
2041 		return -EMSGSIZE;
2042 
2043 	ndm = nlmsg_data(nlh);
2044 	ndm->ndm_family	 = neigh->ops->family;
2045 	ndm->ndm_pad1    = 0;
2046 	ndm->ndm_pad2    = 0;
2047 	ndm->ndm_flags	 = neigh->flags;
2048 	ndm->ndm_type	 = neigh->type;
2049 	ndm->ndm_ifindex = neigh->dev->ifindex;
2050 
2051 	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2052 
2053 	read_lock_bh(&neigh->lock);
2054 	ndm->ndm_state	 = neigh->nud_state;
2055 	if ((neigh->nud_state & NUD_VALID) &&
2056 	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2057 		read_unlock_bh(&neigh->lock);
2058 		goto nla_put_failure;
2059 	}
2060 
2061 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2062 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2063 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2064 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2065 	read_unlock_bh(&neigh->lock);
2066 
2067 	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2068 	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2069 
2070 	return nlmsg_end(skb, nlh);
2071 
2072 nla_put_failure:
2073 	nlmsg_cancel(skb, nlh);
2074 	return -EMSGSIZE;
2075 }
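
/*
 * Illustrative only: the RTM_NEWNEIGH payload assembled above.
 *
 *	struct ndmsg		family, state, flags, type, ifindex
 *	  NDA_DST		tbl->key_len bytes of primary_key
 *	  NDA_LLADDR		dev->addr_len bytes, only while NUD_VALID
 *	  NDA_PROBES		u32 probe count
 *	  NDA_CACHEINFO		struct nda_cacheinfo, ages in clock_t
 *
 * ndm_state and the link-layer address are copied under neigh->lock,
 * so a reader never sees a torn <state, lladdr> pair.
 */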
2076 
2077 static void neigh_update_notify(struct neighbour *neigh)
2078 {
2079 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2080 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2081 }
2082 
2083 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2084 			    struct netlink_callback *cb)
2085 {
2086 	struct net *net = sock_net(skb->sk);
2087 	struct neighbour *n;
2088 	int rc, h, s_h = cb->args[1];
2089 	int idx, s_idx = idx = cb->args[2];
2090 
2091 	read_lock_bh(&tbl->lock);
2092 	for (h = 0; h <= tbl->hash_mask; h++) {
2093 		if (h < s_h)
2094 			continue;
2095 		if (h > s_h)
2096 			s_idx = 0;
2097 		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2098 			int lidx;
2099 			if (dev_net(n->dev) != net)
2100 				continue;
2101 			lidx = idx++;
2102 			if (lidx < s_idx)
2103 				continue;
2104 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2105 					    cb->nlh->nlmsg_seq,
2106 					    RTM_NEWNEIGH,
2107 					    NLM_F_MULTI) <= 0) {
2108 				read_unlock_bh(&tbl->lock);
2109 				rc = -1;
2110 				goto out;
2111 			}
2112 		}
2113 	}
2114 	read_unlock_bh(&tbl->lock);
2115 	rc = skb->len;
2116 out:
2117 	cb->args[1] = h;
2118 	cb->args[2] = idx;
2119 	return rc;
2120 }
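
/*
 * Editorial note on the cursor above: cb->args[1] is the hash bucket
 * (h) and cb->args[2] the position within that bucket's chain (idx).
 * Chains may change between dump calls, so like all netlink dumps
 * this is best effort: positions, not pointers, are what get saved.
 */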
2121 
2122 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2123 {
2124 	struct neigh_table *tbl;
2125 	int t, family, s_t;
2126 
2127 	read_lock(&neigh_tbl_lock);
2128 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2129 	s_t = cb->args[0];
2130 
2131 	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2132 		if (t < s_t || (family && tbl->family != family))
2133 			continue;
2134 		if (t > s_t)
2135 			memset(&cb->args[1], 0, sizeof(cb->args) -
2136 						sizeof(cb->args[0]));
2137 		if (neigh_dump_table(tbl, skb, cb) < 0)
2138 			break;
2139 	}
2140 	read_unlock(&neigh_tbl_lock);
2141 
2142 	cb->args[0] = t;
2143 	return skb->len;
2144 }
2145 
2146 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2147 {
2148 	int chain;
2149 
2150 	read_lock_bh(&tbl->lock);
2151 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
2152 		struct neighbour *n;
2153 
2154 		for (n = tbl->hash_buckets[chain]; n; n = n->next)
2155 			cb(n, cookie);
2156 	}
2157 	read_unlock_bh(&tbl->lock);
2158 }
2159 EXPORT_SYMBOL(neigh_for_each);
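
/*
 * Illustrative usage (hypothetical caller, not from the original
 * file).  neigh_for_each() takes tbl->lock for reading with BHs off,
 * so the callback must not sleep or call back into the table:
 *
 *	struct count_ctx { struct net_device *dev; int nr; };
 *
 *	static void count_by_dev(struct neighbour *n, void *cookie)
 *	{
 *		struct count_ctx *c = cookie;
 *
 *		if (n->dev == c->dev)
 *			c->nr++;
 *	}
 *
 *	struct count_ctx ctx = { dev, 0 };
 *	neigh_for_each(&arp_tbl, count_by_dev, &ctx);
 */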
2160 
2161 /* The tbl->lock must be held as a writer and BH disabled. */
2162 void __neigh_for_each_release(struct neigh_table *tbl,
2163 			      int (*cb)(struct neighbour *))
2164 {
2165 	int chain;
2166 
2167 	for (chain = 0; chain <= tbl->hash_mask; chain++) {
2168 		struct neighbour *n, **np;
2169 
2170 		np = &tbl->hash_buckets[chain];
2171 		while ((n = *np) != NULL) {
2172 			int release;
2173 
2174 			write_lock(&n->lock);
2175 			release = cb(n);
2176 			if (release) {
2177 				*np = n->next;
2178 				n->dead = 1;
2179 			} else
2180 				np = &n->next;
2181 			write_unlock(&n->lock);
2182 			if (release)
2183 				neigh_cleanup_and_release(n);
2184 		}
2185 	}
2186 }
2187 EXPORT_SYMBOL(__neigh_for_each_release);
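
/*
 * Illustrative usage (hypothetical, in the style of the ATM clip
 * driver, the in-tree user of this helper).  A nonzero return unlinks
 * the entry and marks it dead; neigh_cleanup_and_release() then drops
 * the table's reference once n->lock is released.  The callback runs
 * under tbl->lock (write) and n->lock, so it must stay trivial:
 *
 *	static int release_if_idle(struct neighbour *n)
 *	{
 *		return atomic_read(&n->refcnt) == 1;
 *	}
 *
 *	write_lock_bh(&clip_tbl.lock);
 *	__neigh_for_each_release(&clip_tbl, release_if_idle);
 *	write_unlock_bh(&clip_tbl.lock);
 */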
2188 
2189 #ifdef CONFIG_PROC_FS
2190 
2191 static struct neighbour *neigh_get_first(struct seq_file *seq)
2192 {
2193 	struct neigh_seq_state *state = seq->private;
2194 	struct net *net = seq_file_net(seq);
2195 	struct neigh_table *tbl = state->tbl;
2196 	struct neighbour *n = NULL;
2197 	int bucket = state->bucket;
2198 	int bucket;
2199 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2200 	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2201 		n = tbl->hash_buckets[bucket];
2202 
2203 		while (n) {
2204 			if (!net_eq(dev_net(n->dev), net))
2205 				goto next;
2206 			if (state->neigh_sub_iter) {
2207 				loff_t fakep = 0;
2208 				void *v;
2209 
2210 				v = state->neigh_sub_iter(state, n, &fakep);
2211 				if (!v)
2212 					goto next;
2213 			}
2214 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2215 				break;
2216 			if (n->nud_state & ~NUD_NOARP)
2217 				break;
2218 		next:
2219 			n = n->next;
2220 		}
2221 
2222 		if (n)
2223 			break;
2224 	}
2225 	state->bucket = bucket;
2226 
2227 	return n;
2228 }
2229 
2230 static struct neighbour *neigh_get_next(struct seq_file *seq,
2231 					struct neighbour *n,
2232 					loff_t *pos)
2233 {
2234 	struct neigh_seq_state *state = seq->private;
2235 	struct net *net = seq_file_net(seq);
2236 	struct neigh_table *tbl = state->tbl;
2237 
2238 	if (state->neigh_sub_iter) {
2239 		void *v = state->neigh_sub_iter(state, n, pos);
2240 		if (v)
2241 			return n;
2242 	}
2243 	n = n->next;
2244 
2245 	while (1) {
2246 		while (n) {
2247 			if (!net_eq(dev_net(n->dev), net))
2248 				goto next;
2249 			if (state->neigh_sub_iter) {
2250 				void *v = state->neigh_sub_iter(state, n, pos);
2251 				if (v)
2252 					return n;
2253 				goto next;
2254 			}
2255 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2256 				break;
2257 
2258 			if (n->nud_state & ~NUD_NOARP)
2259 				break;
2260 		next:
2261 			n = n->next;
2262 		}
2263 
2264 		if (n)
2265 			break;
2266 
2267 		if (++state->bucket > tbl->hash_mask)
2268 			break;
2269 
2270 		n = tbl->hash_buckets[state->bucket];
2271 	}
2272 
2273 	if (n && pos)
2274 		--(*pos);
2275 	return n;
2276 }
2277 
2278 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2279 {
2280 	struct neighbour *n = neigh_get_first(seq);
2281 
2282 	if (n) {
2283 		while (*pos) {
2284 			n = neigh_get_next(seq, n, pos);
2285 			if (!n)
2286 				break;
2287 		}
2288 	}
2289 	return *pos ? NULL : n;
2290 }
2291 
2292 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2293 {
2294 	struct neigh_seq_state *state = seq->private;
2295 	struct net *net = seq_file_net(seq);
2296 	struct neigh_table *tbl = state->tbl;
2297 	struct pneigh_entry *pn = NULL;
2298 	int bucket = state->bucket;
2299 	int bucket;
2300 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2301 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2302 		pn = tbl->phash_buckets[bucket];
2303 		while (pn && !net_eq(pneigh_net(pn), net))
2304 			pn = pn->next;
2305 		if (pn)
2306 			break;
2307 	}
2308 	state->bucket = bucket;
2309 
2310 	return pn;
2311 }
2312 
2313 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2314 					    struct pneigh_entry *pn,
2315 					    loff_t *pos)
2316 {
2317 	struct neigh_seq_state *state = seq->private;
2318 	struct net *net = seq_file_net(seq);
2319 	struct neigh_table *tbl = state->tbl;
2320 
2321 	pn = pn->next;
2322 	while (!pn) {
2323 		if (++state->bucket > PNEIGH_HASHMASK)
2324 			break;
2325 		pn = tbl->phash_buckets[state->bucket];
2326 		while (pn && !net_eq(pneigh_net(pn), net))
2327 			pn = pn->next;
2328 		if (pn)
2329 			break;
2330 	}
2331 
2332 	if (pn && pos)
2333 		--(*pos);
2334 
2335 	return pn;
2336 }
2337 
2338 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2339 {
2340 	struct pneigh_entry *pn = pneigh_get_first(seq);
2341 
2342 	if (pn) {
2343 		while (*pos) {
2344 			pn = pneigh_get_next(seq, pn, pos);
2345 			if (!pn)
2346 				break;
2347 		}
2348 	}
2349 	return *pos ? NULL : pn;
2350 }
2351 
2352 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2353 {
2354 	struct neigh_seq_state *state = seq->private;
2355 	void *rc;
2356 
2357 	rc = neigh_get_idx(seq, pos);
2358 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2359 		rc = pneigh_get_idx(seq, pos);
2360 
2361 	return rc;
2362 }
2363 
2364 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2365 	__acquires(tbl->lock)
2366 {
2367 	struct neigh_seq_state *state = seq->private;
2368 	loff_t pos_minus_one;
2369 
2370 	state->tbl = tbl;
2371 	state->bucket = 0;
2372 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2373 
2374 	read_lock_bh(&tbl->lock);
2375 
2376 	pos_minus_one = *pos - 1;
2377 	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2378 }
2379 EXPORT_SYMBOL(neigh_seq_start);
2380 
2381 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2382 {
2383 	struct neigh_seq_state *state;
2384 	void *rc;
2385 
2386 	if (v == SEQ_START_TOKEN) {
2387 		rc = neigh_get_idx(seq, pos);
2388 		goto out;
2389 	}
2390 
2391 	state = seq->private;
2392 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2393 		rc = neigh_get_next(seq, v, NULL);
2394 		if (rc)
2395 			goto out;
2396 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2397 			rc = pneigh_get_first(seq);
2398 	} else {
2399 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2400 		rc = pneigh_get_next(seq, v, NULL);
2401 	}
2402 out:
2403 	++(*pos);
2404 	return rc;
2405 }
2406 EXPORT_SYMBOL(neigh_seq_next);
2407 
2408 void neigh_seq_stop(struct seq_file *seq, void *v)
2409 	__releases(tbl->lock)
2410 {
2411 	struct neigh_seq_state *state = seq->private;
2412 	struct neigh_table *tbl = state->tbl;
2413 
2414 	read_unlock_bh(&tbl->lock);
2415 }
2416 EXPORT_SYMBOL(neigh_seq_stop);
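
/*
 * Illustrative wiring (a sketch modelled on the IPv4 ARP /proc code;
 * proto_seq_show here is hypothetical).  A protocol exposes its table
 * by delegating iteration to the helpers above:
 *
 *	static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations proto_seq_ops = {
 *		.start	= proto_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= proto_seq_show,
 *	};
 *
 * neigh_seq_start() takes tbl->lock for reading with BHs disabled and
 * neigh_seq_stop() drops it, so ->show must not sleep.
 */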
2417 
2418 /* statistics via seq_file */
2419 
2420 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2421 {
2422 	struct proc_dir_entry *pde = seq->private;
2423 	struct neigh_table *tbl = pde->data;
2424 	int cpu;
2425 
2426 	if (*pos == 0)
2427 		return SEQ_START_TOKEN;
2428 
2429 	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2430 		if (!cpu_possible(cpu))
2431 			continue;
2432 		*pos = cpu+1;
2433 		return per_cpu_ptr(tbl->stats, cpu);
2434 	}
2435 	return NULL;
2436 }
2437 
2438 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2439 {
2440 	struct proc_dir_entry *pde = seq->private;
2441 	struct neigh_table *tbl = pde->data;
2442 	int cpu;
2443 
2444 	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2445 		if (!cpu_possible(cpu))
2446 			continue;
2447 		*pos = cpu+1;
2448 		return per_cpu_ptr(tbl->stats, cpu);
2449 	}
2450 	return NULL;
2451 }
2452 
2453 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2454 {
2455 
2456 }
2457 
2458 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2459 {
2460 	struct proc_dir_entry *pde = seq->private;
2461 	struct neigh_table *tbl = pde->data;
2462 	struct neigh_statistics *st = v;
2463 
2464 	if (v == SEQ_START_TOKEN) {
2465 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs\n");
2466 		return 0;
2467 	}
2468 
2469 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2470 			"%08lx %08lx  %08lx %08lx\n",
2471 		   atomic_read(&tbl->entries),
2472 
2473 		   st->allocs,
2474 		   st->destroys,
2475 		   st->hash_grows,
2476 
2477 		   st->lookups,
2478 		   st->hits,
2479 
2480 		   st->res_failed,
2481 
2482 		   st->rcv_probes_mcast,
2483 		   st->rcv_probes_ucast,
2484 
2485 		   st->periodic_gc_runs,
2486 		   st->forced_gc_runs
2487 		   );
2488 
2489 	return 0;
2490 }
2491 
2492 static const struct seq_operations neigh_stat_seq_ops = {
2493 	.start	= neigh_stat_seq_start,
2494 	.next	= neigh_stat_seq_next,
2495 	.stop	= neigh_stat_seq_stop,
2496 	.show	= neigh_stat_seq_show,
2497 };
2498 
2499 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2500 {
2501 	int ret = seq_open(file, &neigh_stat_seq_ops);
2502 
2503 	if (!ret) {
2504 		struct seq_file *sf = file->private_data;
2505 		sf->private = PDE(inode);
2506 	}
2507 	return ret;
2508 }
2509 
2510 static const struct file_operations neigh_stat_seq_fops = {
2511 	.owner	 = THIS_MODULE,
2512 	.open 	 = neigh_stat_seq_open,
2513 	.read	 = seq_read,
2514 	.llseek	 = seq_lseek,
2515 	.release = seq_release,
2516 };
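
/*
 * Editorial note: every possible CPU contributes one row to the
 * resulting /proc/net/stat/<tbl->id> file, e.g. (values illustrative):
 *
 *	entries  allocs destroys hash_grows  lookups hits  ...
 *	00000004  0000001f 0000001b 00000001  000004d2 000003a7  ...
 *
 * "entries" repeats the table-wide count on each row; the remaining
 * columns are per-CPU counters, all printed in hex.
 */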
2517 
2518 #endif /* CONFIG_PROC_FS */
2519 
2520 static inline size_t neigh_nlmsg_size(void)
2521 {
2522 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2523 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2524 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2525 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2526 	       + nla_total_size(4); /* NDA_PROBES */
2527 }
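
/*
 * Worked example (editorial): with the usual 4-byte netlink alignment
 * and MAX_ADDR_LEN == 32 this upper bound evaluates to
 *
 *	NLMSG_ALIGN(sizeof(struct ndmsg))		 12
 *	+ nla_total_size(32)		NDA_DST		 36
 *	+ nla_total_size(32)		NDA_LLADDR	 36
 *	+ nla_total_size(16)		NDA_CACHEINFO	 20
 *	+ nla_total_size(4)		NDA_PROBES	  8
 *							---
 *							112 bytes
 *
 * Real messages use tbl->key_len and dev->addr_len, which are usually
 * far smaller, so this deliberately overestimates.
 */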
2528 
2529 static void __neigh_notify(struct neighbour *n, int type, int flags)
2530 {
2531 	struct net *net = dev_net(n->dev);
2532 	struct sk_buff *skb;
2533 	int err = -ENOBUFS;
2534 
2535 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2536 	if (skb == NULL)
2537 		goto errout;
2538 
2539 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2540 	if (err < 0) {
2541 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2542 		WARN_ON(err == -EMSGSIZE);
2543 		kfree_skb(skb);
2544 		goto errout;
2545 	}
2546 	err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2547 errout:
2548 	if (err < 0)
2549 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2550 }
2551 
2552 #ifdef CONFIG_ARPD
2553 void neigh_app_ns(struct neighbour *n)
2554 {
2555 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2556 }
2557 EXPORT_SYMBOL(neigh_app_ns);
2558 #endif /* CONFIG_ARPD */
2559 
2560 #ifdef CONFIG_SYSCTL
2561 
2562 static struct neigh_sysctl_table {
2563 	struct ctl_table_header *sysctl_header;
2564 	struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2565 	char *dev_name;
2566 } neigh_sysctl_template __read_mostly = {
2567 	.neigh_vars = {
2568 		{
2569 			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
2570 			.procname	= "mcast_solicit",
2571 			.maxlen		= sizeof(int),
2572 			.mode		= 0644,
2573 			.proc_handler	= &proc_dointvec,
2574 		},
2575 		{
2576 			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
2577 			.procname	= "ucast_solicit",
2578 			.maxlen		= sizeof(int),
2579 			.mode		= 0644,
2580 			.proc_handler	= &proc_dointvec,
2581 		},
2582 		{
2583 			.ctl_name	= NET_NEIGH_APP_SOLICIT,
2584 			.procname	= "app_solicit",
2585 			.maxlen		= sizeof(int),
2586 			.mode		= 0644,
2587 			.proc_handler	= &proc_dointvec,
2588 		},
2589 		{
2590 			.procname	= "retrans_time",
2591 			.maxlen		= sizeof(int),
2592 			.mode		= 0644,
2593 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2594 		},
2595 		{
2596 			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
2597 			.procname	= "base_reachable_time",
2598 			.maxlen		= sizeof(int),
2599 			.mode		= 0644,
2600 			.proc_handler	= &proc_dointvec_jiffies,
2601 			.strategy	= &sysctl_jiffies,
2602 		},
2603 		{
2604 			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
2605 			.procname	= "delay_first_probe_time",
2606 			.maxlen		= sizeof(int),
2607 			.mode		= 0644,
2608 			.proc_handler	= &proc_dointvec_jiffies,
2609 			.strategy	= &sysctl_jiffies,
2610 		},
2611 		{
2612 			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
2613 			.procname	= "gc_stale_time",
2614 			.maxlen		= sizeof(int),
2615 			.mode		= 0644,
2616 			.proc_handler	= &proc_dointvec_jiffies,
2617 			.strategy	= &sysctl_jiffies,
2618 		},
2619 		{
2620 			.ctl_name	= NET_NEIGH_UNRES_QLEN,
2621 			.procname	= "unres_qlen",
2622 			.maxlen		= sizeof(int),
2623 			.mode		= 0644,
2624 			.proc_handler	= &proc_dointvec,
2625 		},
2626 		{
2627 			.ctl_name	= NET_NEIGH_PROXY_QLEN,
2628 			.procname	= "proxy_qlen",
2629 			.maxlen		= sizeof(int),
2630 			.mode		= 0644,
2631 			.proc_handler	= &proc_dointvec,
2632 		},
2633 		{
2634 			.procname	= "anycast_delay",
2635 			.maxlen		= sizeof(int),
2636 			.mode		= 0644,
2637 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2638 		},
2639 		{
2640 			.procname	= "proxy_delay",
2641 			.maxlen		= sizeof(int),
2642 			.mode		= 0644,
2643 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2644 		},
2645 		{
2646 			.procname	= "locktime",
2647 			.maxlen		= sizeof(int),
2648 			.mode		= 0644,
2649 			.proc_handler	= &proc_dointvec_userhz_jiffies,
2650 		},
2651 		{
2652 			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
2653 			.procname	= "retrans_time_ms",
2654 			.maxlen		= sizeof(int),
2655 			.mode		= 0644,
2656 			.proc_handler	= &proc_dointvec_ms_jiffies,
2657 			.strategy	= &sysctl_ms_jiffies,
2658 		},
2659 		{
2660 			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
2661 			.procname	= "base_reachable_time_ms",
2662 			.maxlen		= sizeof(int),
2663 			.mode		= 0644,
2664 			.proc_handler	= &proc_dointvec_ms_jiffies,
2665 			.strategy	= &sysctl_ms_jiffies,
2666 		},
2667 		{
2668 			.ctl_name	= NET_NEIGH_GC_INTERVAL,
2669 			.procname	= "gc_interval",
2670 			.maxlen		= sizeof(int),
2671 			.mode		= 0644,
2672 			.proc_handler	= &proc_dointvec_jiffies,
2673 			.strategy	= &sysctl_jiffies,
2674 		},
2675 		{
2676 			.ctl_name	= NET_NEIGH_GC_THRESH1,
2677 			.procname	= "gc_thresh1",
2678 			.maxlen		= sizeof(int),
2679 			.mode		= 0644,
2680 			.proc_handler	= &proc_dointvec,
2681 		},
2682 		{
2683 			.ctl_name	= NET_NEIGH_GC_THRESH2,
2684 			.procname	= "gc_thresh2",
2685 			.maxlen		= sizeof(int),
2686 			.mode		= 0644,
2687 			.proc_handler	= &proc_dointvec,
2688 		},
2689 		{
2690 			.ctl_name	= NET_NEIGH_GC_THRESH3,
2691 			.procname	= "gc_thresh3",
2692 			.maxlen		= sizeof(int),
2693 			.mode		= 0644,
2694 			.proc_handler	= &proc_dointvec,
2695 		},
2696 		{},
2697 	},
2698 };
2699 
2700 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2701 			  int p_id, int pdev_id, char *p_name,
2702 			  proc_handler *handler, ctl_handler *strategy)
2703 {
2704 	struct neigh_sysctl_table *t;
2705 	const char *dev_name_source = NULL;
2706 
2707 #define NEIGH_CTL_PATH_ROOT	0
2708 #define NEIGH_CTL_PATH_PROTO	1
2709 #define NEIGH_CTL_PATH_NEIGH	2
2710 #define NEIGH_CTL_PATH_DEV	3
2711 
2712 	struct ctl_path neigh_path[] = {
2713 		{ .procname = "net",	 .ctl_name = CTL_NET, },
2714 		{ .procname = "proto",	 .ctl_name = 0, },
2715 		{ .procname = "neigh",	 .ctl_name = 0, },
2716 		{ .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2717 		{ },
2718 	};
2719 
2720 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2721 	if (!t)
2722 		goto err;
2723 
2724 	t->neigh_vars[0].data  = &p->mcast_probes;
2725 	t->neigh_vars[1].data  = &p->ucast_probes;
2726 	t->neigh_vars[2].data  = &p->app_probes;
2727 	t->neigh_vars[3].data  = &p->retrans_time;
2728 	t->neigh_vars[4].data  = &p->base_reachable_time;
2729 	t->neigh_vars[5].data  = &p->delay_probe_time;
2730 	t->neigh_vars[6].data  = &p->gc_staletime;
2731 	t->neigh_vars[7].data  = &p->queue_len;
2732 	t->neigh_vars[8].data  = &p->proxy_qlen;
2733 	t->neigh_vars[9].data  = &p->anycast_delay;
2734 	t->neigh_vars[10].data = &p->proxy_delay;
2735 	t->neigh_vars[11].data = &p->locktime;
2736 	t->neigh_vars[12].data = &p->retrans_time;
2737 	t->neigh_vars[13].data = &p->base_reachable_time;
2738 
2739 	if (dev) {
2740 		dev_name_source = dev->name;
2741 		neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
2742 		/* Terminate the table early */
2743 		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2744 	} else {
2745 		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2746 		t->neigh_vars[14].data = (int *)(p + 1);
2747 		t->neigh_vars[15].data = (int *)(p + 1) + 1;
2748 		t->neigh_vars[16].data = (int *)(p + 1) + 2;
2749 		t->neigh_vars[17].data = (int *)(p + 1) + 3;
2750 	}
2751 
2753 	if (handler || strategy) {
2754 		/* RetransTime */
2755 		t->neigh_vars[3].proc_handler = handler;
2756 		t->neigh_vars[3].strategy = strategy;
2757 		t->neigh_vars[3].extra1 = dev;
2758 		if (!strategy)
2759 			t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2760 		/* ReachableTime */
2761 		t->neigh_vars[4].proc_handler = handler;
2762 		t->neigh_vars[4].strategy = strategy;
2763 		t->neigh_vars[4].extra1 = dev;
2764 		if (!strategy)
2765 			t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2766 		/* RetransTime (in milliseconds) */
2767 		t->neigh_vars[12].proc_handler = handler;
2768 		t->neigh_vars[12].strategy = strategy;
2769 		t->neigh_vars[12].extra1 = dev;
2770 		if (!strategy)
2771 			t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2772 		/* ReachableTime (in milliseconds) */
2773 		t->neigh_vars[13].proc_handler = handler;
2774 		t->neigh_vars[13].strategy = strategy;
2775 		t->neigh_vars[13].extra1 = dev;
2776 		if (!strategy)
2777 			t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
2778 	}
2779 
2780 	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2781 	if (!t->dev_name)
2782 		goto free;
2783 
2784 	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2785 	neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2786 	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2787 	neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2788 
2789 	t->sysctl_header =
2790 		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2791 	if (!t->sysctl_header)
2792 		goto free_procname;
2793 
2794 	p->sysctl_table = t;
2795 	return 0;
2796 
2797 free_procname:
2798 	kfree(t->dev_name);
2799 free:
2800 	kfree(t);
2801 err:
2802 	return -ENOBUFS;
2803 }
2804 EXPORT_SYMBOL(neigh_sysctl_register);
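
/*
 * Illustrative call (modelled on the IPv4 ARP code; the exact
 * arguments there may differ): hooking a device's parms in under
 * /proc/sys/net/ipv4/neigh/<dev>/:
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 *
 * With handler and strategy both NULL the template's stock jiffies
 * conversions stay in effect; a protocol can pass its own pair to
 * intercept the retrans/reachable knobs (vars 3, 4, 12 and 13 above).
 */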
2805 
2806 void neigh_sysctl_unregister(struct neigh_parms *p)
2807 {
2808 	if (p->sysctl_table) {
2809 		struct neigh_sysctl_table *t = p->sysctl_table;
2810 		p->sysctl_table = NULL;
2811 		unregister_sysctl_table(t->sysctl_header);
2812 		kfree(t->dev_name);
2813 		kfree(t);
2814 	}
2815 }
2816 EXPORT_SYMBOL(neigh_sysctl_unregister);
2817 
2818 #endif	/* CONFIG_SYSCTL */
2819 
2820 static int __init neigh_init(void)
2821 {
2822 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2823 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2824 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2825 
2826 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2827 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2828 
2829 	return 0;
2830 }
2831 
2832 subsys_initcall(neigh_init);
2833 
2834