xref: /openbmc/linux/net/core/neighbour.c (revision 36c0f8b3)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(unsigned long arg);
55 static void __neigh_notify(struct neighbour *n, int type, int flags);
56 static void neigh_update_notify(struct neighbour *neigh);
57 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
58 
59 #ifdef CONFIG_PROC_FS
60 static const struct file_operations neigh_stat_seq_fops;
61 #endif
62 
63 /*
64    Neighbour hash table buckets are protected with the rwlock tbl->lock.
65 
66    - All scans/updates of the hash buckets MUST be made under this lock.
67    - NOTHING clever should be done under this lock: no callbacks
68      into protocol backends, no attempts to send anything to the network.
69      Doing so will result in deadlocks if the backend/driver wants to use the
70      neighbour cache.
71    - If the entry requires some non-trivial action, increase
72      its reference count and release the table lock.
73 
74    Neighbour entries are protected:
75    - by the reference count.
76    - by the rwlock neigh->lock
77 
78    The reference count prevents destruction.
79 
80    neigh->lock mainly serializes the ll address data and its validity state.
81    However, the same lock is also used to protect other entry fields:
82     - the timer
83     - the resolution queue
84 
85    Again, nothing clever shall be done under neigh->lock;
86    the most complicated procedure we allow is dev->hard_header.
87    It is assumed that dev->hard_header is simplistic and does
88    not call back into the neighbour tables.
89  */
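/* An illustrative sketch of the rules above: take a reference during the
 * lookup, then drop every lock before doing anything non-trivial.
 * do_slow_work() is a hypothetical helper standing in for whatever the
 * caller actually needs to do with the entry.
 *
 *	struct neighbour *n = neigh_lookup(tbl, key, dev);	(takes a reference)
 *	if (n) {
 *		do_slow_work(n);		(no tbl->lock or n->lock held here)
 *		neigh_release(n);		(drops the lookup reference)
 *	}
 */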
90 
91 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
92 {
93 	kfree_skb(skb);
94 	return -ENETDOWN;
95 }
96 
97 static void neigh_cleanup_and_release(struct neighbour *neigh)
98 {
99 	if (neigh->parms->neigh_cleanup)
100 		neigh->parms->neigh_cleanup(neigh);
101 
102 	__neigh_notify(neigh, RTM_DELNEIGH, 0);
103 	neigh_release(neigh);
104 }
105 
106 /*
107  * It is a random distribution in the interval (1/2)*base...(3/2)*base.
108  * It corresponds to the default IPv6 settings and is not overridable,
109  * because it is a really reasonable choice.
110  */
111 
112 unsigned long neigh_rand_reach_time(unsigned long base)
113 {
114 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
115 }
116 EXPORT_SYMBOL(neigh_rand_reach_time);
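/* Worked example, assuming the IPv6 default base_reachable_time of 30 seconds:
 *
 *	base = 30 * HZ
 *	(prandom_u32() % base) + (base >> 1)  is uniform in  [15 * HZ, 45 * HZ)
 *
 * i.e. reachable_time is redrawn to somewhere between 15 and 45 seconds.
 */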
117 
118 
119 static int neigh_forced_gc(struct neigh_table *tbl)
120 {
121 	int shrunk = 0;
122 	int i;
123 	struct neigh_hash_table *nht;
124 
125 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
126 
127 	write_lock_bh(&tbl->lock);
128 	nht = rcu_dereference_protected(tbl->nht,
129 					lockdep_is_held(&tbl->lock));
130 	for (i = 0; i < (1 << nht->hash_shift); i++) {
131 		struct neighbour *n;
132 		struct neighbour __rcu **np;
133 
134 		np = &nht->hash_buckets[i];
135 		while ((n = rcu_dereference_protected(*np,
136 					lockdep_is_held(&tbl->lock))) != NULL) {
137 			/* Neighbour record may be discarded if:
138 			 * - nobody refers to it.
139 			 * - it is not permanent
140 			 */
141 			write_lock(&n->lock);
142 			if (atomic_read(&n->refcnt) == 1 &&
143 			    !(n->nud_state & NUD_PERMANENT)) {
144 				rcu_assign_pointer(*np,
145 					rcu_dereference_protected(n->next,
146 						  lockdep_is_held(&tbl->lock)));
147 				n->dead = 1;
148 				shrunk	= 1;
149 				write_unlock(&n->lock);
150 				neigh_cleanup_and_release(n);
151 				continue;
152 			}
153 			write_unlock(&n->lock);
154 			np = &n->next;
155 		}
156 	}
157 
158 	tbl->last_flush = jiffies;
159 
160 	write_unlock_bh(&tbl->lock);
161 
162 	return shrunk;
163 }
164 
165 static void neigh_add_timer(struct neighbour *n, unsigned long when)
166 {
167 	neigh_hold(n);
168 	if (unlikely(mod_timer(&n->timer, when))) {
169 		printk("NEIGH: BUG, double timer add, state is %x\n",
170 		       n->nud_state);
171 		dump_stack();
172 	}
173 }
174 
175 static int neigh_del_timer(struct neighbour *n)
176 {
177 	if ((n->nud_state & NUD_IN_TIMER) &&
178 	    del_timer(&n->timer)) {
179 		neigh_release(n);
180 		return 1;
181 	}
182 	return 0;
183 }
184 
185 static void pneigh_queue_purge(struct sk_buff_head *list)
186 {
187 	struct sk_buff *skb;
188 
189 	while ((skb = skb_dequeue(list)) != NULL) {
190 		dev_put(skb->dev);
191 		kfree_skb(skb);
192 	}
193 }
194 
195 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
196 {
197 	int i;
198 	struct neigh_hash_table *nht;
199 
200 	nht = rcu_dereference_protected(tbl->nht,
201 					lockdep_is_held(&tbl->lock));
202 
203 	for (i = 0; i < (1 << nht->hash_shift); i++) {
204 		struct neighbour *n;
205 		struct neighbour __rcu **np = &nht->hash_buckets[i];
206 
207 		while ((n = rcu_dereference_protected(*np,
208 					lockdep_is_held(&tbl->lock))) != NULL) {
209 			if (dev && n->dev != dev) {
210 				np = &n->next;
211 				continue;
212 			}
213 			rcu_assign_pointer(*np,
214 				   rcu_dereference_protected(n->next,
215 						lockdep_is_held(&tbl->lock)));
216 			write_lock(&n->lock);
217 			neigh_del_timer(n);
218 			n->dead = 1;
219 
220 			if (atomic_read(&n->refcnt) != 1) {
221 				/* The most unpleasant situation:
222 				   we must destroy the neighbour entry,
223 				   but someone still uses it.
224 
225 				   Destruction will be delayed until
226 				   the last user releases us, but
227 				   we must kill the timers etc. and move
228 				   it to a safe state.
229 				 */
230 				__skb_queue_purge(&n->arp_queue);
231 				n->arp_queue_len_bytes = 0;
232 				n->output = neigh_blackhole;
233 				if (n->nud_state & NUD_VALID)
234 					n->nud_state = NUD_NOARP;
235 				else
236 					n->nud_state = NUD_NONE;
237 				neigh_dbg(2, "neigh %p is stray\n", n);
238 			}
239 			write_unlock(&n->lock);
240 			neigh_cleanup_and_release(n);
241 		}
242 	}
243 }
244 
245 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
246 {
247 	write_lock_bh(&tbl->lock);
248 	neigh_flush_dev(tbl, dev);
249 	write_unlock_bh(&tbl->lock);
250 }
251 EXPORT_SYMBOL(neigh_changeaddr);
252 
253 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
254 {
255 	write_lock_bh(&tbl->lock);
256 	neigh_flush_dev(tbl, dev);
257 	pneigh_ifdown(tbl, dev);
258 	write_unlock_bh(&tbl->lock);
259 
260 	del_timer_sync(&tbl->proxy_timer);
261 	pneigh_queue_purge(&tbl->proxy_queue);
262 	return 0;
263 }
264 EXPORT_SYMBOL(neigh_ifdown);
265 
266 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
267 {
268 	struct neighbour *n = NULL;
269 	unsigned long now = jiffies;
270 	int entries;
271 
272 	entries = atomic_inc_return(&tbl->entries) - 1;
273 	if (entries >= tbl->gc_thresh3 ||
274 	    (entries >= tbl->gc_thresh2 &&
275 	     time_after(now, tbl->last_flush + 5 * HZ))) {
276 		if (!neigh_forced_gc(tbl) &&
277 		    entries >= tbl->gc_thresh3) {
278 			net_info_ratelimited("%s: neighbor table overflow!\n",
279 					     tbl->id);
280 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
281 			goto out_entries;
282 		}
283 	}
284 
285 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
286 	if (!n)
287 		goto out_entries;
288 
289 	__skb_queue_head_init(&n->arp_queue);
290 	rwlock_init(&n->lock);
291 	seqlock_init(&n->ha_lock);
292 	n->updated	  = n->used = now;
293 	n->nud_state	  = NUD_NONE;
294 	n->output	  = neigh_blackhole;
295 	seqlock_init(&n->hh.hh_lock);
296 	n->parms	  = neigh_parms_clone(&tbl->parms);
297 	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
298 
299 	NEIGH_CACHE_STAT_INC(tbl, allocs);
300 	n->tbl		  = tbl;
301 	atomic_set(&n->refcnt, 1);
302 	n->dead		  = 1;
303 out:
304 	return n;
305 
306 out_entries:
307 	atomic_dec(&tbl->entries);
308 	goto out;
309 }
310 
311 static void neigh_get_hash_rnd(u32 *x)
312 {
313 	get_random_bytes(x, sizeof(*x));
314 	*x |= 1;
315 }
316 
317 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
318 {
319 	size_t size = (1 << shift) * sizeof(struct neighbour *);
320 	struct neigh_hash_table *ret;
321 	struct neighbour __rcu **buckets;
322 	int i;
323 
324 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
325 	if (!ret)
326 		return NULL;
327 	if (size <= PAGE_SIZE)
328 		buckets = kzalloc(size, GFP_ATOMIC);
329 	else
330 		buckets = (struct neighbour __rcu **)
331 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
332 					   get_order(size));
333 	if (!buckets) {
334 		kfree(ret);
335 		return NULL;
336 	}
337 	ret->hash_buckets = buckets;
338 	ret->hash_shift = shift;
339 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
340 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
341 	return ret;
342 }
343 
344 static void neigh_hash_free_rcu(struct rcu_head *head)
345 {
346 	struct neigh_hash_table *nht = container_of(head,
347 						    struct neigh_hash_table,
348 						    rcu);
349 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
350 	struct neighbour __rcu **buckets = nht->hash_buckets;
351 
352 	if (size <= PAGE_SIZE)
353 		kfree(buckets);
354 	else
355 		free_pages((unsigned long)buckets, get_order(size));
356 	kfree(nht);
357 }
358 
359 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
360 						unsigned long new_shift)
361 {
362 	unsigned int i, hash;
363 	struct neigh_hash_table *new_nht, *old_nht;
364 
365 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
366 
367 	old_nht = rcu_dereference_protected(tbl->nht,
368 					    lockdep_is_held(&tbl->lock));
369 	new_nht = neigh_hash_alloc(new_shift);
370 	if (!new_nht)
371 		return old_nht;
372 
373 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
374 		struct neighbour *n, *next;
375 
376 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
377 						   lockdep_is_held(&tbl->lock));
378 		     n != NULL;
379 		     n = next) {
380 			hash = tbl->hash(n->primary_key, n->dev,
381 					 new_nht->hash_rnd);
382 
383 			hash >>= (32 - new_nht->hash_shift);
384 			next = rcu_dereference_protected(n->next,
385 						lockdep_is_held(&tbl->lock));
386 
387 			rcu_assign_pointer(n->next,
388 					   rcu_dereference_protected(
389 						new_nht->hash_buckets[hash],
390 						lockdep_is_held(&tbl->lock)));
391 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
392 		}
393 	}
394 
395 	rcu_assign_pointer(tbl->nht, new_nht);
396 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
397 	return new_nht;
398 }
399 
400 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
401 			       struct net_device *dev)
402 {
403 	struct neighbour *n;
404 
405 	NEIGH_CACHE_STAT_INC(tbl, lookups);
406 
407 	rcu_read_lock_bh();
408 	n = __neigh_lookup_noref(tbl, pkey, dev);
409 	if (n) {
410 		if (!atomic_inc_not_zero(&n->refcnt))
411 			n = NULL;
412 		NEIGH_CACHE_STAT_INC(tbl, hits);
413 	}
414 
415 	rcu_read_unlock_bh();
416 	return n;
417 }
418 EXPORT_SYMBOL(neigh_lookup);
419 
420 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
421 				     const void *pkey)
422 {
423 	struct neighbour *n;
424 	int key_len = tbl->key_len;
425 	u32 hash_val;
426 	struct neigh_hash_table *nht;
427 
428 	NEIGH_CACHE_STAT_INC(tbl, lookups);
429 
430 	rcu_read_lock_bh();
431 	nht = rcu_dereference_bh(tbl->nht);
432 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
433 
434 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
435 	     n != NULL;
436 	     n = rcu_dereference_bh(n->next)) {
437 		if (!memcmp(n->primary_key, pkey, key_len) &&
438 		    net_eq(dev_net(n->dev), net)) {
439 			if (!atomic_inc_not_zero(&n->refcnt))
440 				n = NULL;
441 			NEIGH_CACHE_STAT_INC(tbl, hits);
442 			break;
443 		}
444 	}
445 
446 	rcu_read_unlock_bh();
447 	return n;
448 }
449 EXPORT_SYMBOL(neigh_lookup_nodev);
450 
451 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
452 				 struct net_device *dev, bool want_ref)
453 {
454 	u32 hash_val;
455 	int key_len = tbl->key_len;
456 	int error;
457 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
458 	struct neigh_hash_table *nht;
459 
460 	if (!n) {
461 		rc = ERR_PTR(-ENOBUFS);
462 		goto out;
463 	}
464 
465 	memcpy(n->primary_key, pkey, key_len);
466 	n->dev = dev;
467 	dev_hold(dev);
468 
469 	/* Protocol specific setup. */
470 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
471 		rc = ERR_PTR(error);
472 		goto out_neigh_release;
473 	}
474 
475 	if (dev->netdev_ops->ndo_neigh_construct) {
476 		error = dev->netdev_ops->ndo_neigh_construct(n);
477 		if (error < 0) {
478 			rc = ERR_PTR(error);
479 			goto out_neigh_release;
480 		}
481 	}
482 
483 	/* Device specific setup. */
484 	if (n->parms->neigh_setup &&
485 	    (error = n->parms->neigh_setup(n)) < 0) {
486 		rc = ERR_PTR(error);
487 		goto out_neigh_release;
488 	}
489 
490 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
491 
492 	write_lock_bh(&tbl->lock);
493 	nht = rcu_dereference_protected(tbl->nht,
494 					lockdep_is_held(&tbl->lock));
495 
496 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
497 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
498 
499 	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
500 
501 	if (n->parms->dead) {
502 		rc = ERR_PTR(-EINVAL);
503 		goto out_tbl_unlock;
504 	}
505 
506 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
507 					    lockdep_is_held(&tbl->lock));
508 	     n1 != NULL;
509 	     n1 = rcu_dereference_protected(n1->next,
510 			lockdep_is_held(&tbl->lock))) {
511 		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
512 			if (want_ref)
513 				neigh_hold(n1);
514 			rc = n1;
515 			goto out_tbl_unlock;
516 		}
517 	}
518 
519 	n->dead = 0;
520 	if (want_ref)
521 		neigh_hold(n);
522 	rcu_assign_pointer(n->next,
523 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
524 						     lockdep_is_held(&tbl->lock)));
525 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
526 	write_unlock_bh(&tbl->lock);
527 	neigh_dbg(2, "neigh %p is created\n", n);
528 	rc = n;
529 out:
530 	return rc;
531 out_tbl_unlock:
532 	write_unlock_bh(&tbl->lock);
533 out_neigh_release:
534 	neigh_release(n);
535 	goto out;
536 }
537 EXPORT_SYMBOL(__neigh_create);
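/* An illustrative sketch of how a protocol typically obtains an entry: the
 * __neigh_lookup() helper in <net/neighbour.h> wraps neigh_lookup() and falls
 * back to neigh_create() when its creat argument is non-zero.
 *
 *	struct neighbour *n = __neigh_lookup(tbl, addr, dev, 1);
 *	if (n) {
 *		... use the entry, e.g. hand an skb to n->output() ...
 *		neigh_release(n);
 *	}
 */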
538 
539 static u32 pneigh_hash(const void *pkey, int key_len)
540 {
541 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
542 	hash_val ^= (hash_val >> 16);
543 	hash_val ^= hash_val >> 8;
544 	hash_val ^= hash_val >> 4;
545 	hash_val &= PNEIGH_HASHMASK;
546 	return hash_val;
547 }
548 
549 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
550 					      struct net *net,
551 					      const void *pkey,
552 					      int key_len,
553 					      struct net_device *dev)
554 {
555 	while (n) {
556 		if (!memcmp(n->key, pkey, key_len) &&
557 		    net_eq(pneigh_net(n), net) &&
558 		    (n->dev == dev || !n->dev))
559 			return n;
560 		n = n->next;
561 	}
562 	return NULL;
563 }
564 
565 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
566 		struct net *net, const void *pkey, struct net_device *dev)
567 {
568 	int key_len = tbl->key_len;
569 	u32 hash_val = pneigh_hash(pkey, key_len);
570 
571 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
572 				 net, pkey, key_len, dev);
573 }
574 EXPORT_SYMBOL_GPL(__pneigh_lookup);
575 
576 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
577 				    struct net *net, const void *pkey,
578 				    struct net_device *dev, int creat)
579 {
580 	struct pneigh_entry *n;
581 	int key_len = tbl->key_len;
582 	u32 hash_val = pneigh_hash(pkey, key_len);
583 
584 	read_lock_bh(&tbl->lock);
585 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
586 			      net, pkey, key_len, dev);
587 	read_unlock_bh(&tbl->lock);
588 
589 	if (n || !creat)
590 		goto out;
591 
592 	ASSERT_RTNL();
593 
594 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
595 	if (!n)
596 		goto out;
597 
598 	write_pnet(&n->net, net);
599 	memcpy(n->key, pkey, key_len);
600 	n->dev = dev;
601 	if (dev)
602 		dev_hold(dev);
603 
604 	if (tbl->pconstructor && tbl->pconstructor(n)) {
605 		if (dev)
606 			dev_put(dev);
607 		kfree(n);
608 		n = NULL;
609 		goto out;
610 	}
611 
612 	write_lock_bh(&tbl->lock);
613 	n->next = tbl->phash_buckets[hash_val];
614 	tbl->phash_buckets[hash_val] = n;
615 	write_unlock_bh(&tbl->lock);
616 out:
617 	return n;
618 }
619 EXPORT_SYMBOL(pneigh_lookup);
620 
621 
622 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
623 		  struct net_device *dev)
624 {
625 	struct pneigh_entry *n, **np;
626 	int key_len = tbl->key_len;
627 	u32 hash_val = pneigh_hash(pkey, key_len);
628 
629 	write_lock_bh(&tbl->lock);
630 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
631 	     np = &n->next) {
632 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
633 		    net_eq(pneigh_net(n), net)) {
634 			*np = n->next;
635 			write_unlock_bh(&tbl->lock);
636 			if (tbl->pdestructor)
637 				tbl->pdestructor(n);
638 			if (n->dev)
639 				dev_put(n->dev);
640 			kfree(n);
641 			return 0;
642 		}
643 	}
644 	write_unlock_bh(&tbl->lock);
645 	return -ENOENT;
646 }
647 
648 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
649 {
650 	struct pneigh_entry *n, **np;
651 	u32 h;
652 
653 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
654 		np = &tbl->phash_buckets[h];
655 		while ((n = *np) != NULL) {
656 			if (!dev || n->dev == dev) {
657 				*np = n->next;
658 				if (tbl->pdestructor)
659 					tbl->pdestructor(n);
660 				if (n->dev)
661 					dev_put(n->dev);
662 				kfree(n);
663 				continue;
664 			}
665 			np = &n->next;
666 		}
667 	}
668 	return -ENOENT;
669 }
670 
671 static void neigh_parms_destroy(struct neigh_parms *parms);
672 
673 static inline void neigh_parms_put(struct neigh_parms *parms)
674 {
675 	if (atomic_dec_and_test(&parms->refcnt))
676 		neigh_parms_destroy(parms);
677 }
678 
679 /*
680  *	The neighbour must already be out of the table.
681  *
682  */
683 void neigh_destroy(struct neighbour *neigh)
684 {
685 	struct net_device *dev = neigh->dev;
686 
687 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
688 
689 	if (!neigh->dead) {
690 		pr_warn("Destroying alive neighbour %p\n", neigh);
691 		dump_stack();
692 		return;
693 	}
694 
695 	if (neigh_del_timer(neigh))
696 		pr_warn("Impossible event\n");
697 
698 	write_lock_bh(&neigh->lock);
699 	__skb_queue_purge(&neigh->arp_queue);
700 	write_unlock_bh(&neigh->lock);
701 	neigh->arp_queue_len_bytes = 0;
702 
703 	if (dev->netdev_ops->ndo_neigh_destroy)
704 		dev->netdev_ops->ndo_neigh_destroy(neigh);
705 
706 	dev_put(dev);
707 	neigh_parms_put(neigh->parms);
708 
709 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
710 
711 	atomic_dec(&neigh->tbl->entries);
712 	kfree_rcu(neigh, rcu);
713 }
714 EXPORT_SYMBOL(neigh_destroy);
715 
716 /* Neighbour state is suspicious;
717    disable the fast path.
718 
719    Called with the neigh entry write-locked.
720  */
721 static void neigh_suspect(struct neighbour *neigh)
722 {
723 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
724 
725 	neigh->output = neigh->ops->output;
726 }
727 
728 /* Neighbour state is OK;
729    enable the fast path.
730 
731    Called with the neigh entry write-locked.
732  */
733 static void neigh_connect(struct neighbour *neigh)
734 {
735 	neigh_dbg(2, "neigh %p is connected\n", neigh);
736 
737 	neigh->output = neigh->ops->connected_output;
738 }
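/* Note: which functions the two ops point at is the protocol's choice;
 * neigh_resolve_output() and neigh_connected_output() below are the stock
 * implementations usually plugged into ops->output and ops->connected_output
 * respectively, so a connected entry can skip the resolution state machine.
 */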
739 
740 static void neigh_periodic_work(struct work_struct *work)
741 {
742 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
743 	struct neighbour *n;
744 	struct neighbour __rcu **np;
745 	unsigned int i;
746 	struct neigh_hash_table *nht;
747 
748 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
749 
750 	write_lock_bh(&tbl->lock);
751 	nht = rcu_dereference_protected(tbl->nht,
752 					lockdep_is_held(&tbl->lock));
753 
754 	/*
755 	 *	periodically recompute ReachableTime from random function
756 	 */
757 
758 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
759 		struct neigh_parms *p;
760 		tbl->last_rand = jiffies;
761 		list_for_each_entry(p, &tbl->parms_list, list)
762 			p->reachable_time =
763 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
764 	}
765 
766 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
767 		goto out;
768 
769 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
770 		np = &nht->hash_buckets[i];
771 
772 		while ((n = rcu_dereference_protected(*np,
773 				lockdep_is_held(&tbl->lock))) != NULL) {
774 			unsigned int state;
775 
776 			write_lock(&n->lock);
777 
778 			state = n->nud_state;
779 			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
780 				write_unlock(&n->lock);
781 				goto next_elt;
782 			}
783 
784 			if (time_before(n->used, n->confirmed))
785 				n->used = n->confirmed;
786 
787 			if (atomic_read(&n->refcnt) == 1 &&
788 			    (state == NUD_FAILED ||
789 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
790 				*np = n->next;
791 				n->dead = 1;
792 				write_unlock(&n->lock);
793 				neigh_cleanup_and_release(n);
794 				continue;
795 			}
796 			write_unlock(&n->lock);
797 
798 next_elt:
799 			np = &n->next;
800 		}
801 		/*
802 		 * It's fine to release lock here, even if hash table
803 		 * grows while we are preempted.
804 		 */
805 		write_unlock_bh(&tbl->lock);
806 		cond_resched();
807 		write_lock_bh(&tbl->lock);
808 		nht = rcu_dereference_protected(tbl->nht,
809 						lockdep_is_held(&tbl->lock));
810 	}
811 out:
812 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
813 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
814 	 * BASE_REACHABLE_TIME.
815 	 */
816 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
817 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
818 	write_unlock_bh(&tbl->lock);
819 }
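/* Worked timing example, assuming base_reachable_time = 30 seconds and an
 * entry that is not kept alive by traffic: the work above requeues itself
 * every 15 seconds, reachable_time is redrawn from the 15..45 second range,
 * so the entry drops from NUD_REACHABLE to NUD_STALE between 15 and 45
 * seconds after it was last confirmed, and is eventually removed here once
 * only the table's own reference remains and the entry has been unused for
 * GC_STALETIME.
 */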
820 
821 static __inline__ int neigh_max_probes(struct neighbour *n)
822 {
823 	struct neigh_parms *p = n->parms;
824 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
825 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
826 	        NEIGH_VAR(p, MCAST_PROBES));
827 }
828 
829 static void neigh_invalidate(struct neighbour *neigh)
830 	__releases(neigh->lock)
831 	__acquires(neigh->lock)
832 {
833 	struct sk_buff *skb;
834 
835 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
836 	neigh_dbg(2, "neigh %p is failed\n", neigh);
837 	neigh->updated = jiffies;
838 
839 	/* This is a very delicate place. report_unreachable is a very
840 	   complicated routine. In particular, it can hit the same neighbour entry!
841 
842 	   So we try to be careful and avoid a dead loop. --ANK
843 	 */
844 	while (neigh->nud_state == NUD_FAILED &&
845 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
846 		write_unlock(&neigh->lock);
847 		neigh->ops->error_report(neigh, skb);
848 		write_lock(&neigh->lock);
849 	}
850 	__skb_queue_purge(&neigh->arp_queue);
851 	neigh->arp_queue_len_bytes = 0;
852 }
853 
854 static void neigh_probe(struct neighbour *neigh)
855 	__releases(neigh->lock)
856 {
857 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
858 	/* keep skb alive even if arp_queue overflows */
859 	if (skb)
860 		skb = skb_clone(skb, GFP_ATOMIC);
861 	write_unlock(&neigh->lock);
862 	neigh->ops->solicit(neigh, skb);
863 	atomic_inc(&neigh->probes);
864 	kfree_skb(skb);
865 }
866 
867 /* Called when a timer expires for a neighbour entry. */
868 
869 static void neigh_timer_handler(unsigned long arg)
870 {
871 	unsigned long now, next;
872 	struct neighbour *neigh = (struct neighbour *)arg;
873 	unsigned int state;
874 	int notify = 0;
875 
876 	write_lock(&neigh->lock);
877 
878 	state = neigh->nud_state;
879 	now = jiffies;
880 	next = now + HZ;
881 
882 	if (!(state & NUD_IN_TIMER))
883 		goto out;
884 
885 	if (state & NUD_REACHABLE) {
886 		if (time_before_eq(now,
887 				   neigh->confirmed + neigh->parms->reachable_time)) {
888 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
889 			next = neigh->confirmed + neigh->parms->reachable_time;
890 		} else if (time_before_eq(now,
891 					  neigh->used +
892 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
893 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
894 			neigh->nud_state = NUD_DELAY;
895 			neigh->updated = jiffies;
896 			neigh_suspect(neigh);
897 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
898 		} else {
899 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
900 			neigh->nud_state = NUD_STALE;
901 			neigh->updated = jiffies;
902 			neigh_suspect(neigh);
903 			notify = 1;
904 		}
905 	} else if (state & NUD_DELAY) {
906 		if (time_before_eq(now,
907 				   neigh->confirmed +
908 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
909 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
910 			neigh->nud_state = NUD_REACHABLE;
911 			neigh->updated = jiffies;
912 			neigh_connect(neigh);
913 			notify = 1;
914 			next = neigh->confirmed + neigh->parms->reachable_time;
915 		} else {
916 			neigh_dbg(2, "neigh %p is probed\n", neigh);
917 			neigh->nud_state = NUD_PROBE;
918 			neigh->updated = jiffies;
919 			atomic_set(&neigh->probes, 0);
920 			notify = 1;
921 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
922 		}
923 	} else {
924 		/* NUD_PROBE|NUD_INCOMPLETE */
925 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
926 	}
927 
928 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
929 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
930 		neigh->nud_state = NUD_FAILED;
931 		notify = 1;
932 		neigh_invalidate(neigh);
933 		goto out;
934 	}
935 
936 	if (neigh->nud_state & NUD_IN_TIMER) {
937 		if (time_before(next, jiffies + HZ/2))
938 			next = jiffies + HZ/2;
939 		if (!mod_timer(&neigh->timer, next))
940 			neigh_hold(neigh);
941 	}
942 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
943 		neigh_probe(neigh);
944 	} else {
945 out:
946 		write_unlock(&neigh->lock);
947 	}
948 
949 	if (notify)
950 		neigh_update_notify(neigh);
951 
952 	neigh_release(neigh);
953 }
954 
955 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
956 {
957 	int rc;
958 	bool immediate_probe = false;
959 
960 	write_lock_bh(&neigh->lock);
961 
962 	rc = 0;
963 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
964 		goto out_unlock_bh;
965 	if (neigh->dead)
966 		goto out_dead;
967 
968 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
969 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
970 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
971 			unsigned long next, now = jiffies;
972 
973 			atomic_set(&neigh->probes,
974 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
975 			neigh->nud_state     = NUD_INCOMPLETE;
976 			neigh->updated = now;
977 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
978 					 HZ/2);
979 			neigh_add_timer(neigh, next);
980 			immediate_probe = true;
981 		} else {
982 			neigh->nud_state = NUD_FAILED;
983 			neigh->updated = jiffies;
984 			write_unlock_bh(&neigh->lock);
985 
986 			kfree_skb(skb);
987 			return 1;
988 		}
989 	} else if (neigh->nud_state & NUD_STALE) {
990 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
991 		neigh->nud_state = NUD_DELAY;
992 		neigh->updated = jiffies;
993 		neigh_add_timer(neigh, jiffies +
994 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
995 	}
996 
997 	if (neigh->nud_state == NUD_INCOMPLETE) {
998 		if (skb) {
999 			while (neigh->arp_queue_len_bytes + skb->truesize >
1000 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1001 				struct sk_buff *buff;
1002 
1003 				buff = __skb_dequeue(&neigh->arp_queue);
1004 				if (!buff)
1005 					break;
1006 				neigh->arp_queue_len_bytes -= buff->truesize;
1007 				kfree_skb(buff);
1008 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1009 			}
1010 			skb_dst_force(skb);
1011 			__skb_queue_tail(&neigh->arp_queue, skb);
1012 			neigh->arp_queue_len_bytes += skb->truesize;
1013 		}
1014 		rc = 1;
1015 	}
1016 out_unlock_bh:
1017 	if (immediate_probe)
1018 		neigh_probe(neigh);
1019 	else
1020 		write_unlock(&neigh->lock);
1021 	local_bh_enable();
1022 	return rc;
1023 
1024 out_dead:
1025 	if (neigh->nud_state & NUD_STALE)
1026 		goto out_unlock_bh;
1027 	write_unlock_bh(&neigh->lock);
1028 	kfree_skb(skb);
1029 	return 1;
1030 }
1031 EXPORT_SYMBOL(__neigh_event_send);
1032 
1033 static void neigh_update_hhs(struct neighbour *neigh)
1034 {
1035 	struct hh_cache *hh;
1036 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1037 		= NULL;
1038 
1039 	if (neigh->dev->header_ops)
1040 		update = neigh->dev->header_ops->cache_update;
1041 
1042 	if (update) {
1043 		hh = &neigh->hh;
1044 		if (hh->hh_len) {
1045 			write_seqlock_bh(&hh->hh_lock);
1046 			update(hh, neigh->dev, neigh->ha);
1047 			write_sequnlock_bh(&hh->hh_lock);
1048 		}
1049 	}
1050 }
1051 
1052 
1053 
1054 /* Generic update routine.
1055    -- lladdr is the new lladdr, or NULL if it is not supplied.
1056    -- new    is the new state.
1057    -- flags
1058 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1059 				if it is different.
1060 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1061 				lladdr instead of overriding it
1062 				if it is different.
1063 				It also allows retaining the current state
1064 				if lladdr is unchanged.
1065 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1066 
1067 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1068 				NTF_ROUTER flag.
1069 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known as
1070 				a router.
1071 
1072    The caller MUST hold a reference count on the entry.
1073  */
1074 
1075 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1076 		 u32 flags)
1077 {
1078 	u8 old;
1079 	int err;
1080 	int notify = 0;
1081 	struct net_device *dev;
1082 	int update_isrouter = 0;
1083 
1084 	write_lock_bh(&neigh->lock);
1085 
1086 	dev    = neigh->dev;
1087 	old    = neigh->nud_state;
1088 	err    = -EPERM;
1089 
1090 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1091 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1092 		goto out;
1093 	if (neigh->dead)
1094 		goto out;
1095 
1096 	if (!(new & NUD_VALID)) {
1097 		neigh_del_timer(neigh);
1098 		if (old & NUD_CONNECTED)
1099 			neigh_suspect(neigh);
1100 		neigh->nud_state = new;
1101 		err = 0;
1102 		notify = old & NUD_VALID;
1103 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1104 		    (new & NUD_FAILED)) {
1105 			neigh_invalidate(neigh);
1106 			notify = 1;
1107 		}
1108 		goto out;
1109 	}
1110 
1111 	/* Compare new lladdr with cached one */
1112 	if (!dev->addr_len) {
1113 		/* First case: device needs no address. */
1114 		lladdr = neigh->ha;
1115 	} else if (lladdr) {
1116 		/* The second case: if something is already cached
1117 		   and a new address is proposed:
1118 		   - compare new & old
1119 		   - if they are different, check override flag
1120 		 */
1121 		if ((old & NUD_VALID) &&
1122 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1123 			lladdr = neigh->ha;
1124 	} else {
1125 		/* No address is supplied; if we know something,
1126 		   use it, otherwise discard the request.
1127 		 */
1128 		err = -EINVAL;
1129 		if (!(old & NUD_VALID))
1130 			goto out;
1131 		lladdr = neigh->ha;
1132 	}
1133 
1134 	if (new & NUD_CONNECTED)
1135 		neigh->confirmed = jiffies;
1136 	neigh->updated = jiffies;
1137 
1138 	/* If the entry was valid and the address has not changed,
1139 	   do not change the entry state if the new one is STALE.
1140 	 */
1141 	err = 0;
1142 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1143 	if (old & NUD_VALID) {
1144 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1145 			update_isrouter = 0;
1146 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1147 			    (old & NUD_CONNECTED)) {
1148 				lladdr = neigh->ha;
1149 				new = NUD_STALE;
1150 			} else
1151 				goto out;
1152 		} else {
1153 			if (lladdr == neigh->ha && new == NUD_STALE &&
1154 			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1155 			     (old & NUD_CONNECTED))
1156 			    )
1157 				new = old;
1158 		}
1159 	}
1160 
1161 	if (new != old) {
1162 		neigh_del_timer(neigh);
1163 		if (new & NUD_PROBE)
1164 			atomic_set(&neigh->probes, 0);
1165 		if (new & NUD_IN_TIMER)
1166 			neigh_add_timer(neigh, (jiffies +
1167 						((new & NUD_REACHABLE) ?
1168 						 neigh->parms->reachable_time :
1169 						 0)));
1170 		neigh->nud_state = new;
1171 		notify = 1;
1172 	}
1173 
1174 	if (lladdr != neigh->ha) {
1175 		write_seqlock(&neigh->ha_lock);
1176 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1177 		write_sequnlock(&neigh->ha_lock);
1178 		neigh_update_hhs(neigh);
1179 		if (!(new & NUD_CONNECTED))
1180 			neigh->confirmed = jiffies -
1181 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1182 		notify = 1;
1183 	}
1184 	if (new == old)
1185 		goto out;
1186 	if (new & NUD_CONNECTED)
1187 		neigh_connect(neigh);
1188 	else
1189 		neigh_suspect(neigh);
1190 	if (!(old & NUD_VALID)) {
1191 		struct sk_buff *skb;
1192 
1193 		/* Again: avoid dead loop if something went wrong */
1194 
1195 		while (neigh->nud_state & NUD_VALID &&
1196 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1197 			struct dst_entry *dst = skb_dst(skb);
1198 			struct neighbour *n2, *n1 = neigh;
1199 			write_unlock_bh(&neigh->lock);
1200 
1201 			rcu_read_lock();
1202 
1203 			/* Why not just use 'neigh' as-is?  The problem is that
1204 			 * things such as shaper, eql, and sch_teql can end up
1205 			 * using alternative, different, neigh objects to output
1206 			 * the packet in the output path.  So what we need to do
1207 			 * here is re-lookup the top-level neigh in the path so
1208 			 * we can reinject the packet there.
1209 			 */
1210 			n2 = NULL;
1211 			if (dst) {
1212 				n2 = dst_neigh_lookup_skb(dst, skb);
1213 				if (n2)
1214 					n1 = n2;
1215 			}
1216 			n1->output(n1, skb);
1217 			if (n2)
1218 				neigh_release(n2);
1219 			rcu_read_unlock();
1220 
1221 			write_lock_bh(&neigh->lock);
1222 		}
1223 		__skb_queue_purge(&neigh->arp_queue);
1224 		neigh->arp_queue_len_bytes = 0;
1225 	}
1226 out:
1227 	if (update_isrouter) {
1228 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1229 			(neigh->flags | NTF_ROUTER) :
1230 			(neigh->flags & ~NTF_ROUTER);
1231 	}
1232 	write_unlock_bh(&neigh->lock);
1233 
1234 	if (notify)
1235 		neigh_update_notify(neigh);
1236 
1237 	return err;
1238 }
1239 EXPORT_SYMBOL(neigh_update);
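/* An illustrative sketch of a typical caller: when ARP processes a reply it
 * confirms the entry along roughly these lines, letting the received
 * hardware address (sha) override a stale one:
 *
 *	n = neigh_lookup(&arp_tbl, &sip, dev);
 *	if (n) {
 *		neigh_update(n, sha, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE);
 *		neigh_release(n);
 *	}
 */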
1240 
1241 /* Update the neigh to listen temporarily for probe responses, even if it is
1242  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1243  */
1244 void __neigh_set_probe_once(struct neighbour *neigh)
1245 {
1246 	if (neigh->dead)
1247 		return;
1248 	neigh->updated = jiffies;
1249 	if (!(neigh->nud_state & NUD_FAILED))
1250 		return;
1251 	neigh->nud_state = NUD_INCOMPLETE;
1252 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1253 	neigh_add_timer(neigh,
1254 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1255 }
1256 EXPORT_SYMBOL(__neigh_set_probe_once);
1257 
1258 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1259 				 u8 *lladdr, void *saddr,
1260 				 struct net_device *dev)
1261 {
1262 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1263 						 lladdr || !dev->addr_len);
1264 	if (neigh)
1265 		neigh_update(neigh, lladdr, NUD_STALE,
1266 			     NEIGH_UPDATE_F_OVERRIDE);
1267 	return neigh;
1268 }
1269 EXPORT_SYMBOL(neigh_event_ns);
1270 
1271 /* called with read_lock_bh(&n->lock); */
1272 static void neigh_hh_init(struct neighbour *n)
1273 {
1274 	struct net_device *dev = n->dev;
1275 	__be16 prot = n->tbl->protocol;
1276 	struct hh_cache	*hh = &n->hh;
1277 
1278 	write_lock_bh(&n->lock);
1279 
1280 	/* Only one thread can come in here and initialize the
1281 	 * hh_cache entry.
1282 	 */
1283 	if (!hh->hh_len)
1284 		dev->header_ops->cache(n, hh, prot);
1285 
1286 	write_unlock_bh(&n->lock);
1287 }
1288 
1289 /* Slow and careful. */
1290 
1291 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1292 {
1293 	int rc = 0;
1294 
1295 	if (!neigh_event_send(neigh, skb)) {
1296 		int err;
1297 		struct net_device *dev = neigh->dev;
1298 		unsigned int seq;
1299 
1300 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1301 			neigh_hh_init(neigh);
1302 
1303 		do {
1304 			__skb_pull(skb, skb_network_offset(skb));
1305 			seq = read_seqbegin(&neigh->ha_lock);
1306 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1307 					      neigh->ha, NULL, skb->len);
1308 		} while (read_seqretry(&neigh->ha_lock, seq));
1309 
1310 		if (err >= 0)
1311 			rc = dev_queue_xmit(skb);
1312 		else
1313 			goto out_kfree_skb;
1314 	}
1315 out:
1316 	return rc;
1317 out_kfree_skb:
1318 	rc = -EINVAL;
1319 	kfree_skb(skb);
1320 	goto out;
1321 }
1322 EXPORT_SYMBOL(neigh_resolve_output);
1323 
1324 /* As fast as possible without hh cache */
1325 
1326 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1327 {
1328 	struct net_device *dev = neigh->dev;
1329 	unsigned int seq;
1330 	int err;
1331 
1332 	do {
1333 		__skb_pull(skb, skb_network_offset(skb));
1334 		seq = read_seqbegin(&neigh->ha_lock);
1335 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1336 				      neigh->ha, NULL, skb->len);
1337 	} while (read_seqretry(&neigh->ha_lock, seq));
1338 
1339 	if (err >= 0)
1340 		err = dev_queue_xmit(skb);
1341 	else {
1342 		err = -EINVAL;
1343 		kfree_skb(skb);
1344 	}
1345 	return err;
1346 }
1347 EXPORT_SYMBOL(neigh_connected_output);
1348 
1349 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1350 {
1351 	return dev_queue_xmit(skb);
1352 }
1353 EXPORT_SYMBOL(neigh_direct_output);
1354 
1355 static void neigh_proxy_process(unsigned long arg)
1356 {
1357 	struct neigh_table *tbl = (struct neigh_table *)arg;
1358 	long sched_next = 0;
1359 	unsigned long now = jiffies;
1360 	struct sk_buff *skb, *n;
1361 
1362 	spin_lock(&tbl->proxy_queue.lock);
1363 
1364 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1365 		long tdif = NEIGH_CB(skb)->sched_next - now;
1366 
1367 		if (tdif <= 0) {
1368 			struct net_device *dev = skb->dev;
1369 
1370 			__skb_unlink(skb, &tbl->proxy_queue);
1371 			if (tbl->proxy_redo && netif_running(dev)) {
1372 				rcu_read_lock();
1373 				tbl->proxy_redo(skb);
1374 				rcu_read_unlock();
1375 			} else {
1376 				kfree_skb(skb);
1377 			}
1378 
1379 			dev_put(dev);
1380 		} else if (!sched_next || tdif < sched_next)
1381 			sched_next = tdif;
1382 	}
1383 	del_timer(&tbl->proxy_timer);
1384 	if (sched_next)
1385 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1386 	spin_unlock(&tbl->proxy_queue.lock);
1387 }
1388 
1389 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1390 		    struct sk_buff *skb)
1391 {
1392 	unsigned long now = jiffies;
1393 
1394 	unsigned long sched_next = now + (prandom_u32() %
1395 					  NEIGH_VAR(p, PROXY_DELAY));
1396 
1397 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1398 		kfree_skb(skb);
1399 		return;
1400 	}
1401 
1402 	NEIGH_CB(skb)->sched_next = sched_next;
1403 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1404 
1405 	spin_lock(&tbl->proxy_queue.lock);
1406 	if (del_timer(&tbl->proxy_timer)) {
1407 		if (time_before(tbl->proxy_timer.expires, sched_next))
1408 			sched_next = tbl->proxy_timer.expires;
1409 	}
1410 	skb_dst_drop(skb);
1411 	dev_hold(skb->dev);
1412 	__skb_queue_tail(&tbl->proxy_queue, skb);
1413 	mod_timer(&tbl->proxy_timer, sched_next);
1414 	spin_unlock(&tbl->proxy_queue.lock);
1415 }
1416 EXPORT_SYMBOL(pneigh_enqueue);
1417 
1418 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1419 						      struct net *net, int ifindex)
1420 {
1421 	struct neigh_parms *p;
1422 
1423 	list_for_each_entry(p, &tbl->parms_list, list) {
1424 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1425 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1426 			return p;
1427 	}
1428 
1429 	return NULL;
1430 }
1431 
1432 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1433 				      struct neigh_table *tbl)
1434 {
1435 	struct neigh_parms *p;
1436 	struct net *net = dev_net(dev);
1437 	const struct net_device_ops *ops = dev->netdev_ops;
1438 
1439 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1440 	if (p) {
1441 		p->tbl		  = tbl;
1442 		atomic_set(&p->refcnt, 1);
1443 		p->reachable_time =
1444 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1445 		dev_hold(dev);
1446 		p->dev = dev;
1447 		write_pnet(&p->net, net);
1448 		p->sysctl_table = NULL;
1449 
1450 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1451 			dev_put(dev);
1452 			kfree(p);
1453 			return NULL;
1454 		}
1455 
1456 		write_lock_bh(&tbl->lock);
1457 		list_add(&p->list, &tbl->parms.list);
1458 		write_unlock_bh(&tbl->lock);
1459 
1460 		neigh_parms_data_state_cleanall(p);
1461 	}
1462 	return p;
1463 }
1464 EXPORT_SYMBOL(neigh_parms_alloc);
1465 
1466 static void neigh_rcu_free_parms(struct rcu_head *head)
1467 {
1468 	struct neigh_parms *parms =
1469 		container_of(head, struct neigh_parms, rcu_head);
1470 
1471 	neigh_parms_put(parms);
1472 }
1473 
1474 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1475 {
1476 	if (!parms || parms == &tbl->parms)
1477 		return;
1478 	write_lock_bh(&tbl->lock);
1479 	list_del(&parms->list);
1480 	parms->dead = 1;
1481 	write_unlock_bh(&tbl->lock);
1482 	if (parms->dev)
1483 		dev_put(parms->dev);
1484 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1485 }
1486 EXPORT_SYMBOL(neigh_parms_release);
1487 
1488 static void neigh_parms_destroy(struct neigh_parms *parms)
1489 {
1490 	kfree(parms);
1491 }
1492 
1493 static struct lock_class_key neigh_table_proxy_queue_class;
1494 
1495 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1496 
1497 void neigh_table_init(int index, struct neigh_table *tbl)
1498 {
1499 	unsigned long now = jiffies;
1500 	unsigned long phsize;
1501 
1502 	INIT_LIST_HEAD(&tbl->parms_list);
1503 	list_add(&tbl->parms.list, &tbl->parms_list);
1504 	write_pnet(&tbl->parms.net, &init_net);
1505 	atomic_set(&tbl->parms.refcnt, 1);
1506 	tbl->parms.reachable_time =
1507 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1508 
1509 	tbl->stats = alloc_percpu(struct neigh_statistics);
1510 	if (!tbl->stats)
1511 		panic("cannot create neighbour cache statistics");
1512 
1513 #ifdef CONFIG_PROC_FS
1514 	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1515 			      &neigh_stat_seq_fops, tbl))
1516 		panic("cannot create neighbour proc dir entry");
1517 #endif
1518 
1519 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1520 
1521 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1522 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1523 
1524 	if (!tbl->nht || !tbl->phash_buckets)
1525 		panic("cannot allocate neighbour cache hashes");
1526 
1527 	if (!tbl->entry_size)
1528 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1529 					tbl->key_len, NEIGH_PRIV_ALIGN);
1530 	else
1531 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1532 
1533 	rwlock_init(&tbl->lock);
1534 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1535 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1536 			tbl->parms.reachable_time);
1537 	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1538 	skb_queue_head_init_class(&tbl->proxy_queue,
1539 			&neigh_table_proxy_queue_class);
1540 
1541 	tbl->last_flush = now;
1542 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1543 
1544 	neigh_tables[index] = tbl;
1545 }
1546 EXPORT_SYMBOL(neigh_table_init);
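/* Illustrative note: each protocol registers its table once at init time
 * under its fixed slot, e.g. ARP does roughly
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 *
 * and neigh_find_table() below maps an address family back to that slot.
 */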
1547 
1548 int neigh_table_clear(int index, struct neigh_table *tbl)
1549 {
1550 	neigh_tables[index] = NULL;
1551 	/* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1552 	cancel_delayed_work_sync(&tbl->gc_work);
1553 	del_timer_sync(&tbl->proxy_timer);
1554 	pneigh_queue_purge(&tbl->proxy_queue);
1555 	neigh_ifdown(tbl, NULL);
1556 	if (atomic_read(&tbl->entries))
1557 		pr_crit("neighbour leakage\n");
1558 
1559 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1560 		 neigh_hash_free_rcu);
1561 	tbl->nht = NULL;
1562 
1563 	kfree(tbl->phash_buckets);
1564 	tbl->phash_buckets = NULL;
1565 
1566 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1567 
1568 	free_percpu(tbl->stats);
1569 	tbl->stats = NULL;
1570 
1571 	return 0;
1572 }
1573 EXPORT_SYMBOL(neigh_table_clear);
1574 
1575 static struct neigh_table *neigh_find_table(int family)
1576 {
1577 	struct neigh_table *tbl = NULL;
1578 
1579 	switch (family) {
1580 	case AF_INET:
1581 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1582 		break;
1583 	case AF_INET6:
1584 		tbl = neigh_tables[NEIGH_ND_TABLE];
1585 		break;
1586 	case AF_DECnet:
1587 		tbl = neigh_tables[NEIGH_DN_TABLE];
1588 		break;
1589 	}
1590 
1591 	return tbl;
1592 }
1593 
1594 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1595 {
1596 	struct net *net = sock_net(skb->sk);
1597 	struct ndmsg *ndm;
1598 	struct nlattr *dst_attr;
1599 	struct neigh_table *tbl;
1600 	struct neighbour *neigh;
1601 	struct net_device *dev = NULL;
1602 	int err = -EINVAL;
1603 
1604 	ASSERT_RTNL();
1605 	if (nlmsg_len(nlh) < sizeof(*ndm))
1606 		goto out;
1607 
1608 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1609 	if (dst_attr == NULL)
1610 		goto out;
1611 
1612 	ndm = nlmsg_data(nlh);
1613 	if (ndm->ndm_ifindex) {
1614 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1615 		if (dev == NULL) {
1616 			err = -ENODEV;
1617 			goto out;
1618 		}
1619 	}
1620 
1621 	tbl = neigh_find_table(ndm->ndm_family);
1622 	if (tbl == NULL)
1623 		return -EAFNOSUPPORT;
1624 
1625 	if (nla_len(dst_attr) < tbl->key_len)
1626 		goto out;
1627 
1628 	if (ndm->ndm_flags & NTF_PROXY) {
1629 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1630 		goto out;
1631 	}
1632 
1633 	if (dev == NULL)
1634 		goto out;
1635 
1636 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1637 	if (neigh == NULL) {
1638 		err = -ENOENT;
1639 		goto out;
1640 	}
1641 
1642 	err = neigh_update(neigh, NULL, NUD_FAILED,
1643 			   NEIGH_UPDATE_F_OVERRIDE |
1644 			   NEIGH_UPDATE_F_ADMIN);
1645 	neigh_release(neigh);
1646 
1647 out:
1648 	return err;
1649 }
1650 
1651 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1652 {
1653 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1654 	struct net *net = sock_net(skb->sk);
1655 	struct ndmsg *ndm;
1656 	struct nlattr *tb[NDA_MAX+1];
1657 	struct neigh_table *tbl;
1658 	struct net_device *dev = NULL;
1659 	struct neighbour *neigh;
1660 	void *dst, *lladdr;
1661 	int err;
1662 
1663 	ASSERT_RTNL();
1664 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1665 	if (err < 0)
1666 		goto out;
1667 
1668 	err = -EINVAL;
1669 	if (tb[NDA_DST] == NULL)
1670 		goto out;
1671 
1672 	ndm = nlmsg_data(nlh);
1673 	if (ndm->ndm_ifindex) {
1674 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1675 		if (dev == NULL) {
1676 			err = -ENODEV;
1677 			goto out;
1678 		}
1679 
1680 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1681 			goto out;
1682 	}
1683 
1684 	tbl = neigh_find_table(ndm->ndm_family);
1685 	if (tbl == NULL)
1686 		return -EAFNOSUPPORT;
1687 
1688 	if (nla_len(tb[NDA_DST]) < tbl->key_len)
1689 		goto out;
1690 	dst = nla_data(tb[NDA_DST]);
1691 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1692 
1693 	if (ndm->ndm_flags & NTF_PROXY) {
1694 		struct pneigh_entry *pn;
1695 
1696 		err = -ENOBUFS;
1697 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1698 		if (pn) {
1699 			pn->flags = ndm->ndm_flags;
1700 			err = 0;
1701 		}
1702 		goto out;
1703 	}
1704 
1705 	if (dev == NULL)
1706 		goto out;
1707 
1708 	neigh = neigh_lookup(tbl, dst, dev);
1709 	if (neigh == NULL) {
1710 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1711 			err = -ENOENT;
1712 			goto out;
1713 		}
1714 
1715 		neigh = __neigh_lookup_errno(tbl, dst, dev);
1716 		if (IS_ERR(neigh)) {
1717 			err = PTR_ERR(neigh);
1718 			goto out;
1719 		}
1720 	} else {
1721 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1722 			err = -EEXIST;
1723 			neigh_release(neigh);
1724 			goto out;
1725 		}
1726 
1727 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1728 			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1729 	}
1730 
1731 	if (ndm->ndm_flags & NTF_USE) {
1732 		neigh_event_send(neigh, NULL);
1733 		err = 0;
1734 	} else
1735 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1736 	neigh_release(neigh);
1737 
1738 out:
1739 	return err;
1740 }
1741 
1742 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1743 {
1744 	struct nlattr *nest;
1745 
1746 	nest = nla_nest_start(skb, NDTA_PARMS);
1747 	if (nest == NULL)
1748 		return -ENOBUFS;
1749 
1750 	if ((parms->dev &&
1751 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1752 	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1753 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1754 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1755 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1756 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1757 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1758 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1759 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1760 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1761 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1762 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1763 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1764 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1765 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1766 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1767 			  NDTPA_PAD) ||
1768 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1769 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1770 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1771 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1772 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1773 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1774 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1775 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1776 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1777 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1778 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1779 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1780 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1781 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1782 		goto nla_put_failure;
1783 	return nla_nest_end(skb, nest);
1784 
1785 nla_put_failure:
1786 	nla_nest_cancel(skb, nest);
1787 	return -EMSGSIZE;
1788 }
1789 
1790 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1791 			      u32 pid, u32 seq, int type, int flags)
1792 {
1793 	struct nlmsghdr *nlh;
1794 	struct ndtmsg *ndtmsg;
1795 
1796 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1797 	if (nlh == NULL)
1798 		return -EMSGSIZE;
1799 
1800 	ndtmsg = nlmsg_data(nlh);
1801 
1802 	read_lock_bh(&tbl->lock);
1803 	ndtmsg->ndtm_family = tbl->family;
1804 	ndtmsg->ndtm_pad1   = 0;
1805 	ndtmsg->ndtm_pad2   = 0;
1806 
1807 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1808 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1809 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1810 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1811 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1812 		goto nla_put_failure;
1813 	{
1814 		unsigned long now = jiffies;
1815 		unsigned int flush_delta = now - tbl->last_flush;
1816 		unsigned int rand_delta = now - tbl->last_rand;
1817 		struct neigh_hash_table *nht;
1818 		struct ndt_config ndc = {
1819 			.ndtc_key_len		= tbl->key_len,
1820 			.ndtc_entry_size	= tbl->entry_size,
1821 			.ndtc_entries		= atomic_read(&tbl->entries),
1822 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1823 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1824 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1825 		};
1826 
1827 		rcu_read_lock_bh();
1828 		nht = rcu_dereference_bh(tbl->nht);
1829 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1830 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1831 		rcu_read_unlock_bh();
1832 
1833 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1834 			goto nla_put_failure;
1835 	}
1836 
1837 	{
1838 		int cpu;
1839 		struct ndt_stats ndst;
1840 
1841 		memset(&ndst, 0, sizeof(ndst));
1842 
1843 		for_each_possible_cpu(cpu) {
1844 			struct neigh_statistics	*st;
1845 
1846 			st = per_cpu_ptr(tbl->stats, cpu);
1847 			ndst.ndts_allocs		+= st->allocs;
1848 			ndst.ndts_destroys		+= st->destroys;
1849 			ndst.ndts_hash_grows		+= st->hash_grows;
1850 			ndst.ndts_res_failed		+= st->res_failed;
1851 			ndst.ndts_lookups		+= st->lookups;
1852 			ndst.ndts_hits			+= st->hits;
1853 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1854 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1855 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1856 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1857 			ndst.ndts_table_fulls		+= st->table_fulls;
1858 		}
1859 
1860 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1861 				  NDTA_PAD))
1862 			goto nla_put_failure;
1863 	}
1864 
1865 	BUG_ON(tbl->parms.dev);
1866 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1867 		goto nla_put_failure;
1868 
1869 	read_unlock_bh(&tbl->lock);
1870 	nlmsg_end(skb, nlh);
1871 	return 0;
1872 
1873 nla_put_failure:
1874 	read_unlock_bh(&tbl->lock);
1875 	nlmsg_cancel(skb, nlh);
1876 	return -EMSGSIZE;
1877 }
1878 
1879 static int neightbl_fill_param_info(struct sk_buff *skb,
1880 				    struct neigh_table *tbl,
1881 				    struct neigh_parms *parms,
1882 				    u32 pid, u32 seq, int type,
1883 				    unsigned int flags)
1884 {
1885 	struct ndtmsg *ndtmsg;
1886 	struct nlmsghdr *nlh;
1887 
1888 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1889 	if (nlh == NULL)
1890 		return -EMSGSIZE;
1891 
1892 	ndtmsg = nlmsg_data(nlh);
1893 
1894 	read_lock_bh(&tbl->lock);
1895 	ndtmsg->ndtm_family = tbl->family;
1896 	ndtmsg->ndtm_pad1   = 0;
1897 	ndtmsg->ndtm_pad2   = 0;
1898 
1899 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1900 	    neightbl_fill_parms(skb, parms) < 0)
1901 		goto errout;
1902 
1903 	read_unlock_bh(&tbl->lock);
1904 	nlmsg_end(skb, nlh);
1905 	return 0;
1906 errout:
1907 	read_unlock_bh(&tbl->lock);
1908 	nlmsg_cancel(skb, nlh);
1909 	return -EMSGSIZE;
1910 }
1911 
1912 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1913 	[NDTA_NAME]		= { .type = NLA_STRING },
1914 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1915 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1916 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1917 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1918 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1919 };
1920 
1921 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1922 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1923 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1924 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1925 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1926 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1927 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1928 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1929 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1930 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1931 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1932 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1933 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1934 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1935 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1936 };
1937 
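/* RTM_SETNEIGHTBL handler: find the table named by NDTA_NAME, then update
 * the selected neigh_parms and/or the gc thresholds and interval under
 * tbl->lock.
 */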
1938 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1939 {
1940 	struct net *net = sock_net(skb->sk);
1941 	struct neigh_table *tbl;
1942 	struct ndtmsg *ndtmsg;
1943 	struct nlattr *tb[NDTA_MAX+1];
1944 	bool found = false;
1945 	int err, tidx;
1946 
1947 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1948 			  nl_neightbl_policy);
1949 	if (err < 0)
1950 		goto errout;
1951 
1952 	if (tb[NDTA_NAME] == NULL) {
1953 		err = -EINVAL;
1954 		goto errout;
1955 	}
1956 
1957 	ndtmsg = nlmsg_data(nlh);
1958 
1959 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
1960 		tbl = neigh_tables[tidx];
1961 		if (!tbl)
1962 			continue;
1963 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1964 			continue;
1965 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
1966 			found = true;
1967 			break;
1968 		}
1969 	}
1970 
1971 	if (!found)
1972 		return -ENOENT;
1973 
1974 	/*
1975 	 * We acquire tbl->lock to be nice to the periodic timers and
1976 	 * make sure they always see a consistent set of values.
1977 	 */
1978 	write_lock_bh(&tbl->lock);
1979 
1980 	if (tb[NDTA_PARMS]) {
1981 		struct nlattr *tbp[NDTPA_MAX+1];
1982 		struct neigh_parms *p;
1983 		int i, ifindex = 0;
1984 
1985 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1986 				       nl_ntbl_parm_policy);
1987 		if (err < 0)
1988 			goto errout_tbl_lock;
1989 
1990 		if (tbp[NDTPA_IFINDEX])
1991 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1992 
1993 		p = lookup_neigh_parms(tbl, net, ifindex);
1994 		if (p == NULL) {
1995 			err = -ENOENT;
1996 			goto errout_tbl_lock;
1997 		}
1998 
1999 		for (i = 1; i <= NDTPA_MAX; i++) {
2000 			if (tbp[i] == NULL)
2001 				continue;
2002 
2003 			switch (i) {
2004 			case NDTPA_QUEUE_LEN:
2005 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2006 					      nla_get_u32(tbp[i]) *
2007 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2008 				break;
2009 			case NDTPA_QUEUE_LENBYTES:
2010 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2011 					      nla_get_u32(tbp[i]));
2012 				break;
2013 			case NDTPA_PROXY_QLEN:
2014 				NEIGH_VAR_SET(p, PROXY_QLEN,
2015 					      nla_get_u32(tbp[i]));
2016 				break;
2017 			case NDTPA_APP_PROBES:
2018 				NEIGH_VAR_SET(p, APP_PROBES,
2019 					      nla_get_u32(tbp[i]));
2020 				break;
2021 			case NDTPA_UCAST_PROBES:
2022 				NEIGH_VAR_SET(p, UCAST_PROBES,
2023 					      nla_get_u32(tbp[i]));
2024 				break;
2025 			case NDTPA_MCAST_PROBES:
2026 				NEIGH_VAR_SET(p, MCAST_PROBES,
2027 					      nla_get_u32(tbp[i]));
2028 				break;
2029 			case NDTPA_MCAST_REPROBES:
2030 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2031 					      nla_get_u32(tbp[i]));
2032 				break;
2033 			case NDTPA_BASE_REACHABLE_TIME:
2034 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2035 					      nla_get_msecs(tbp[i]));
2036 				/* Update reachable_time as well; otherwise the change only
2037 				 * takes effect the next time neigh_periodic_work decides to
2038 				 * recompute it, which can be multiple minutes away.
2039 				 */
2040 				p->reachable_time =
2041 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2042 				break;
2043 			case NDTPA_GC_STALETIME:
2044 				NEIGH_VAR_SET(p, GC_STALETIME,
2045 					      nla_get_msecs(tbp[i]));
2046 				break;
2047 			case NDTPA_DELAY_PROBE_TIME:
2048 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2049 					      nla_get_msecs(tbp[i]));
2050 				break;
2051 			case NDTPA_RETRANS_TIME:
2052 				NEIGH_VAR_SET(p, RETRANS_TIME,
2053 					      nla_get_msecs(tbp[i]));
2054 				break;
2055 			case NDTPA_ANYCAST_DELAY:
2056 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2057 					      nla_get_msecs(tbp[i]));
2058 				break;
2059 			case NDTPA_PROXY_DELAY:
2060 				NEIGH_VAR_SET(p, PROXY_DELAY,
2061 					      nla_get_msecs(tbp[i]));
2062 				break;
2063 			case NDTPA_LOCKTIME:
2064 				NEIGH_VAR_SET(p, LOCKTIME,
2065 					      nla_get_msecs(tbp[i]));
2066 				break;
2067 			}
2068 		}
2069 	}
2070 
2071 	err = -ENOENT;
2072 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2073 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2074 	    !net_eq(net, &init_net))
2075 		goto errout_tbl_lock;
2076 
2077 	if (tb[NDTA_THRESH1])
2078 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2079 
2080 	if (tb[NDTA_THRESH2])
2081 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2082 
2083 	if (tb[NDTA_THRESH3])
2084 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2085 
2086 	if (tb[NDTA_GC_INTERVAL])
2087 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2088 
2089 	err = 0;
2090 
2091 errout_tbl_lock:
2092 	write_unlock_bh(&tbl->lock);
2093 errout:
2094 	return err;
2095 }
2096 
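/* RTM_GETNEIGHTBL dump callback: emit each matching table followed by its
 * per-device parameter sets, resuming from cb->args[0]/[1] on partial dumps.
 */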
2097 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2098 {
2099 	struct net *net = sock_net(skb->sk);
2100 	int family, tidx, nidx = 0;
2101 	int tbl_skip = cb->args[0];
2102 	int neigh_skip = cb->args[1];
2103 	struct neigh_table *tbl;
2104 
2105 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2106 
2107 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2108 		struct neigh_parms *p;
2109 
2110 		tbl = neigh_tables[tidx];
2111 		if (!tbl)
2112 			continue;
2113 
2114 		if (tidx < tbl_skip || (family && tbl->family != family))
2115 			continue;
2116 
2117 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2118 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2119 				       NLM_F_MULTI) < 0)
2120 			break;
2121 
2122 		nidx = 0;
2123 		p = list_next_entry(&tbl->parms, list);
2124 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2125 			if (!net_eq(neigh_parms_net(p), net))
2126 				continue;
2127 
2128 			if (nidx < neigh_skip)
2129 				goto next;
2130 
2131 			if (neightbl_fill_param_info(skb, tbl, p,
2132 						     NETLINK_CB(cb->skb).portid,
2133 						     cb->nlh->nlmsg_seq,
2134 						     RTM_NEWNEIGHTBL,
2135 						     NLM_F_MULTI) < 0)
2136 				goto out;
2137 		next:
2138 			nidx++;
2139 		}
2140 
2141 		neigh_skip = 0;
2142 	}
2143 out:
2144 	cb->args[0] = tidx;
2145 	cb->args[1] = nidx;
2146 
2147 	return skb->len;
2148 }
2149 
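/* Serialise one neighbour entry as an ndmsg with NDA_DST, NDA_LLADDR (only
 * while the entry is NUD_VALID), NDA_CACHEINFO and NDA_PROBES attributes.
 */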
2150 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2151 			   u32 pid, u32 seq, int type, unsigned int flags)
2152 {
2153 	unsigned long now = jiffies;
2154 	struct nda_cacheinfo ci;
2155 	struct nlmsghdr *nlh;
2156 	struct ndmsg *ndm;
2157 
2158 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2159 	if (nlh == NULL)
2160 		return -EMSGSIZE;
2161 
2162 	ndm = nlmsg_data(nlh);
2163 	ndm->ndm_family	 = neigh->ops->family;
2164 	ndm->ndm_pad1    = 0;
2165 	ndm->ndm_pad2    = 0;
2166 	ndm->ndm_flags	 = neigh->flags;
2167 	ndm->ndm_type	 = neigh->type;
2168 	ndm->ndm_ifindex = neigh->dev->ifindex;
2169 
2170 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2171 		goto nla_put_failure;
2172 
2173 	read_lock_bh(&neigh->lock);
2174 	ndm->ndm_state	 = neigh->nud_state;
2175 	if (neigh->nud_state & NUD_VALID) {
2176 		char haddr[MAX_ADDR_LEN];
2177 
2178 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2179 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2180 			read_unlock_bh(&neigh->lock);
2181 			goto nla_put_failure;
2182 		}
2183 	}
2184 
2185 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2186 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2187 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2188 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2189 	read_unlock_bh(&neigh->lock);
2190 
2191 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2192 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2193 		goto nla_put_failure;
2194 
2195 	nlmsg_end(skb, nlh);
2196 	return 0;
2197 
2198 nla_put_failure:
2199 	nlmsg_cancel(skb, nlh);
2200 	return -EMSGSIZE;
2201 }
2202 
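/* Serialise one proxy entry (NTF_PROXY); it carries only the destination key
 * and owning device, with no link-layer address or cache info.
 */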
2203 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2204 			    u32 pid, u32 seq, int type, unsigned int flags,
2205 			    struct neigh_table *tbl)
2206 {
2207 	struct nlmsghdr *nlh;
2208 	struct ndmsg *ndm;
2209 
2210 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2211 	if (nlh == NULL)
2212 		return -EMSGSIZE;
2213 
2214 	ndm = nlmsg_data(nlh);
2215 	ndm->ndm_family	 = tbl->family;
2216 	ndm->ndm_pad1    = 0;
2217 	ndm->ndm_pad2    = 0;
2218 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2219 	ndm->ndm_type	 = RTN_UNICAST;
2220 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2221 	ndm->ndm_state	 = NUD_NONE;
2222 
2223 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2224 		goto nla_put_failure;
2225 
2226 	nlmsg_end(skb, nlh);
2227 	return 0;
2228 
2229 nla_put_failure:
2230 	nlmsg_cancel(skb, nlh);
2231 	return -EMSGSIZE;
2232 }
2233 
2234 static void neigh_update_notify(struct neighbour *neigh)
2235 {
2236 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2237 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2238 }
2239 
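/* Dump filters: skip entries whose device does not match the requested
 * NDA_MASTER or NDA_IFINDEX index (an index of 0 means no filtering).
 */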
2240 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2241 {
2242 	struct net_device *master;
2243 
2244 	if (!master_idx)
2245 		return false;
2246 
2247 	master = netdev_master_upper_dev_get(dev);
2248 	if (!master || master->ifindex != master_idx)
2249 		return true;
2250 
2251 	return false;
2252 }
2253 
2254 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2255 {
2256 	if (filter_idx && dev->ifindex != filter_idx)
2257 		return true;
2258 
2259 	return false;
2260 }
2261 
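/* Walk one table's hash buckets under RCU and emit an RTM_NEWNEIGH message
 * per entry, honouring the optional ifindex/master filters and the resume
 * point stored in cb->args[1]/[2].
 */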
2262 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2263 			    struct netlink_callback *cb)
2264 {
2265 	struct net *net = sock_net(skb->sk);
2266 	const struct nlmsghdr *nlh = cb->nlh;
2267 	struct nlattr *tb[NDA_MAX + 1];
2268 	struct neighbour *n;
2269 	int rc, h, s_h = cb->args[1];
2270 	int idx, s_idx = idx = cb->args[2];
2271 	struct neigh_hash_table *nht;
2272 	int filter_master_idx = 0, filter_idx = 0;
2273 	unsigned int flags = NLM_F_MULTI;
2274 	int err;
2275 
2276 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
2277 	if (!err) {
2278 		if (tb[NDA_IFINDEX])
2279 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2280 
2281 		if (tb[NDA_MASTER])
2282 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2283 
2284 		if (filter_idx || filter_master_idx)
2285 			flags |= NLM_F_DUMP_FILTERED;
2286 	}
2287 
2288 	rcu_read_lock_bh();
2289 	nht = rcu_dereference_bh(tbl->nht);
2290 
2291 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2292 		if (h > s_h)
2293 			s_idx = 0;
2294 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2295 		     n != NULL;
2296 		     n = rcu_dereference_bh(n->next)) {
2297 			if (!net_eq(dev_net(n->dev), net))
2298 				continue;
2299 			if (neigh_ifindex_filtered(n->dev, filter_idx))
2300 				continue;
2301 			if (neigh_master_filtered(n->dev, filter_master_idx))
2302 				continue;
2303 			if (idx < s_idx)
2304 				goto next;
2305 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2306 					    cb->nlh->nlmsg_seq,
2307 					    RTM_NEWNEIGH,
2308 					    flags) < 0) {
2309 				rc = -1;
2310 				goto out;
2311 			}
2312 next:
2313 			idx++;
2314 		}
2315 	}
2316 	rc = skb->len;
2317 out:
2318 	rcu_read_unlock_bh();
2319 	cb->args[1] = h;
2320 	cb->args[2] = idx;
2321 	return rc;
2322 }
2323 
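/* Like neigh_dump_table() but for the proxy hash table, which is protected
 * by tbl->lock instead of RCU; resume state lives in cb->args[3]/[4].
 */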
2324 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2325 			     struct netlink_callback *cb)
2326 {
2327 	struct pneigh_entry *n;
2328 	struct net *net = sock_net(skb->sk);
2329 	int rc, h, s_h = cb->args[3];
2330 	int idx, s_idx = idx = cb->args[4];
2331 
2332 	read_lock_bh(&tbl->lock);
2333 
2334 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2335 		if (h > s_h)
2336 			s_idx = 0;
2337 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2338 			if (pneigh_net(n) != net)
2339 				continue;
2340 			if (idx < s_idx)
2341 				goto next;
2342 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2343 					    cb->nlh->nlmsg_seq,
2344 					    RTM_NEWNEIGH,
2345 					    NLM_F_MULTI, tbl) < 0) {
2346 				read_unlock_bh(&tbl->lock);
2347 				rc = -1;
2348 				goto out;
2349 			}
2350 		next:
2351 			idx++;
2352 		}
2353 	}
2354 
2355 	read_unlock_bh(&tbl->lock);
2356 	rc = skb->len;
2357 out:
2358 	cb->args[3] = h;
2359 	cb->args[4] = idx;
2360 	return rc;
2361 
2362 }
2363 
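/* RTM_GETNEIGH dump entry point: choose between proxy and normal entries
 * based on ndm_flags and iterate over all matching tables.
 */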
2364 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2365 {
2366 	struct neigh_table *tbl;
2367 	int t, family, s_t;
2368 	int proxy = 0;
2369 	int err;
2370 
2371 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2372 
2373 	/* Check whether a full ndmsg structure is present; the family
2374 	 * member sits at the same offset in both structures.
2375 	 */
2376 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2377 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2378 		proxy = 1;
2379 
2380 	s_t = cb->args[0];
2381 
2382 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2383 		tbl = neigh_tables[t];
2384 
2385 		if (!tbl)
2386 			continue;
2387 		if (t < s_t || (family && tbl->family != family))
2388 			continue;
2389 		if (t > s_t)
2390 			memset(&cb->args[1], 0, sizeof(cb->args) -
2391 						sizeof(cb->args[0]));
2392 		if (proxy)
2393 			err = pneigh_dump_table(tbl, skb, cb);
2394 		else
2395 			err = neigh_dump_table(tbl, skb, cb);
2396 		if (err < 0)
2397 			break;
2398 	}
2399 
2400 	cb->args[0] = t;
2401 	return skb->len;
2402 }
2403 
2404 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2405 {
2406 	int chain;
2407 	struct neigh_hash_table *nht;
2408 
2409 	rcu_read_lock_bh();
2410 	nht = rcu_dereference_bh(tbl->nht);
2411 
2412 	read_lock(&tbl->lock); /* avoid resizes */
2413 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2414 		struct neighbour *n;
2415 
2416 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2417 		     n != NULL;
2418 		     n = rcu_dereference_bh(n->next))
2419 			cb(n, cookie);
2420 	}
2421 	read_unlock(&tbl->lock);
2422 	rcu_read_unlock_bh();
2423 }
2424 EXPORT_SYMBOL(neigh_for_each);
2425 
2426 /* The tbl->lock must be held as a writer and BH disabled. */
2427 void __neigh_for_each_release(struct neigh_table *tbl,
2428 			      int (*cb)(struct neighbour *))
2429 {
2430 	int chain;
2431 	struct neigh_hash_table *nht;
2432 
2433 	nht = rcu_dereference_protected(tbl->nht,
2434 					lockdep_is_held(&tbl->lock));
2435 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2436 		struct neighbour *n;
2437 		struct neighbour __rcu **np;
2438 
2439 		np = &nht->hash_buckets[chain];
2440 		while ((n = rcu_dereference_protected(*np,
2441 					lockdep_is_held(&tbl->lock))) != NULL) {
2442 			int release;
2443 
2444 			write_lock(&n->lock);
2445 			release = cb(n);
2446 			if (release) {
2447 				rcu_assign_pointer(*np,
2448 					rcu_dereference_protected(n->next,
2449 						lockdep_is_held(&tbl->lock)));
2450 				n->dead = 1;
2451 			} else
2452 				np = &n->next;
2453 			write_unlock(&n->lock);
2454 			if (release)
2455 				neigh_cleanup_and_release(n);
2456 		}
2457 	}
2458 }
2459 EXPORT_SYMBOL(__neigh_for_each_release);
2460 
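/* Transmit skb to the given protocol address via the neighbour table
 * selected by index, creating the entry on demand; NEIGH_LINK_TABLE means
 * the address is already a link-layer address and is used directly.
 */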
2461 int neigh_xmit(int index, struct net_device *dev,
2462 	       const void *addr, struct sk_buff *skb)
2463 {
2464 	int err = -EAFNOSUPPORT;
2465 	if (likely(index < NEIGH_NR_TABLES)) {
2466 		struct neigh_table *tbl;
2467 		struct neighbour *neigh;
2468 
2469 		tbl = neigh_tables[index];
2470 		if (!tbl)
2471 			goto out;
2472 		neigh = __neigh_lookup_noref(tbl, addr, dev);
2473 		if (!neigh)
2474 			neigh = __neigh_create(tbl, addr, dev, false);
2475 		err = PTR_ERR(neigh);
2476 		if (IS_ERR(neigh))
2477 			goto out_kfree_skb;
2478 		err = neigh->output(neigh, skb);
2479 	} else if (index == NEIGH_LINK_TABLE) {
2481 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2482 				      addr, NULL, skb->len);
2483 		if (err < 0)
2484 			goto out_kfree_skb;
2485 		err = dev_queue_xmit(skb);
2486 	}
2487 out:
2488 	return err;
2489 out_kfree_skb:
2490 	kfree_skb(skb);
2491 	goto out;
2492 }
2493 EXPORT_SYMBOL(neigh_xmit);
2494 
2495 #ifdef CONFIG_PROC_FS
2496 
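/* /proc seq_file iterators: walk the hash buckets of the current table,
 * optionally skipping entries that are only NUD_NOARP and descending into a
 * per-neighbour sub-iterator when one is provided.
 */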
2497 static struct neighbour *neigh_get_first(struct seq_file *seq)
2498 {
2499 	struct neigh_seq_state *state = seq->private;
2500 	struct net *net = seq_file_net(seq);
2501 	struct neigh_hash_table *nht = state->nht;
2502 	struct neighbour *n = NULL;
2503 	int bucket = state->bucket;
2504 
2505 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2506 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2507 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2508 
2509 		while (n) {
2510 			if (!net_eq(dev_net(n->dev), net))
2511 				goto next;
2512 			if (state->neigh_sub_iter) {
2513 				loff_t fakep = 0;
2514 				void *v;
2515 
2516 				v = state->neigh_sub_iter(state, n, &fakep);
2517 				if (!v)
2518 					goto next;
2519 			}
2520 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2521 				break;
2522 			if (n->nud_state & ~NUD_NOARP)
2523 				break;
2524 next:
2525 			n = rcu_dereference_bh(n->next);
2526 		}
2527 
2528 		if (n)
2529 			break;
2530 	}
2531 	state->bucket = bucket;
2532 
2533 	return n;
2534 }
2535 
2536 static struct neighbour *neigh_get_next(struct seq_file *seq,
2537 					struct neighbour *n,
2538 					loff_t *pos)
2539 {
2540 	struct neigh_seq_state *state = seq->private;
2541 	struct net *net = seq_file_net(seq);
2542 	struct neigh_hash_table *nht = state->nht;
2543 
2544 	if (state->neigh_sub_iter) {
2545 		void *v = state->neigh_sub_iter(state, n, pos);
2546 		if (v)
2547 			return n;
2548 	}
2549 	n = rcu_dereference_bh(n->next);
2550 
2551 	while (1) {
2552 		while (n) {
2553 			if (!net_eq(dev_net(n->dev), net))
2554 				goto next;
2555 			if (state->neigh_sub_iter) {
2556 				void *v = state->neigh_sub_iter(state, n, pos);
2557 				if (v)
2558 					return n;
2559 				goto next;
2560 			}
2561 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2562 				break;
2563 
2564 			if (n->nud_state & ~NUD_NOARP)
2565 				break;
2566 next:
2567 			n = rcu_dereference_bh(n->next);
2568 		}
2569 
2570 		if (n)
2571 			break;
2572 
2573 		if (++state->bucket >= (1 << nht->hash_shift))
2574 			break;
2575 
2576 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2577 	}
2578 
2579 	if (n && pos)
2580 		--(*pos);
2581 	return n;
2582 }
2583 
2584 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2585 {
2586 	struct neighbour *n = neigh_get_first(seq);
2587 
2588 	if (n) {
2589 		--(*pos);
2590 		while (*pos) {
2591 			n = neigh_get_next(seq, n, pos);
2592 			if (!n)
2593 				break;
2594 		}
2595 	}
2596 	return *pos ? NULL : n;
2597 }
2598 
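/* Proxy-entry counterparts of the iterators above; pneigh entries are
 * chained per bucket and filtered by network namespace.
 */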
2599 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2600 {
2601 	struct neigh_seq_state *state = seq->private;
2602 	struct net *net = seq_file_net(seq);
2603 	struct neigh_table *tbl = state->tbl;
2604 	struct pneigh_entry *pn = NULL;
2605 	int bucket = state->bucket;
2606 
2607 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2608 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2609 		pn = tbl->phash_buckets[bucket];
2610 		while (pn && !net_eq(pneigh_net(pn), net))
2611 			pn = pn->next;
2612 		if (pn)
2613 			break;
2614 	}
2615 	state->bucket = bucket;
2616 
2617 	return pn;
2618 }
2619 
2620 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2621 					    struct pneigh_entry *pn,
2622 					    loff_t *pos)
2623 {
2624 	struct neigh_seq_state *state = seq->private;
2625 	struct net *net = seq_file_net(seq);
2626 	struct neigh_table *tbl = state->tbl;
2627 
2628 	do {
2629 		pn = pn->next;
2630 	} while (pn && !net_eq(pneigh_net(pn), net));
2631 
2632 	while (!pn) {
2633 		if (++state->bucket > PNEIGH_HASHMASK)
2634 			break;
2635 		pn = tbl->phash_buckets[state->bucket];
2636 		while (pn && !net_eq(pneigh_net(pn), net))
2637 			pn = pn->next;
2638 		if (pn)
2639 			break;
2640 	}
2641 
2642 	if (pn && pos)
2643 		--(*pos);
2644 
2645 	return pn;
2646 }
2647 
2648 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2649 {
2650 	struct pneigh_entry *pn = pneigh_get_first(seq);
2651 
2652 	if (pn) {
2653 		--(*pos);
2654 		while (*pos) {
2655 			pn = pneigh_get_next(seq, pn, pos);
2656 			if (!pn)
2657 				break;
2658 		}
2659 	}
2660 	return *pos ? NULL : pn;
2661 }
2662 
2663 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2664 {
2665 	struct neigh_seq_state *state = seq->private;
2666 	void *rc;
2667 	loff_t idxpos = *pos;
2668 
2669 	rc = neigh_get_idx(seq, &idxpos);
2670 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2671 		rc = pneigh_get_idx(seq, &idxpos);
2672 
2673 	return rc;
2674 }
2675 
2676 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2677 	__acquires(rcu_bh)
2678 {
2679 	struct neigh_seq_state *state = seq->private;
2680 
2681 	state->tbl = tbl;
2682 	state->bucket = 0;
2683 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2684 
2685 	rcu_read_lock_bh();
2686 	state->nht = rcu_dereference_bh(tbl->nht);
2687 
2688 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2689 }
2690 EXPORT_SYMBOL(neigh_seq_start);
2691 
2692 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2693 {
2694 	struct neigh_seq_state *state;
2695 	void *rc;
2696 
2697 	if (v == SEQ_START_TOKEN) {
2698 		rc = neigh_get_first(seq);
2699 		goto out;
2700 	}
2701 
2702 	state = seq->private;
2703 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2704 		rc = neigh_get_next(seq, v, NULL);
2705 		if (rc)
2706 			goto out;
2707 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2708 			rc = pneigh_get_first(seq);
2709 	} else {
2710 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2711 		rc = pneigh_get_next(seq, v, NULL);
2712 	}
2713 out:
2714 	++(*pos);
2715 	return rc;
2716 }
2717 EXPORT_SYMBOL(neigh_seq_next);
2718 
2719 void neigh_seq_stop(struct seq_file *seq, void *v)
2720 	__releases(rcu_bh)
2721 {
2722 	rcu_read_unlock_bh();
2723 }
2724 EXPORT_SYMBOL(neigh_seq_stop);
2725 
2726 /* statistics via seq_file */
2727 
2728 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2729 {
2730 	struct neigh_table *tbl = seq->private;
2731 	int cpu;
2732 
2733 	if (*pos == 0)
2734 		return SEQ_START_TOKEN;
2735 
2736 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2737 		if (!cpu_possible(cpu))
2738 			continue;
2739 		*pos = cpu+1;
2740 		return per_cpu_ptr(tbl->stats, cpu);
2741 	}
2742 	return NULL;
2743 }
2744 
2745 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2746 {
2747 	struct neigh_table *tbl = seq->private;
2748 	int cpu;
2749 
2750 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2751 		if (!cpu_possible(cpu))
2752 			continue;
2753 		*pos = cpu+1;
2754 		return per_cpu_ptr(tbl->stats, cpu);
2755 	}
2756 	return NULL;
2757 }
2758 
2759 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2760 {
2761 
2762 }
2763 
2764 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2765 {
2766 	struct neigh_table *tbl = seq->private;
2767 	struct neigh_statistics *st = v;
2768 
2769 	if (v == SEQ_START_TOKEN) {
2770 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2771 		return 0;
2772 	}
2773 
2774 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2775 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2776 		   atomic_read(&tbl->entries),
2777 
2778 		   st->allocs,
2779 		   st->destroys,
2780 		   st->hash_grows,
2781 
2782 		   st->lookups,
2783 		   st->hits,
2784 
2785 		   st->res_failed,
2786 
2787 		   st->rcv_probes_mcast,
2788 		   st->rcv_probes_ucast,
2789 
2790 		   st->periodic_gc_runs,
2791 		   st->forced_gc_runs,
2792 		   st->unres_discards,
2793 		   st->table_fulls
2794 		   );
2795 
2796 	return 0;
2797 }
2798 
2799 static const struct seq_operations neigh_stat_seq_ops = {
2800 	.start	= neigh_stat_seq_start,
2801 	.next	= neigh_stat_seq_next,
2802 	.stop	= neigh_stat_seq_stop,
2803 	.show	= neigh_stat_seq_show,
2804 };
2805 
2806 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2807 {
2808 	int ret = seq_open(file, &neigh_stat_seq_ops);
2809 
2810 	if (!ret) {
2811 		struct seq_file *sf = file->private_data;
2812 		sf->private = PDE_DATA(inode);
2813 	}
2814 	return ret;
2815 }
2816 
2817 static const struct file_operations neigh_stat_seq_fops = {
2818 	.owner	 = THIS_MODULE,
2819 	.open 	 = neigh_stat_seq_open,
2820 	.read	 = seq_read,
2821 	.llseek	 = seq_lseek,
2822 	.release = seq_release,
2823 };
2824 
2825 #endif /* CONFIG_PROC_FS */
2826 
2827 static inline size_t neigh_nlmsg_size(void)
2828 {
2829 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2830 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2831 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2832 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2833 	       + nla_total_size(4); /* NDA_PROBES */
2834 }
2835 
2836 static void __neigh_notify(struct neighbour *n, int type, int flags)
2837 {
2838 	struct net *net = dev_net(n->dev);
2839 	struct sk_buff *skb;
2840 	int err = -ENOBUFS;
2841 
2842 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2843 	if (skb == NULL)
2844 		goto errout;
2845 
2846 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2847 	if (err < 0) {
2848 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2849 		WARN_ON(err == -EMSGSIZE);
2850 		kfree_skb(skb);
2851 		goto errout;
2852 	}
2853 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2854 	return;
2855 errout:
2856 	if (err < 0)
2857 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2858 }
2859 
2860 void neigh_app_ns(struct neighbour *n)
2861 {
2862 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2863 }
2864 EXPORT_SYMBOL(neigh_app_ns);
2865 
2866 #ifdef CONFIG_SYSCTL
2867 static int zero;
2868 static int int_max = INT_MAX;
2869 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2870 
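/* unres_qlen is stored internally in bytes; convert to and from a packet
 * count using SKB_TRUESIZE(ETH_FRAME_LEN) for the legacy sysctl.
 */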
2871 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2872 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2873 {
2874 	int size, ret;
2875 	struct ctl_table tmp = *ctl;
2876 
2877 	tmp.extra1 = &zero;
2878 	tmp.extra2 = &unres_qlen_max;
2879 	tmp.data = &size;
2880 
2881 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2882 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2883 
2884 	if (write && !ret)
2885 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2886 	return ret;
2887 }
2888 
2889 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2890 						   int family)
2891 {
2892 	switch (family) {
2893 	case AF_INET:
2894 		return __in_dev_arp_parms_get_rcu(dev);
2895 	case AF_INET6:
2896 		return __in6_dev_nd_parms_get_rcu(dev);
2897 	}
2898 	return NULL;
2899 }
2900 
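/* Propagate a changed default parameter to every device in the namespace
 * that has not explicitly overridden it (its data_state bit is still clear).
 */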
2901 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2902 				  int index)
2903 {
2904 	struct net_device *dev;
2905 	int family = neigh_parms_family(p);
2906 
2907 	rcu_read_lock();
2908 	for_each_netdev_rcu(net, dev) {
2909 		struct neigh_parms *dst_p =
2910 				neigh_get_dev_parms_rcu(dev, family);
2911 
2912 		if (dst_p && !test_bit(index, dst_p->data_state))
2913 			dst_p->data[index] = p->data[index];
2914 	}
2915 	rcu_read_unlock();
2916 }
2917 
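/* Common post-write hook for the per-parameter sysctl handlers: mark the
 * value as explicitly set and, when writing the "default" table, copy it to
 * all devices.
 */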
2918 static void neigh_proc_update(struct ctl_table *ctl, int write)
2919 {
2920 	struct net_device *dev = ctl->extra1;
2921 	struct neigh_parms *p = ctl->extra2;
2922 	struct net *net = neigh_parms_net(p);
2923 	int index = (int *) ctl->data - p->data;
2924 
2925 	if (!write)
2926 		return;
2927 
2928 	set_bit(index, p->data_state);
2929 	if (!dev) /* NULL dev means this is default value */
2930 		neigh_copy_dflt_parms(net, p, index);
2931 }
2932 
2933 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2934 					   void __user *buffer,
2935 					   size_t *lenp, loff_t *ppos)
2936 {
2937 	struct ctl_table tmp = *ctl;
2938 	int ret;
2939 
2940 	tmp.extra1 = &zero;
2941 	tmp.extra2 = &int_max;
2942 
2943 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2944 	neigh_proc_update(ctl, write);
2945 	return ret;
2946 }
2947 
2948 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2949 			void __user *buffer, size_t *lenp, loff_t *ppos)
2950 {
2951 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2952 
2953 	neigh_proc_update(ctl, write);
2954 	return ret;
2955 }
2956 EXPORT_SYMBOL(neigh_proc_dointvec);
2957 
2958 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2959 				void __user *buffer,
2960 				size_t *lenp, loff_t *ppos)
2961 {
2962 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2963 
2964 	neigh_proc_update(ctl, write);
2965 	return ret;
2966 }
2967 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2968 
2969 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2970 					      void __user *buffer,
2971 					      size_t *lenp, loff_t *ppos)
2972 {
2973 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2974 
2975 	neigh_proc_update(ctl, write);
2976 	return ret;
2977 }
2978 
2979 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2980 				   void __user *buffer,
2981 				   size_t *lenp, loff_t *ppos)
2982 {
2983 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2984 
2985 	neigh_proc_update(ctl, write);
2986 	return ret;
2987 }
2988 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2989 
2990 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2991 					  void __user *buffer,
2992 					  size_t *lenp, loff_t *ppos)
2993 {
2994 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2995 
2996 	neigh_proc_update(ctl, write);
2997 	return ret;
2998 }
2999 
3000 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3001 					  void __user *buffer,
3002 					  size_t *lenp, loff_t *ppos)
3003 {
3004 	struct neigh_parms *p = ctl->extra2;
3005 	int ret;
3006 
3007 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3008 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3009 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3010 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3011 	else
3012 		ret = -1;
3013 
3014 	if (write && ret == 0) {
3015 		/* update reachable_time as well, otherwise, the change will
3016 		 * only be effective after the next time neigh_periodic_work
3017 		 * decides to recompute it
3018 		 */
3019 		p->reachable_time =
3020 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3021 	}
3022 	return ret;
3023 }
3024 
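/* NEIGH_PARMS_DATA_OFFSET yields the offset of data[index] within struct
 * neigh_parms; neigh_sysctl_register() later adds the actual parms pointer
 * to turn it into a real address.
 */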
3025 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3026 	(&((struct neigh_parms *) 0)->data[index])
3027 
3028 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3029 	[NEIGH_VAR_ ## attr] = { \
3030 		.procname	= name, \
3031 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3032 		.maxlen		= sizeof(int), \
3033 		.mode		= mval, \
3034 		.proc_handler	= proc, \
3035 	}
3036 
3037 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3038 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3039 
3040 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3041 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3042 
3043 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3044 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3045 
3046 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3047 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3048 
3049 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3050 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3051 
3052 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3053 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3054 
3055 static struct neigh_sysctl_table {
3056 	struct ctl_table_header *sysctl_header;
3057 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3058 } neigh_sysctl_template __read_mostly = {
3059 	.neigh_vars = {
3060 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3061 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3062 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3063 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3064 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3065 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3066 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3067 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3068 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3069 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3070 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3071 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3072 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3073 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3074 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3075 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3076 		[NEIGH_VAR_GC_INTERVAL] = {
3077 			.procname	= "gc_interval",
3078 			.maxlen		= sizeof(int),
3079 			.mode		= 0644,
3080 			.proc_handler	= proc_dointvec_jiffies,
3081 		},
3082 		[NEIGH_VAR_GC_THRESH1] = {
3083 			.procname	= "gc_thresh1",
3084 			.maxlen		= sizeof(int),
3085 			.mode		= 0644,
3086 			.extra1 	= &zero,
3087 			.extra2		= &int_max,
3088 			.proc_handler	= proc_dointvec_minmax,
3089 		},
3090 		[NEIGH_VAR_GC_THRESH2] = {
3091 			.procname	= "gc_thresh2",
3092 			.maxlen		= sizeof(int),
3093 			.mode		= 0644,
3094 			.extra1 	= &zero,
3095 			.extra2		= &int_max,
3096 			.proc_handler	= proc_dointvec_minmax,
3097 		},
3098 		[NEIGH_VAR_GC_THRESH3] = {
3099 			.procname	= "gc_thresh3",
3100 			.maxlen		= sizeof(int),
3101 			.mode		= 0644,
3102 			.extra1 	= &zero,
3103 			.extra2		= &int_max,
3104 			.proc_handler	= proc_dointvec_minmax,
3105 		},
3106 		{},
3107 	},
3108 };
3109 
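/* Register the per-device (or per-table default) neighbour sysctls under
 * net/<ipv4|ipv6>/neigh/<dev|default>/ by duplicating the template above.
 */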
3110 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3111 			  proc_handler *handler)
3112 {
3113 	int i;
3114 	struct neigh_sysctl_table *t;
3115 	const char *dev_name_source;
3116 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3117 	char *p_name;
3118 
3119 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3120 	if (!t)
3121 		goto err;
3122 
3123 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3124 		t->neigh_vars[i].data += (long) p;
3125 		t->neigh_vars[i].extra1 = dev;
3126 		t->neigh_vars[i].extra2 = p;
3127 	}
3128 
3129 	if (dev) {
3130 		dev_name_source = dev->name;
3131 		/* Terminate the table early */
3132 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3133 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3134 	} else {
3135 		struct neigh_table *tbl = p->tbl;
3136 		dev_name_source = "default";
3137 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3138 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3139 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3140 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3141 	}
3142 
3143 	if (handler) {
3144 		/* RetransTime */
3145 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3146 		/* ReachableTime */
3147 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3148 		/* RetransTime (in milliseconds)*/
3149 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3150 		/* ReachableTime (in milliseconds) */
3151 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3152 	} else {
3153 		/* These handlers update p->reachable_time after
3154 		 * base_reachable_time(_ms) is set, so the new timer takes effect
3155 		 * after the next neighbour update instead of waiting for
3156 		 * neigh_periodic_work to recompute it (which can take multiple
3157 		 * minutes). Any handler that replaces them should do the same.
3158 		 */
3159 		/* ReachableTime */
3160 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3161 			neigh_proc_base_reachable_time;
3162 		/* ReachableTime (in milliseconds) */
3163 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3164 			neigh_proc_base_reachable_time;
3165 	}
3166 
3167 	/* Don't export sysctls to unprivileged users */
3168 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3169 		t->neigh_vars[0].procname = NULL;
3170 
3171 	switch (neigh_parms_family(p)) {
3172 	case AF_INET:
3173 	      p_name = "ipv4";
3174 	      break;
3175 	case AF_INET6:
3176 	      p_name = "ipv6";
3177 	      break;
3178 	default:
3179 	      BUG();
3180 	}
3181 
3182 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3183 		p_name, dev_name_source);
3184 	t->sysctl_header =
3185 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3186 	if (!t->sysctl_header)
3187 		goto free;
3188 
3189 	p->sysctl_table = t;
3190 	return 0;
3191 
3192 free:
3193 	kfree(t);
3194 err:
3195 	return -ENOBUFS;
3196 }
3197 EXPORT_SYMBOL(neigh_sysctl_register);
3198 
3199 void neigh_sysctl_unregister(struct neigh_parms *p)
3200 {
3201 	if (p->sysctl_table) {
3202 		struct neigh_sysctl_table *t = p->sysctl_table;
3203 		p->sysctl_table = NULL;
3204 		unregister_net_sysctl_table(t->sysctl_header);
3205 		kfree(t);
3206 	}
3207 }
3208 EXPORT_SYMBOL(neigh_sysctl_unregister);
3209 
3210 #endif	/* CONFIG_SYSCTL */
3211 
3212 static int __init neigh_init(void)
3213 {
3214 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3215 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3216 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3217 
3218 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3219 		      NULL);
3220 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3221 
3222 	return 0;
3223 }
3224 
3225 subsys_initcall(neigh_init);
3226 
3227