xref: /openbmc/linux/net/core/neighbour.c (revision e33bbe69149b802c0c77bfb822685772f85388ca)
1 /*
2  *	Generic address resolution entity
3  *
4  *	Authors:
5  *	Pedro Roque		<roque@di.fc.ul.pt>
6  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  *
13  *	Fixes:
14  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
15  *	Harald Welte		Add neighbour cache statistics like rtstat
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41 #include <linux/inetdevice.h>
42 #include <net/addrconf.h>
43 
44 #define DEBUG
45 #define NEIGH_DEBUG 1
46 #define neigh_dbg(level, fmt, ...)		\
47 do {						\
48 	if (level <= NEIGH_DEBUG)		\
49 		pr_debug(fmt, ##__VA_ARGS__);	\
50 } while (0)
51 
52 #define PNEIGH_HASHMASK		0xF
53 
54 static void neigh_timer_handler(struct timer_list *t);
55 static void __neigh_notify(struct neighbour *n, int type, int flags,
56 			   u32 pid);
57 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
58 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
59 				    struct net_device *dev);
60 
61 #ifdef CONFIG_PROC_FS
62 static const struct file_operations neigh_stat_seq_fops;
63 #endif
64 
65 /*
66    Neighbour hash table buckets are protected with the rwlock tbl->lock.
67 
68    - All scans/updates of the hash buckets MUST be made under this lock.
69    - NOTHING clever should be done under this lock: no callbacks
70      into protocol backends, no attempts to send anything to the network.
71      Doing so will deadlock if the backend/driver wants to use the
72      neighbour cache.
73    - If an entry requires some non-trivial actions, increase
74      its reference count and release the table lock.
75 
76    Neighbour entries are protected:
77    - by their reference count.
78    - by the rwlock neigh->lock.
79 
80    The reference count prevents destruction.
81 
82    neigh->lock mainly serializes the ll address data and its validity state.
83    However, the same lock is also used to protect other entry fields:
84     - the timer
85     - the resolution queue
86 
87    Again, nothing clever should be done under neigh->lock;
88    the most complicated procedure we allow is dev->hard_header.
89    It is assumed that dev->hard_header is simple and does
90    not call back into the neighbour tables.
91  */
92 
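/* A minimal sketch of the rule above: a walker that must do non-trivial
 * work on an entry found during a bucket scan takes a reference and
 * drops the table lock first:
 *
 *	write_lock_bh(&tbl->lock);
 *	n = ...;			// found in some hash bucket
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	// ... heavyweight work: may call into drivers, transmit, ...
 *	neigh_release(n);
 */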
93 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
94 {
95 	kfree_skb(skb);
96 	return -ENETDOWN;
97 }
98 
99 static void neigh_cleanup_and_release(struct neighbour *neigh)
100 {
101 	if (neigh->parms->neigh_cleanup)
102 		neigh->parms->neigh_cleanup(neigh);
103 
104 	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
105 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
106 	neigh_release(neigh);
107 }
108 
109 /*
110  * It is a random distribution over the interval (1/2)*base...(3/2)*base.
111  * It corresponds to the default IPv6 settings and is not overridable,
112  * because it is a really reasonable choice.
113  */
114 
115 unsigned long neigh_rand_reach_time(unsigned long base)
116 {
117 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
118 }
119 EXPORT_SYMBOL(neigh_rand_reach_time);
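
/* For example, neigh_rand_reach_time(30 * HZ) returns a value uniform
 * over [15 * HZ, 45 * HZ): the per-entry ReachableTime averages the
 * configured base while individual entries spread out in time.
 */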
120 
121 
122 static bool neigh_del(struct neighbour *n, __u8 state,
123 		      struct neighbour __rcu **np, struct neigh_table *tbl)
124 {
125 	bool retval = false;
126 
127 	write_lock(&n->lock);
128 	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
129 		struct neighbour *neigh;
130 
131 		neigh = rcu_dereference_protected(n->next,
132 						  lockdep_is_held(&tbl->lock));
133 		rcu_assign_pointer(*np, neigh);
134 		n->dead = 1;
135 		retval = true;
136 	}
137 	write_unlock(&n->lock);
138 	if (retval)
139 		neigh_cleanup_and_release(n);
140 	return retval;
141 }
142 
143 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
144 {
145 	struct neigh_hash_table *nht;
146 	void *pkey = ndel->primary_key;
147 	u32 hash_val;
148 	struct neighbour *n;
149 	struct neighbour __rcu **np;
150 
151 	nht = rcu_dereference_protected(tbl->nht,
152 					lockdep_is_held(&tbl->lock));
153 	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
154 	hash_val = hash_val >> (32 - nht->hash_shift);
155 
156 	np = &nht->hash_buckets[hash_val];
157 	while ((n = rcu_dereference_protected(*np,
158 					      lockdep_is_held(&tbl->lock)))) {
159 		if (n == ndel)
160 			return neigh_del(n, 0, np, tbl);
161 		np = &n->next;
162 	}
163 	return false;
164 }
165 
166 static int neigh_forced_gc(struct neigh_table *tbl)
167 {
168 	int shrunk = 0;
169 	int i;
170 	struct neigh_hash_table *nht;
171 
172 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
173 
174 	write_lock_bh(&tbl->lock);
175 	nht = rcu_dereference_protected(tbl->nht,
176 					lockdep_is_held(&tbl->lock));
177 	for (i = 0; i < (1 << nht->hash_shift); i++) {
178 		struct neighbour *n;
179 		struct neighbour __rcu **np;
180 
181 		np = &nht->hash_buckets[i];
182 		while ((n = rcu_dereference_protected(*np,
183 					lockdep_is_held(&tbl->lock))) != NULL) {
184 			/* Neighbour record may be discarded if:
185 			 * - nobody refers to it.
186 			 * - it is not permanent.
187 			 */
188 			if (neigh_del(n, NUD_PERMANENT, np, tbl)) {
189 				shrunk = 1;
190 				continue;
191 			}
192 			np = &n->next;
193 		}
194 	}
195 
196 	tbl->last_flush = jiffies;
197 
198 	write_unlock_bh(&tbl->lock);
199 
200 	return shrunk;
201 }
202 
203 static void neigh_add_timer(struct neighbour *n, unsigned long when)
204 {
205 	neigh_hold(n);
206 	if (unlikely(mod_timer(&n->timer, when))) {
207 		printk("NEIGH: BUG, double timer add, state is %x\n",
208 		       n->nud_state);
209 		dump_stack();
210 	}
211 }
212 
213 static int neigh_del_timer(struct neighbour *n)
214 {
215 	if ((n->nud_state & NUD_IN_TIMER) &&
216 	    del_timer(&n->timer)) {
217 		neigh_release(n);
218 		return 1;
219 	}
220 	return 0;
221 }
222 
223 static void pneigh_queue_purge(struct sk_buff_head *list)
224 {
225 	struct sk_buff *skb;
226 
227 	while ((skb = skb_dequeue(list)) != NULL) {
228 		dev_put(skb->dev);
229 		kfree_skb(skb);
230 	}
231 }
232 
233 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
234 {
235 	int i;
236 	struct neigh_hash_table *nht;
237 
238 	nht = rcu_dereference_protected(tbl->nht,
239 					lockdep_is_held(&tbl->lock));
240 
241 	for (i = 0; i < (1 << nht->hash_shift); i++) {
242 		struct neighbour *n;
243 		struct neighbour __rcu **np = &nht->hash_buckets[i];
244 
245 		while ((n = rcu_dereference_protected(*np,
246 					lockdep_is_held(&tbl->lock))) != NULL) {
247 			if (dev && n->dev != dev) {
248 				np = &n->next;
249 				continue;
250 			}
251 			rcu_assign_pointer(*np,
252 				   rcu_dereference_protected(n->next,
253 						lockdep_is_held(&tbl->lock)));
254 			write_lock(&n->lock);
255 			neigh_del_timer(n);
256 			n->dead = 1;
257 
258 			if (refcount_read(&n->refcnt) != 1) {
259 				/* The most unpleasant situation:
260 				   we must destroy the neighbour entry,
261 				   but someone still uses it.
262 
263 				   Destruction will be delayed until
264 				   the last user releases the entry, but
265 				   we must kill its timers etc. and move
266 				   it to a safe state.
267 				 */
268 				__skb_queue_purge(&n->arp_queue);
269 				n->arp_queue_len_bytes = 0;
270 				n->output = neigh_blackhole;
271 				if (n->nud_state & NUD_VALID)
272 					n->nud_state = NUD_NOARP;
273 				else
274 					n->nud_state = NUD_NONE;
275 				neigh_dbg(2, "neigh %p is stray\n", n);
276 			}
277 			write_unlock(&n->lock);
278 			neigh_cleanup_and_release(n);
279 		}
280 	}
281 }
282 
283 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
284 {
285 	write_lock_bh(&tbl->lock);
286 	neigh_flush_dev(tbl, dev);
287 	write_unlock_bh(&tbl->lock);
288 }
289 EXPORT_SYMBOL(neigh_changeaddr);
290 
291 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
292 {
293 	write_lock_bh(&tbl->lock);
294 	neigh_flush_dev(tbl, dev);
295 	pneigh_ifdown_and_unlock(tbl, dev);
296 
297 	del_timer_sync(&tbl->proxy_timer);
298 	pneigh_queue_purge(&tbl->proxy_queue);
299 	return 0;
300 }
301 EXPORT_SYMBOL(neigh_ifdown);
302 
303 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
304 {
305 	struct neighbour *n = NULL;
306 	unsigned long now = jiffies;
307 	int entries;
308 
309 	entries = atomic_inc_return(&tbl->entries) - 1;
310 	if (entries >= tbl->gc_thresh3 ||
311 	    (entries >= tbl->gc_thresh2 &&
312 	     time_after(now, tbl->last_flush + 5 * HZ))) {
313 		if (!neigh_forced_gc(tbl) &&
314 		    entries >= tbl->gc_thresh3) {
315 			net_info_ratelimited("%s: neighbor table overflow!\n",
316 					     tbl->id);
317 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
318 			goto out_entries;
319 		}
320 	}
321 
322 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
323 	if (!n)
324 		goto out_entries;
325 
326 	__skb_queue_head_init(&n->arp_queue);
327 	rwlock_init(&n->lock);
328 	seqlock_init(&n->ha_lock);
329 	n->updated	  = n->used = now;
330 	n->nud_state	  = NUD_NONE;
331 	n->output	  = neigh_blackhole;
332 	seqlock_init(&n->hh.hh_lock);
333 	n->parms	  = neigh_parms_clone(&tbl->parms);
334 	timer_setup(&n->timer, neigh_timer_handler, 0);
335 
336 	NEIGH_CACHE_STAT_INC(tbl, allocs);
337 	n->tbl		  = tbl;
338 	refcount_set(&n->refcnt, 1);
339 	n->dead		  = 1;
340 out:
341 	return n;
342 
343 out_entries:
344 	atomic_dec(&tbl->entries);
345 	goto out;
346 }
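
/* A worked example of the thresholds above, assuming the usual ARP
 * defaults gc_thresh2 = 512 and gc_thresh3 = 1024 (an assumption; they
 * are tunable): allocating entry 513 runs a synchronous
 * neigh_forced_gc() only if the last flush is more than 5 seconds old,
 * while allocating beyond 1024 always runs one, and the allocation
 * fails ("neighbor table overflow!") if nothing could be reclaimed.
 */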
347 
348 static void neigh_get_hash_rnd(u32 *x)
349 {
350 	*x = get_random_u32() | 1;
351 }
352 
353 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
354 {
355 	size_t size = (1 << shift) * sizeof(struct neighbour *);
356 	struct neigh_hash_table *ret;
357 	struct neighbour __rcu **buckets;
358 	int i;
359 
360 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
361 	if (!ret)
362 		return NULL;
363 	if (size <= PAGE_SIZE)
364 		buckets = kzalloc(size, GFP_ATOMIC);
365 	else
366 		buckets = (struct neighbour __rcu **)
367 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
368 					   get_order(size));
369 	if (!buckets) {
370 		kfree(ret);
371 		return NULL;
372 	}
373 	ret->hash_buckets = buckets;
374 	ret->hash_shift = shift;
375 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
376 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
377 	return ret;
378 }
379 
380 static void neigh_hash_free_rcu(struct rcu_head *head)
381 {
382 	struct neigh_hash_table *nht = container_of(head,
383 						    struct neigh_hash_table,
384 						    rcu);
385 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
386 	struct neighbour __rcu **buckets = nht->hash_buckets;
387 
388 	if (size <= PAGE_SIZE)
389 		kfree(buckets);
390 	else
391 		free_pages((unsigned long)buckets, get_order(size));
392 	kfree(nht);
393 }
394 
395 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
396 						unsigned long new_shift)
397 {
398 	unsigned int i, hash;
399 	struct neigh_hash_table *new_nht, *old_nht;
400 
401 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
402 
403 	old_nht = rcu_dereference_protected(tbl->nht,
404 					    lockdep_is_held(&tbl->lock));
405 	new_nht = neigh_hash_alloc(new_shift);
406 	if (!new_nht)
407 		return old_nht;
408 
409 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
410 		struct neighbour *n, *next;
411 
412 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
413 						   lockdep_is_held(&tbl->lock));
414 		     n != NULL;
415 		     n = next) {
416 			hash = tbl->hash(n->primary_key, n->dev,
417 					 new_nht->hash_rnd);
418 
419 			hash >>= (32 - new_nht->hash_shift);
420 			next = rcu_dereference_protected(n->next,
421 						lockdep_is_held(&tbl->lock));
422 
423 			rcu_assign_pointer(n->next,
424 					   rcu_dereference_protected(
425 						new_nht->hash_buckets[hash],
426 						lockdep_is_held(&tbl->lock)));
427 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
428 		}
429 	}
430 
431 	rcu_assign_pointer(tbl->nht, new_nht);
432 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
433 	return new_nht;
434 }
435 
436 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
437 			       struct net_device *dev)
438 {
439 	struct neighbour *n;
440 
441 	NEIGH_CACHE_STAT_INC(tbl, lookups);
442 
443 	rcu_read_lock_bh();
444 	n = __neigh_lookup_noref(tbl, pkey, dev);
445 	if (n) {
446 		if (!refcount_inc_not_zero(&n->refcnt))
447 			n = NULL;
448 		NEIGH_CACHE_STAT_INC(tbl, hits);
449 	}
450 
451 	rcu_read_unlock_bh();
452 	return n;
453 }
454 EXPORT_SYMBOL(neigh_lookup);
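
/* Typical usage (a minimal sketch; arp_tbl is the IPv4 table assumed
 * to be registered by net/ipv4/arp.c):
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &next_hop, dev);
 *	if (n) {
 *		// refcnt was taken: the entry cannot be freed under us
 *		...
 *		neigh_release(n);
 *	}
 */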
455 
456 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
457 				     const void *pkey)
458 {
459 	struct neighbour *n;
460 	unsigned int key_len = tbl->key_len;
461 	u32 hash_val;
462 	struct neigh_hash_table *nht;
463 
464 	NEIGH_CACHE_STAT_INC(tbl, lookups);
465 
466 	rcu_read_lock_bh();
467 	nht = rcu_dereference_bh(tbl->nht);
468 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
469 
470 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
471 	     n != NULL;
472 	     n = rcu_dereference_bh(n->next)) {
473 		if (!memcmp(n->primary_key, pkey, key_len) &&
474 		    net_eq(dev_net(n->dev), net)) {
475 			if (!refcount_inc_not_zero(&n->refcnt))
476 				n = NULL;
477 			NEIGH_CACHE_STAT_INC(tbl, hits);
478 			break;
479 		}
480 	}
481 
482 	rcu_read_unlock_bh();
483 	return n;
484 }
485 EXPORT_SYMBOL(neigh_lookup_nodev);
486 
487 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
488 				 struct net_device *dev, bool want_ref)
489 {
490 	u32 hash_val;
491 	unsigned int key_len = tbl->key_len;
492 	int error;
493 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
494 	struct neigh_hash_table *nht;
495 
496 	if (!n) {
497 		rc = ERR_PTR(-ENOBUFS);
498 		goto out;
499 	}
500 
501 	memcpy(n->primary_key, pkey, key_len);
502 	n->dev = dev;
503 	dev_hold(dev);
504 
505 	/* Protocol specific setup. */
506 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
507 		rc = ERR_PTR(error);
508 		goto out_neigh_release;
509 	}
510 
511 	if (dev->netdev_ops->ndo_neigh_construct) {
512 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
513 		if (error < 0) {
514 			rc = ERR_PTR(error);
515 			goto out_neigh_release;
516 		}
517 	}
518 
519 	/* Device specific setup. */
520 	if (n->parms->neigh_setup &&
521 	    (error = n->parms->neigh_setup(n)) < 0) {
522 		rc = ERR_PTR(error);
523 		goto out_neigh_release;
524 	}
525 
526 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
527 
528 	write_lock_bh(&tbl->lock);
529 	nht = rcu_dereference_protected(tbl->nht,
530 					lockdep_is_held(&tbl->lock));
531 
532 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
533 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
534 
535 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
536 
537 	if (n->parms->dead) {
538 		rc = ERR_PTR(-EINVAL);
539 		goto out_tbl_unlock;
540 	}
541 
542 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
543 					    lockdep_is_held(&tbl->lock));
544 	     n1 != NULL;
545 	     n1 = rcu_dereference_protected(n1->next,
546 			lockdep_is_held(&tbl->lock))) {
547 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
548 			if (want_ref)
549 				neigh_hold(n1);
550 			rc = n1;
551 			goto out_tbl_unlock;
552 		}
553 	}
554 
555 	n->dead = 0;
556 	if (want_ref)
557 		neigh_hold(n);
558 	rcu_assign_pointer(n->next,
559 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
560 						     lockdep_is_held(&tbl->lock)));
561 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
562 	write_unlock_bh(&tbl->lock);
563 	neigh_dbg(2, "neigh %p is created\n", n);
564 	rc = n;
565 out:
566 	return rc;
567 out_tbl_unlock:
568 	write_unlock_bh(&tbl->lock);
569 out_neigh_release:
570 	neigh_release(n);
571 	goto out;
572 }
573 EXPORT_SYMBOL(__neigh_create);
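
/* Most callers use the neigh_create() wrapper; a minimal sketch of the
 * inline from include/net/neighbour.h:
 *
 *	static inline struct neighbour *neigh_create(struct neigh_table *tbl,
 *						     const void *pkey,
 *						     struct net_device *dev)
 *	{
 *		return __neigh_create(tbl, pkey, dev, true);
 *	}
 */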
574 
575 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
576 {
577 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
578 	hash_val ^= (hash_val >> 16);
579 	hash_val ^= hash_val >> 8;
580 	hash_val ^= hash_val >> 4;
581 	hash_val &= PNEIGH_HASHMASK;
582 	return hash_val;
583 }
584 
585 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
586 					      struct net *net,
587 					      const void *pkey,
588 					      unsigned int key_len,
589 					      struct net_device *dev)
590 {
591 	while (n) {
592 		if (!memcmp(n->key, pkey, key_len) &&
593 		    net_eq(pneigh_net(n), net) &&
594 		    (n->dev == dev || !n->dev))
595 			return n;
596 		n = n->next;
597 	}
598 	return NULL;
599 }
600 
601 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
602 		struct net *net, const void *pkey, struct net_device *dev)
603 {
604 	unsigned int key_len = tbl->key_len;
605 	u32 hash_val = pneigh_hash(pkey, key_len);
606 
607 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
608 				 net, pkey, key_len, dev);
609 }
610 EXPORT_SYMBOL_GPL(__pneigh_lookup);
611 
612 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
613 				    struct net *net, const void *pkey,
614 				    struct net_device *dev, int creat)
615 {
616 	struct pneigh_entry *n;
617 	unsigned int key_len = tbl->key_len;
618 	u32 hash_val = pneigh_hash(pkey, key_len);
619 
620 	read_lock_bh(&tbl->lock);
621 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
622 			      net, pkey, key_len, dev);
623 	read_unlock_bh(&tbl->lock);
624 
625 	if (n || !creat)
626 		goto out;
627 
628 	ASSERT_RTNL();
629 
630 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
631 	if (!n)
632 		goto out;
633 
634 	write_pnet(&n->net, net);
635 	memcpy(n->key, pkey, key_len);
636 	n->dev = dev;
637 	if (dev)
638 		dev_hold(dev);
639 
640 	if (tbl->pconstructor && tbl->pconstructor(n)) {
641 		if (dev)
642 			dev_put(dev);
643 		kfree(n);
644 		n = NULL;
645 		goto out;
646 	}
647 
648 	write_lock_bh(&tbl->lock);
649 	n->next = tbl->phash_buckets[hash_val];
650 	tbl->phash_buckets[hash_val] = n;
651 	write_unlock_bh(&tbl->lock);
652 out:
653 	return n;
654 }
655 EXPORT_SYMBOL(pneigh_lookup);
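
/* pneigh entries implement proxy ARP/ND. A userspace example that ends
 * up here with creat = 1, via the NTF_PROXY branch of neigh_add() below
 * (illustrative; 192.0.2.1 and eth0 are placeholders):
 *
 *	ip neigh add proxy 192.0.2.1 dev eth0
 */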
656 
657 
658 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
659 		  struct net_device *dev)
660 {
661 	struct pneigh_entry *n, **np;
662 	unsigned int key_len = tbl->key_len;
663 	u32 hash_val = pneigh_hash(pkey, key_len);
664 
665 	write_lock_bh(&tbl->lock);
666 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
667 	     np = &n->next) {
668 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
669 		    net_eq(pneigh_net(n), net)) {
670 			*np = n->next;
671 			write_unlock_bh(&tbl->lock);
672 			if (tbl->pdestructor)
673 				tbl->pdestructor(n);
674 			if (n->dev)
675 				dev_put(n->dev);
676 			kfree(n);
677 			return 0;
678 		}
679 	}
680 	write_unlock_bh(&tbl->lock);
681 	return -ENOENT;
682 }
683 
684 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
685 				    struct net_device *dev)
686 {
687 	struct pneigh_entry *n, **np, *freelist = NULL;
688 	u32 h;
689 
690 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
691 		np = &tbl->phash_buckets[h];
692 		while ((n = *np) != NULL) {
693 			if (!dev || n->dev == dev) {
694 				*np = n->next;
695 				n->next = freelist;
696 				freelist = n;
697 				continue;
698 			}
699 			np = &n->next;
700 		}
701 	}
702 	write_unlock_bh(&tbl->lock);
703 	while ((n = freelist)) {
704 		freelist = n->next;
705 		n->next = NULL;
706 		if (tbl->pdestructor)
707 			tbl->pdestructor(n);
708 		if (n->dev)
709 			dev_put(n->dev);
710 		kfree(n);
711 	}
712 	return -ENOENT;
713 }
714 
715 static void neigh_parms_destroy(struct neigh_parms *parms);
716 
717 static inline void neigh_parms_put(struct neigh_parms *parms)
718 {
719 	if (refcount_dec_and_test(&parms->refcnt))
720 		neigh_parms_destroy(parms);
721 }
722 
723 /*
724  *	The neighbour must already be out of the table.
725  *
726  */
727 void neigh_destroy(struct neighbour *neigh)
728 {
729 	struct net_device *dev = neigh->dev;
730 
731 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
732 
733 	if (!neigh->dead) {
734 		pr_warn("Destroying alive neighbour %p\n", neigh);
735 		dump_stack();
736 		return;
737 	}
738 
739 	if (neigh_del_timer(neigh))
740 		pr_warn("Impossible event\n");
741 
742 	write_lock_bh(&neigh->lock);
743 	__skb_queue_purge(&neigh->arp_queue);
744 	write_unlock_bh(&neigh->lock);
745 	neigh->arp_queue_len_bytes = 0;
746 
747 	if (dev->netdev_ops->ndo_neigh_destroy)
748 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
749 
750 	dev_put(dev);
751 	neigh_parms_put(neigh->parms);
752 
753 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
754 
755 	atomic_dec(&neigh->tbl->entries);
756 	kfree_rcu(neigh, rcu);
757 }
758 EXPORT_SYMBOL(neigh_destroy);
759 
760 /* Neighbour state is suspicious;
761    disable the fast path.
762 
763    Called with the neighbour write-locked.
764  */
765 static void neigh_suspect(struct neighbour *neigh)
766 {
767 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
768 
769 	neigh->output = neigh->ops->output;
770 }
771 
772 /* Neighbour state is OK;
773    enable the fast path.
774 
775    Called with the neighbour write-locked.
776  */
777 static void neigh_connect(struct neighbour *neigh)
778 {
779 	neigh_dbg(2, "neigh %p is connected\n", neigh);
780 
781 	neigh->output = neigh->ops->connected_output;
782 }
783 
784 static void neigh_periodic_work(struct work_struct *work)
785 {
786 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
787 	struct neighbour *n;
788 	struct neighbour __rcu **np;
789 	unsigned int i;
790 	struct neigh_hash_table *nht;
791 
792 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
793 
794 	write_lock_bh(&tbl->lock);
795 	nht = rcu_dereference_protected(tbl->nht,
796 					lockdep_is_held(&tbl->lock));
797 
798 	/*
799 	 *	Periodically recompute ReachableTime from the random function.
800 	 */
801 
802 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
803 		struct neigh_parms *p;
804 		tbl->last_rand = jiffies;
805 		list_for_each_entry(p, &tbl->parms_list, list)
806 			p->reachable_time =
807 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
808 	}
809 
810 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
811 		goto out;
812 
813 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
814 		np = &nht->hash_buckets[i];
815 
816 		while ((n = rcu_dereference_protected(*np,
817 				lockdep_is_held(&tbl->lock))) != NULL) {
818 			unsigned int state;
819 
820 			write_lock(&n->lock);
821 
822 			state = n->nud_state;
823 			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
824 				write_unlock(&n->lock);
825 				goto next_elt;
826 			}
827 
828 			if (time_before(n->used, n->confirmed))
829 				n->used = n->confirmed;
830 
831 			if (refcount_read(&n->refcnt) == 1 &&
832 			    (state == NUD_FAILED ||
833 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
834 				*np = n->next;
835 				n->dead = 1;
836 				write_unlock(&n->lock);
837 				neigh_cleanup_and_release(n);
838 				continue;
839 			}
840 			write_unlock(&n->lock);
841 
842 next_elt:
843 			np = &n->next;
844 		}
845 		/*
846 		 * It's fine to release lock here, even if hash table
847 		 * grows while we are preempted.
848 		 */
849 		write_unlock_bh(&tbl->lock);
850 		cond_resched();
851 		write_lock_bh(&tbl->lock);
852 		nht = rcu_dereference_protected(tbl->nht,
853 						lockdep_is_held(&tbl->lock));
854 	}
855 out:
856 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
857 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
858 	 * BASE_REACHABLE_TIME.
859 	 */
860 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
861 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
862 	write_unlock_bh(&tbl->lock);
863 }
864 
865 static __inline__ int neigh_max_probes(struct neighbour *n)
866 {
867 	struct neigh_parms *p = n->parms;
868 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
869 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
870 	        NEIGH_VAR(p, MCAST_PROBES));
871 }
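
/* With the usual ARP defaults (UCAST_PROBES = 3, APP_PROBES = 0,
 * MCAST_PROBES = 3, MCAST_REPROBES = 0 -- an assumption, these are
 * per-device sysctls), an NUD_INCOMPLETE entry gets 3 + 0 + 3 = 6
 * probes before failing, while an entry re-probing from NUD_PROBE gets
 * only the 3 unicast attempts, because MCAST_REPROBES replaces the
 * multicast budget there.
 */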
872 
873 static void neigh_invalidate(struct neighbour *neigh)
874 	__releases(neigh->lock)
875 	__acquires(neigh->lock)
876 {
877 	struct sk_buff *skb;
878 
879 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
880 	neigh_dbg(2, "neigh %p is failed\n", neigh);
881 	neigh->updated = jiffies;
882 
883 	/* This is a very delicate place. The error_report routine is very
884 	   complicated; in particular, it can hit this same neighbour entry!
885 
886 	   Hence, we try to be careful and avoid an endless loop. --ANK
887 	 */
888 	while (neigh->nud_state == NUD_FAILED &&
889 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
890 		write_unlock(&neigh->lock);
891 		neigh->ops->error_report(neigh, skb);
892 		write_lock(&neigh->lock);
893 	}
894 	__skb_queue_purge(&neigh->arp_queue);
895 	neigh->arp_queue_len_bytes = 0;
896 }
897 
898 static void neigh_probe(struct neighbour *neigh)
899 	__releases(neigh->lock)
900 {
901 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
902 	/* keep skb alive even if arp_queue overflows */
903 	if (skb)
904 		skb = skb_clone(skb, GFP_ATOMIC);
905 	write_unlock(&neigh->lock);
906 	if (neigh->ops->solicit)
907 		neigh->ops->solicit(neigh, skb);
908 	atomic_inc(&neigh->probes);
909 	kfree_skb(skb);
910 }
911 
912 /* Called when a timer expires for a neighbour entry. */
913 
914 static void neigh_timer_handler(struct timer_list *t)
915 {
916 	unsigned long now, next;
917 	struct neighbour *neigh = from_timer(neigh, t, timer);
918 	unsigned int state;
919 	int notify = 0;
920 
921 	write_lock(&neigh->lock);
922 
923 	state = neigh->nud_state;
924 	now = jiffies;
925 	next = now + HZ;
926 
927 	if (!(state & NUD_IN_TIMER))
928 		goto out;
929 
930 	if (state & NUD_REACHABLE) {
931 		if (time_before_eq(now,
932 				   neigh->confirmed + neigh->parms->reachable_time)) {
933 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
934 			next = neigh->confirmed + neigh->parms->reachable_time;
935 		} else if (time_before_eq(now,
936 					  neigh->used +
937 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
938 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
939 			neigh->nud_state = NUD_DELAY;
940 			neigh->updated = jiffies;
941 			neigh_suspect(neigh);
942 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
943 		} else {
944 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
945 			neigh->nud_state = NUD_STALE;
946 			neigh->updated = jiffies;
947 			neigh_suspect(neigh);
948 			notify = 1;
949 		}
950 	} else if (state & NUD_DELAY) {
951 		if (time_before_eq(now,
952 				   neigh->confirmed +
953 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
954 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
955 			neigh->nud_state = NUD_REACHABLE;
956 			neigh->updated = jiffies;
957 			neigh_connect(neigh);
958 			notify = 1;
959 			next = neigh->confirmed + neigh->parms->reachable_time;
960 		} else {
961 			neigh_dbg(2, "neigh %p is probed\n", neigh);
962 			neigh->nud_state = NUD_PROBE;
963 			neigh->updated = jiffies;
964 			atomic_set(&neigh->probes, 0);
965 			notify = 1;
966 			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
967 		}
968 	} else {
969 		/* NUD_PROBE|NUD_INCOMPLETE */
970 		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
971 	}
972 
973 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
974 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
975 		neigh->nud_state = NUD_FAILED;
976 		notify = 1;
977 		neigh_invalidate(neigh);
978 		goto out;
979 	}
980 
981 	if (neigh->nud_state & NUD_IN_TIMER) {
982 		if (time_before(next, jiffies + HZ/2))
983 			next = jiffies + HZ/2;
984 		if (!mod_timer(&neigh->timer, next))
985 			neigh_hold(neigh);
986 	}
987 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
988 		neigh_probe(neigh);
989 	} else {
990 out:
991 		write_unlock(&neigh->lock);
992 	}
993 
994 	if (notify)
995 		neigh_update_notify(neigh, 0);
996 
997 	neigh_release(neigh);
998 }
999 
1000 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1001 {
1002 	int rc;
1003 	bool immediate_probe = false;
1004 
1005 	write_lock_bh(&neigh->lock);
1006 
1007 	rc = 0;
1008 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1009 		goto out_unlock_bh;
1010 	if (neigh->dead)
1011 		goto out_dead;
1012 
1013 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1014 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1015 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1016 			unsigned long next, now = jiffies;
1017 
1018 			atomic_set(&neigh->probes,
1019 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1020 			neigh->nud_state     = NUD_INCOMPLETE;
1021 			neigh->updated = now;
1022 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1023 					 HZ/2);
1024 			neigh_add_timer(neigh, next);
1025 			immediate_probe = true;
1026 		} else {
1027 			neigh->nud_state = NUD_FAILED;
1028 			neigh->updated = jiffies;
1029 			write_unlock_bh(&neigh->lock);
1030 
1031 			kfree_skb(skb);
1032 			return 1;
1033 		}
1034 	} else if (neigh->nud_state & NUD_STALE) {
1035 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1036 		neigh->nud_state = NUD_DELAY;
1037 		neigh->updated = jiffies;
1038 		neigh_add_timer(neigh, jiffies +
1039 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1040 	}
1041 
1042 	if (neigh->nud_state == NUD_INCOMPLETE) {
1043 		if (skb) {
1044 			while (neigh->arp_queue_len_bytes + skb->truesize >
1045 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1046 				struct sk_buff *buff;
1047 
1048 				buff = __skb_dequeue(&neigh->arp_queue);
1049 				if (!buff)
1050 					break;
1051 				neigh->arp_queue_len_bytes -= buff->truesize;
1052 				kfree_skb(buff);
1053 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1054 			}
1055 			skb_dst_force(skb);
1056 			__skb_queue_tail(&neigh->arp_queue, skb);
1057 			neigh->arp_queue_len_bytes += skb->truesize;
1058 		}
1059 		rc = 1;
1060 	}
1061 out_unlock_bh:
1062 	if (immediate_probe)
1063 		neigh_probe(neigh);
1064 	else
1065 		write_unlock(&neigh->lock);
1066 	local_bh_enable();
1067 	return rc;
1068 
1069 out_dead:
1070 	if (neigh->nud_state & NUD_STALE)
1071 		goto out_unlock_bh;
1072 	write_unlock_bh(&neigh->lock);
1073 	kfree_skb(skb);
1074 	return 1;
1075 }
1076 EXPORT_SYMBOL(__neigh_event_send);
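
/* A minimal sketch of the neigh_event_send() wrapper from
 * include/net/neighbour.h: it refreshes neigh->used and takes the slow
 * path above only when the entry is not connected or already probing:
 *
 *	if (neigh->used != jiffies)
 *		neigh->used = jiffies;
 *	if (!(neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
 *		return __neigh_event_send(neigh, skb);
 *	return 0;
 */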
1077 
1078 static void neigh_update_hhs(struct neighbour *neigh)
1079 {
1080 	struct hh_cache *hh;
1081 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1082 		= NULL;
1083 
1084 	if (neigh->dev->header_ops)
1085 		update = neigh->dev->header_ops->cache_update;
1086 
1087 	if (update) {
1088 		hh = &neigh->hh;
1089 		if (hh->hh_len) {
1090 			write_seqlock_bh(&hh->hh_lock);
1091 			update(hh, neigh->dev, neigh->ha);
1092 			write_sequnlock_bh(&hh->hh_lock);
1093 		}
1094 	}
1095 }
1096 
1097 
1098 
1099 /* Generic update routine.
1100    -- lladdr is the new lladdr, or NULL if it is not supplied.
1101    -- new    is the new state.
1102    -- flags
1103 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
1104 				if it is different.
1105 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1106 				lladdr instead of overriding it,
1107 				if it is different.
1108 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1109 
1110 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1111 				NTF_ROUTER flag.
1112 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to
1113 				be a router.
1114 
1115    The caller MUST hold a reference count on the entry.
1116  */
1117 
1118 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1119 		 u32 flags, u32 nlmsg_pid)
1120 {
1121 	u8 old;
1122 	int err;
1123 	int notify = 0;
1124 	struct net_device *dev;
1125 	int update_isrouter = 0;
1126 
1127 	write_lock_bh(&neigh->lock);
1128 
1129 	dev    = neigh->dev;
1130 	old    = neigh->nud_state;
1131 	err    = -EPERM;
1132 
1133 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1134 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1135 		goto out;
1136 	if (neigh->dead)
1137 		goto out;
1138 
1139 	if (!(new & NUD_VALID)) {
1140 		neigh_del_timer(neigh);
1141 		if (old & NUD_CONNECTED)
1142 			neigh_suspect(neigh);
1143 		neigh->nud_state = new;
1144 		err = 0;
1145 		notify = old & NUD_VALID;
1146 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1147 		    (new & NUD_FAILED)) {
1148 			neigh_invalidate(neigh);
1149 			notify = 1;
1150 		}
1151 		goto out;
1152 	}
1153 
1154 	/* Compare new lladdr with cached one */
1155 	if (!dev->addr_len) {
1156 		/* First case: device needs no address. */
1157 		lladdr = neigh->ha;
1158 	} else if (lladdr) {
1159 		/* The second case: if something is already cached
1160 		   and a new address is proposed:
1161 		   - compare new & old
1162 		   - if they are different, check override flag
1163 		 */
1164 		if ((old & NUD_VALID) &&
1165 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1166 			lladdr = neigh->ha;
1167 	} else {
1168 		/* No address is supplied; if we know something,
1169 		   use it, otherwise discard the request.
1170 		 */
1171 		err = -EINVAL;
1172 		if (!(old & NUD_VALID))
1173 			goto out;
1174 		lladdr = neigh->ha;
1175 	}
1176 
1177 	/* If the entry was valid and the address has not changed,
1178 	   do not change the entry state if the new one is STALE.
1179 	 */
1180 	err = 0;
1181 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1182 	if (old & NUD_VALID) {
1183 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1184 			update_isrouter = 0;
1185 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1186 			    (old & NUD_CONNECTED)) {
1187 				lladdr = neigh->ha;
1188 				new = NUD_STALE;
1189 			} else
1190 				goto out;
1191 		} else {
1192 			if (lladdr == neigh->ha && new == NUD_STALE &&
1193 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1194 				new = old;
1195 		}
1196 	}
1197 
1198 	/* Update timestamps only once we know we will make a change to the
1199 	 * neighbour entry. Otherwise we risk moving the locktime window with
1200 	 * no-op updates and ignoring relevant ARP updates.
1201 	 */
1202 	if (new != old || lladdr != neigh->ha) {
1203 		if (new & NUD_CONNECTED)
1204 			neigh->confirmed = jiffies;
1205 		neigh->updated = jiffies;
1206 	}
1207 
1208 	if (new != old) {
1209 		neigh_del_timer(neigh);
1210 		if (new & NUD_PROBE)
1211 			atomic_set(&neigh->probes, 0);
1212 		if (new & NUD_IN_TIMER)
1213 			neigh_add_timer(neigh, (jiffies +
1214 						((new & NUD_REACHABLE) ?
1215 						 neigh->parms->reachable_time :
1216 						 0)));
1217 		neigh->nud_state = new;
1218 		notify = 1;
1219 	}
1220 
1221 	if (lladdr != neigh->ha) {
1222 		write_seqlock(&neigh->ha_lock);
1223 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1224 		write_sequnlock(&neigh->ha_lock);
1225 		neigh_update_hhs(neigh);
1226 		if (!(new & NUD_CONNECTED))
1227 			neigh->confirmed = jiffies -
1228 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1229 		notify = 1;
1230 	}
1231 	if (new == old)
1232 		goto out;
1233 	if (new & NUD_CONNECTED)
1234 		neigh_connect(neigh);
1235 	else
1236 		neigh_suspect(neigh);
1237 	if (!(old & NUD_VALID)) {
1238 		struct sk_buff *skb;
1239 
1240 		/* Again: avoid an endless loop if something went wrong */
1241 
1242 		while (neigh->nud_state & NUD_VALID &&
1243 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1244 			struct dst_entry *dst = skb_dst(skb);
1245 			struct neighbour *n2, *n1 = neigh;
1246 			write_unlock_bh(&neigh->lock);
1247 
1248 			rcu_read_lock();
1249 
1250 			/* Why not just use 'neigh' as-is?  The problem is that
1251 			 * things such as shaper, eql, and sch_teql can end up
1252 			 * using alternative, different neigh objects to output
1253 			 * the packet in the output path.  So what we need to do
1254 			 * here is to re-look up the top-level neigh in the path so
1255 			 * we can reinject the packet there.
1256 			 */
1257 			n2 = NULL;
1258 			if (dst) {
1259 				n2 = dst_neigh_lookup_skb(dst, skb);
1260 				if (n2)
1261 					n1 = n2;
1262 			}
1263 			n1->output(n1, skb);
1264 			if (n2)
1265 				neigh_release(n2);
1266 			rcu_read_unlock();
1267 
1268 			write_lock_bh(&neigh->lock);
1269 		}
1270 		__skb_queue_purge(&neigh->arp_queue);
1271 		neigh->arp_queue_len_bytes = 0;
1272 	}
1273 out:
1274 	if (update_isrouter) {
1275 		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1276 			(neigh->flags | NTF_ROUTER) :
1277 			(neigh->flags & ~NTF_ROUTER);
1278 	}
1279 	write_unlock_bh(&neigh->lock);
1280 
1281 	if (notify)
1282 		neigh_update_notify(neigh, nlmsg_pid);
1283 
1284 	return err;
1285 }
1286 EXPORT_SYMBOL(neigh_update);
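
/* Example (a sketch of how a receive path confirms a neighbour, roughly
 * what ARP does; sha is the sender hardware address from the packet):
 *
 *	neigh_update(n, sha, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE, 0);
 */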
1287 
1288 /* Update the neigh to listen temporarily for probe responses, even if it is
1289  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1290  */
1291 void __neigh_set_probe_once(struct neighbour *neigh)
1292 {
1293 	if (neigh->dead)
1294 		return;
1295 	neigh->updated = jiffies;
1296 	if (!(neigh->nud_state & NUD_FAILED))
1297 		return;
1298 	neigh->nud_state = NUD_INCOMPLETE;
1299 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1300 	neigh_add_timer(neigh,
1301 			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
1302 }
1303 EXPORT_SYMBOL(__neigh_set_probe_once);
1304 
1305 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1306 				 u8 *lladdr, void *saddr,
1307 				 struct net_device *dev)
1308 {
1309 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1310 						 lladdr || !dev->addr_len);
1311 	if (neigh)
1312 		neigh_update(neigh, lladdr, NUD_STALE,
1313 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1314 	return neigh;
1315 }
1316 EXPORT_SYMBOL(neigh_event_ns);
1317 
1318 /* called with read_lock_bh(&n->lock); */
1319 static void neigh_hh_init(struct neighbour *n)
1320 {
1321 	struct net_device *dev = n->dev;
1322 	__be16 prot = n->tbl->protocol;
1323 	struct hh_cache	*hh = &n->hh;
1324 
1325 	write_lock_bh(&n->lock);
1326 
1327 	/* Only one thread can come in here and initialize the
1328 	 * hh_cache entry.
1329 	 */
1330 	if (!hh->hh_len)
1331 		dev->header_ops->cache(n, hh, prot);
1332 
1333 	write_unlock_bh(&n->lock);
1334 }
1335 
1336 /* Slow and careful. */
1337 
1338 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1339 {
1340 	int rc = 0;
1341 
1342 	if (!neigh_event_send(neigh, skb)) {
1343 		int err;
1344 		struct net_device *dev = neigh->dev;
1345 		unsigned int seq;
1346 
1347 		if (dev->header_ops->cache && !neigh->hh.hh_len)
1348 			neigh_hh_init(neigh);
1349 
1350 		do {
1351 			__skb_pull(skb, skb_network_offset(skb));
1352 			seq = read_seqbegin(&neigh->ha_lock);
1353 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1354 					      neigh->ha, NULL, skb->len);
1355 		} while (read_seqretry(&neigh->ha_lock, seq));
1356 
1357 		if (err >= 0)
1358 			rc = dev_queue_xmit(skb);
1359 		else
1360 			goto out_kfree_skb;
1361 	}
1362 out:
1363 	return rc;
1364 out_kfree_skb:
1365 	rc = -EINVAL;
1366 	kfree_skb(skb);
1367 	goto out;
1368 }
1369 EXPORT_SYMBOL(neigh_resolve_output);
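
/* The ha_lock read side above is the standard seqlock pattern; a sketch
 * of taking a consistent copy of the hardware address (this is what
 * neigh_ha_snapshot() in include/net/neighbour.h does):
 *
 *	do {
 *		seq = read_seqbegin(&n->ha_lock);
 *		memcpy(dst, n->ha, dev->addr_len);
 *	} while (read_seqretry(&n->ha_lock, seq));
 */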
1370 
1371 /* As fast as possible without hh cache */
1372 
1373 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1374 {
1375 	struct net_device *dev = neigh->dev;
1376 	unsigned int seq;
1377 	int err;
1378 
1379 	do {
1380 		__skb_pull(skb, skb_network_offset(skb));
1381 		seq = read_seqbegin(&neigh->ha_lock);
1382 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1383 				      neigh->ha, NULL, skb->len);
1384 	} while (read_seqretry(&neigh->ha_lock, seq));
1385 
1386 	if (err >= 0)
1387 		err = dev_queue_xmit(skb);
1388 	else {
1389 		err = -EINVAL;
1390 		kfree_skb(skb);
1391 	}
1392 	return err;
1393 }
1394 EXPORT_SYMBOL(neigh_connected_output);
1395 
1396 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1397 {
1398 	return dev_queue_xmit(skb);
1399 }
1400 EXPORT_SYMBOL(neigh_direct_output);
1401 
1402 static void neigh_proxy_process(struct timer_list *t)
1403 {
1404 	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1405 	long sched_next = 0;
1406 	unsigned long now = jiffies;
1407 	struct sk_buff *skb, *n;
1408 
1409 	spin_lock(&tbl->proxy_queue.lock);
1410 
1411 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1412 		long tdif = NEIGH_CB(skb)->sched_next - now;
1413 
1414 		if (tdif <= 0) {
1415 			struct net_device *dev = skb->dev;
1416 
1417 			__skb_unlink(skb, &tbl->proxy_queue);
1418 			if (tbl->proxy_redo && netif_running(dev)) {
1419 				rcu_read_lock();
1420 				tbl->proxy_redo(skb);
1421 				rcu_read_unlock();
1422 			} else {
1423 				kfree_skb(skb);
1424 			}
1425 
1426 			dev_put(dev);
1427 		} else if (!sched_next || tdif < sched_next)
1428 			sched_next = tdif;
1429 	}
1430 	del_timer(&tbl->proxy_timer);
1431 	if (sched_next)
1432 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1433 	spin_unlock(&tbl->proxy_queue.lock);
1434 }
1435 
1436 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1437 		    struct sk_buff *skb)
1438 {
1439 	unsigned long now = jiffies;
1440 
1441 	unsigned long sched_next = now + (prandom_u32() %
1442 					  NEIGH_VAR(p, PROXY_DELAY));
1443 
1444 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1445 		kfree_skb(skb);
1446 		return;
1447 	}
1448 
1449 	NEIGH_CB(skb)->sched_next = sched_next;
1450 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1451 
1452 	spin_lock(&tbl->proxy_queue.lock);
1453 	if (del_timer(&tbl->proxy_timer)) {
1454 		if (time_before(tbl->proxy_timer.expires, sched_next))
1455 			sched_next = tbl->proxy_timer.expires;
1456 	}
1457 	skb_dst_drop(skb);
1458 	dev_hold(skb->dev);
1459 	__skb_queue_tail(&tbl->proxy_queue, skb);
1460 	mod_timer(&tbl->proxy_timer, sched_next);
1461 	spin_unlock(&tbl->proxy_queue.lock);
1462 }
1463 EXPORT_SYMBOL(pneigh_enqueue);
1464 
1465 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1466 						      struct net *net, int ifindex)
1467 {
1468 	struct neigh_parms *p;
1469 
1470 	list_for_each_entry(p, &tbl->parms_list, list) {
1471 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1472 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1473 			return p;
1474 	}
1475 
1476 	return NULL;
1477 }
1478 
1479 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1480 				      struct neigh_table *tbl)
1481 {
1482 	struct neigh_parms *p;
1483 	struct net *net = dev_net(dev);
1484 	const struct net_device_ops *ops = dev->netdev_ops;
1485 
1486 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1487 	if (p) {
1488 		p->tbl		  = tbl;
1489 		refcount_set(&p->refcnt, 1);
1490 		p->reachable_time =
1491 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1492 		dev_hold(dev);
1493 		p->dev = dev;
1494 		write_pnet(&p->net, net);
1495 		p->sysctl_table = NULL;
1496 
1497 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1498 			dev_put(dev);
1499 			kfree(p);
1500 			return NULL;
1501 		}
1502 
1503 		write_lock_bh(&tbl->lock);
1504 		list_add(&p->list, &tbl->parms.list);
1505 		write_unlock_bh(&tbl->lock);
1506 
1507 		neigh_parms_data_state_cleanall(p);
1508 	}
1509 	return p;
1510 }
1511 EXPORT_SYMBOL(neigh_parms_alloc);
1512 
1513 static void neigh_rcu_free_parms(struct rcu_head *head)
1514 {
1515 	struct neigh_parms *parms =
1516 		container_of(head, struct neigh_parms, rcu_head);
1517 
1518 	neigh_parms_put(parms);
1519 }
1520 
1521 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1522 {
1523 	if (!parms || parms == &tbl->parms)
1524 		return;
1525 	write_lock_bh(&tbl->lock);
1526 	list_del(&parms->list);
1527 	parms->dead = 1;
1528 	write_unlock_bh(&tbl->lock);
1529 	if (parms->dev)
1530 		dev_put(parms->dev);
1531 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1532 }
1533 EXPORT_SYMBOL(neigh_parms_release);
1534 
1535 static void neigh_parms_destroy(struct neigh_parms *parms)
1536 {
1537 	kfree(parms);
1538 }
1539 
1540 static struct lock_class_key neigh_table_proxy_queue_class;
1541 
1542 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1543 
1544 void neigh_table_init(int index, struct neigh_table *tbl)
1545 {
1546 	unsigned long now = jiffies;
1547 	unsigned long phsize;
1548 
1549 	INIT_LIST_HEAD(&tbl->parms_list);
1550 	list_add(&tbl->parms.list, &tbl->parms_list);
1551 	write_pnet(&tbl->parms.net, &init_net);
1552 	refcount_set(&tbl->parms.refcnt, 1);
1553 	tbl->parms.reachable_time =
1554 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1555 
1556 	tbl->stats = alloc_percpu(struct neigh_statistics);
1557 	if (!tbl->stats)
1558 		panic("cannot create neighbour cache statistics");
1559 
1560 #ifdef CONFIG_PROC_FS
1561 	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1562 			      &neigh_stat_seq_fops, tbl))
1563 		panic("cannot create neighbour proc dir entry");
1564 #endif
1565 
1566 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1567 
1568 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1569 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1570 
1571 	if (!tbl->nht || !tbl->phash_buckets)
1572 		panic("cannot allocate neighbour cache hashes");
1573 
1574 	if (!tbl->entry_size)
1575 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1576 					tbl->key_len, NEIGH_PRIV_ALIGN);
1577 	else
1578 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1579 
1580 	rwlock_init(&tbl->lock);
1581 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1582 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1583 			tbl->parms.reachable_time);
1584 	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1585 	skb_queue_head_init_class(&tbl->proxy_queue,
1586 			&neigh_table_proxy_queue_class);
1587 
1588 	tbl->last_flush = now;
1589 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1590 
1591 	neigh_tables[index] = tbl;
1592 }
1593 EXPORT_SYMBOL(neigh_table_init);
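
/* Each protocol registers its table once at boot; e.g. the IPv4 ARP
 * code does:
 *
 *	neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
 */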
1594 
1595 int neigh_table_clear(int index, struct neigh_table *tbl)
1596 {
1597 	neigh_tables[index] = NULL;
1598 	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1599 	cancel_delayed_work_sync(&tbl->gc_work);
1600 	del_timer_sync(&tbl->proxy_timer);
1601 	pneigh_queue_purge(&tbl->proxy_queue);
1602 	neigh_ifdown(tbl, NULL);
1603 	if (atomic_read(&tbl->entries))
1604 		pr_crit("neighbour leakage\n");
1605 
1606 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1607 		 neigh_hash_free_rcu);
1608 	tbl->nht = NULL;
1609 
1610 	kfree(tbl->phash_buckets);
1611 	tbl->phash_buckets = NULL;
1612 
1613 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1614 
1615 	free_percpu(tbl->stats);
1616 	tbl->stats = NULL;
1617 
1618 	return 0;
1619 }
1620 EXPORT_SYMBOL(neigh_table_clear);
1621 
1622 static struct neigh_table *neigh_find_table(int family)
1623 {
1624 	struct neigh_table *tbl = NULL;
1625 
1626 	switch (family) {
1627 	case AF_INET:
1628 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1629 		break;
1630 	case AF_INET6:
1631 		tbl = neigh_tables[NEIGH_ND_TABLE];
1632 		break;
1633 	case AF_DECnet:
1634 		tbl = neigh_tables[NEIGH_DN_TABLE];
1635 		break;
1636 	}
1637 
1638 	return tbl;
1639 }
1640 
1641 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1642 			struct netlink_ext_ack *extack)
1643 {
1644 	struct net *net = sock_net(skb->sk);
1645 	struct ndmsg *ndm;
1646 	struct nlattr *dst_attr;
1647 	struct neigh_table *tbl;
1648 	struct neighbour *neigh;
1649 	struct net_device *dev = NULL;
1650 	int err = -EINVAL;
1651 
1652 	ASSERT_RTNL();
1653 	if (nlmsg_len(nlh) < sizeof(*ndm))
1654 		goto out;
1655 
1656 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1657 	if (dst_attr == NULL)
1658 		goto out;
1659 
1660 	ndm = nlmsg_data(nlh);
1661 	if (ndm->ndm_ifindex) {
1662 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1663 		if (dev == NULL) {
1664 			err = -ENODEV;
1665 			goto out;
1666 		}
1667 	}
1668 
1669 	tbl = neigh_find_table(ndm->ndm_family);
1670 	if (tbl == NULL)
1671 		return -EAFNOSUPPORT;
1672 
1673 	if (nla_len(dst_attr) < (int)tbl->key_len)
1674 		goto out;
1675 
1676 	if (ndm->ndm_flags & NTF_PROXY) {
1677 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1678 		goto out;
1679 	}
1680 
1681 	if (dev == NULL)
1682 		goto out;
1683 
1684 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1685 	if (neigh == NULL) {
1686 		err = -ENOENT;
1687 		goto out;
1688 	}
1689 
1690 	err = neigh_update(neigh, NULL, NUD_FAILED,
1691 			   NEIGH_UPDATE_F_OVERRIDE |
1692 			   NEIGH_UPDATE_F_ADMIN,
1693 			   NETLINK_CB(skb).portid);
1694 	write_lock_bh(&tbl->lock);
1695 	neigh_release(neigh);
1696 	neigh_remove_one(neigh, tbl);
1697 	write_unlock_bh(&tbl->lock);
1698 
1699 out:
1700 	return err;
1701 }
1702 
1703 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1704 		     struct netlink_ext_ack *extack)
1705 {
1706 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1707 	struct net *net = sock_net(skb->sk);
1708 	struct ndmsg *ndm;
1709 	struct nlattr *tb[NDA_MAX+1];
1710 	struct neigh_table *tbl;
1711 	struct net_device *dev = NULL;
1712 	struct neighbour *neigh;
1713 	void *dst, *lladdr;
1714 	int err;
1715 
1716 	ASSERT_RTNL();
1717 	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
1718 	if (err < 0)
1719 		goto out;
1720 
1721 	err = -EINVAL;
1722 	if (tb[NDA_DST] == NULL)
1723 		goto out;
1724 
1725 	ndm = nlmsg_data(nlh);
1726 	if (ndm->ndm_ifindex) {
1727 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1728 		if (dev == NULL) {
1729 			err = -ENODEV;
1730 			goto out;
1731 		}
1732 
1733 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1734 			goto out;
1735 	}
1736 
1737 	tbl = neigh_find_table(ndm->ndm_family);
1738 	if (tbl == NULL)
1739 		return -EAFNOSUPPORT;
1740 
1741 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len)
1742 		goto out;
1743 	dst = nla_data(tb[NDA_DST]);
1744 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1745 
1746 	if (ndm->ndm_flags & NTF_PROXY) {
1747 		struct pneigh_entry *pn;
1748 
1749 		err = -ENOBUFS;
1750 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1751 		if (pn) {
1752 			pn->flags = ndm->ndm_flags;
1753 			err = 0;
1754 		}
1755 		goto out;
1756 	}
1757 
1758 	if (dev == NULL)
1759 		goto out;
1760 
1761 	neigh = neigh_lookup(tbl, dst, dev);
1762 	if (neigh == NULL) {
1763 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1764 			err = -ENOENT;
1765 			goto out;
1766 		}
1767 
1768 		neigh = __neigh_lookup_errno(tbl, dst, dev);
1769 		if (IS_ERR(neigh)) {
1770 			err = PTR_ERR(neigh);
1771 			goto out;
1772 		}
1773 	} else {
1774 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1775 			err = -EEXIST;
1776 			neigh_release(neigh);
1777 			goto out;
1778 		}
1779 
1780 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1781 			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1782 	}
1783 
1784 	if (ndm->ndm_flags & NTF_USE) {
1785 		neigh_event_send(neigh, NULL);
1786 		err = 0;
1787 	} else
1788 		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1789 				   NETLINK_CB(skb).portid);
1790 	neigh_release(neigh);
1791 
1792 out:
1793 	return err;
1794 }
1795 
1796 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1797 {
1798 	struct nlattr *nest;
1799 
1800 	nest = nla_nest_start(skb, NDTA_PARMS);
1801 	if (nest == NULL)
1802 		return -ENOBUFS;
1803 
1804 	if ((parms->dev &&
1805 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1806 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
1807 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
1808 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
1809 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1810 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1811 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1812 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1813 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1814 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1815 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1816 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1817 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1818 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1819 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1820 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1821 			  NDTPA_PAD) ||
1822 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1823 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1824 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1825 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1826 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1827 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1828 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1829 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1830 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1831 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1832 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1833 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1834 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1835 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1836 		goto nla_put_failure;
1837 	return nla_nest_end(skb, nest);
1838 
1839 nla_put_failure:
1840 	nla_nest_cancel(skb, nest);
1841 	return -EMSGSIZE;
1842 }
1843 
1844 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1845 			      u32 pid, u32 seq, int type, int flags)
1846 {
1847 	struct nlmsghdr *nlh;
1848 	struct ndtmsg *ndtmsg;
1849 
1850 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1851 	if (nlh == NULL)
1852 		return -EMSGSIZE;
1853 
1854 	ndtmsg = nlmsg_data(nlh);
1855 
1856 	read_lock_bh(&tbl->lock);
1857 	ndtmsg->ndtm_family = tbl->family;
1858 	ndtmsg->ndtm_pad1   = 0;
1859 	ndtmsg->ndtm_pad2   = 0;
1860 
1861 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1862 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1863 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1864 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1865 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1866 		goto nla_put_failure;
1867 	{
1868 		unsigned long now = jiffies;
1869 		unsigned int flush_delta = now - tbl->last_flush;
1870 		unsigned int rand_delta = now - tbl->last_rand;
1871 		struct neigh_hash_table *nht;
1872 		struct ndt_config ndc = {
1873 			.ndtc_key_len		= tbl->key_len,
1874 			.ndtc_entry_size	= tbl->entry_size,
1875 			.ndtc_entries		= atomic_read(&tbl->entries),
1876 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1877 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1878 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1879 		};
1880 
1881 		rcu_read_lock_bh();
1882 		nht = rcu_dereference_bh(tbl->nht);
1883 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1884 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1885 		rcu_read_unlock_bh();
1886 
1887 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1888 			goto nla_put_failure;
1889 	}
1890 
1891 	{
1892 		int cpu;
1893 		struct ndt_stats ndst;
1894 
1895 		memset(&ndst, 0, sizeof(ndst));
1896 
1897 		for_each_possible_cpu(cpu) {
1898 			struct neigh_statistics	*st;
1899 
1900 			st = per_cpu_ptr(tbl->stats, cpu);
1901 			ndst.ndts_allocs		+= st->allocs;
1902 			ndst.ndts_destroys		+= st->destroys;
1903 			ndst.ndts_hash_grows		+= st->hash_grows;
1904 			ndst.ndts_res_failed		+= st->res_failed;
1905 			ndst.ndts_lookups		+= st->lookups;
1906 			ndst.ndts_hits			+= st->hits;
1907 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1908 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1909 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1910 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1911 			ndst.ndts_table_fulls		+= st->table_fulls;
1912 		}
1913 
1914 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1915 				  NDTA_PAD))
1916 			goto nla_put_failure;
1917 	}
1918 
1919 	BUG_ON(tbl->parms.dev);
1920 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1921 		goto nla_put_failure;
1922 
1923 	read_unlock_bh(&tbl->lock);
1924 	nlmsg_end(skb, nlh);
1925 	return 0;
1926 
1927 nla_put_failure:
1928 	read_unlock_bh(&tbl->lock);
1929 	nlmsg_cancel(skb, nlh);
1930 	return -EMSGSIZE;
1931 }
1932 
1933 static int neightbl_fill_param_info(struct sk_buff *skb,
1934 				    struct neigh_table *tbl,
1935 				    struct neigh_parms *parms,
1936 				    u32 pid, u32 seq, int type,
1937 				    unsigned int flags)
1938 {
1939 	struct ndtmsg *ndtmsg;
1940 	struct nlmsghdr *nlh;
1941 
1942 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1943 	if (nlh == NULL)
1944 		return -EMSGSIZE;
1945 
1946 	ndtmsg = nlmsg_data(nlh);
1947 
1948 	read_lock_bh(&tbl->lock);
1949 	ndtmsg->ndtm_family = tbl->family;
1950 	ndtmsg->ndtm_pad1   = 0;
1951 	ndtmsg->ndtm_pad2   = 0;
1952 
1953 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1954 	    neightbl_fill_parms(skb, parms) < 0)
1955 		goto errout;
1956 
1957 	read_unlock_bh(&tbl->lock);
1958 	nlmsg_end(skb, nlh);
1959 	return 0;
1960 errout:
1961 	read_unlock_bh(&tbl->lock);
1962 	nlmsg_cancel(skb, nlh);
1963 	return -EMSGSIZE;
1964 }
1965 
1966 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1967 	[NDTA_NAME]		= { .type = NLA_STRING },
1968 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1969 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1970 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1971 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1972 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1973 };
1974 
1975 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1976 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1977 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1978 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1979 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1980 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1981 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1982 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1983 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1984 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1985 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1986 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1987 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1988 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1989 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1990 };
1991 
1992 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
1993 			struct netlink_ext_ack *extack)
1994 {
1995 	struct net *net = sock_net(skb->sk);
1996 	struct neigh_table *tbl;
1997 	struct ndtmsg *ndtmsg;
1998 	struct nlattr *tb[NDTA_MAX+1];
1999 	bool found = false;
2000 	int err, tidx;
2001 
2002 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2003 			  nl_neightbl_policy, extack);
2004 	if (err < 0)
2005 		goto errout;
2006 
2007 	if (tb[NDTA_NAME] == NULL) {
2008 		err = -EINVAL;
2009 		goto errout;
2010 	}
2011 
2012 	ndtmsg = nlmsg_data(nlh);
2013 
2014 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2015 		tbl = neigh_tables[tidx];
2016 		if (!tbl)
2017 			continue;
2018 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2019 			continue;
2020 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2021 			found = true;
2022 			break;
2023 		}
2024 	}
2025 
2026 	if (!found)
2027 		return -ENOENT;
2028 
2029 	/*
2030 	 * Acquire tbl->lock so that the periodic timers always see a
2031 	 * consistent set of values.
2032 	 */
2033 	write_lock_bh(&tbl->lock);
2034 
2035 	if (tb[NDTA_PARMS]) {
2036 		struct nlattr *tbp[NDTPA_MAX+1];
2037 		struct neigh_parms *p;
2038 		int i, ifindex = 0;
2039 
2040 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2041 				       nl_ntbl_parm_policy, extack);
2042 		if (err < 0)
2043 			goto errout_tbl_lock;
2044 
2045 		if (tbp[NDTPA_IFINDEX])
2046 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2047 
2048 		p = lookup_neigh_parms(tbl, net, ifindex);
2049 		if (p == NULL) {
2050 			err = -ENOENT;
2051 			goto errout_tbl_lock;
2052 		}
2053 
2054 		for (i = 1; i <= NDTPA_MAX; i++) {
2055 			if (tbp[i] == NULL)
2056 				continue;
2057 
2058 			switch (i) {
2059 			case NDTPA_QUEUE_LEN:
2060 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2061 					      nla_get_u32(tbp[i]) *
2062 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2063 				break;
2064 			case NDTPA_QUEUE_LENBYTES:
2065 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2066 					      nla_get_u32(tbp[i]));
2067 				break;
2068 			case NDTPA_PROXY_QLEN:
2069 				NEIGH_VAR_SET(p, PROXY_QLEN,
2070 					      nla_get_u32(tbp[i]));
2071 				break;
2072 			case NDTPA_APP_PROBES:
2073 				NEIGH_VAR_SET(p, APP_PROBES,
2074 					      nla_get_u32(tbp[i]));
2075 				break;
2076 			case NDTPA_UCAST_PROBES:
2077 				NEIGH_VAR_SET(p, UCAST_PROBES,
2078 					      nla_get_u32(tbp[i]));
2079 				break;
2080 			case NDTPA_MCAST_PROBES:
2081 				NEIGH_VAR_SET(p, MCAST_PROBES,
2082 					      nla_get_u32(tbp[i]));
2083 				break;
2084 			case NDTPA_MCAST_REPROBES:
2085 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2086 					      nla_get_u32(tbp[i]));
2087 				break;
2088 			case NDTPA_BASE_REACHABLE_TIME:
2089 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2090 					      nla_get_msecs(tbp[i]));
2091 				/* Update reachable_time as well; otherwise the change
2092 				 * only takes effect the next time neigh_periodic_work
2093 				 * recomputes it (which can be many minutes away).
2094 				 */
2095 				p->reachable_time =
2096 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2097 				break;
2098 			case NDTPA_GC_STALETIME:
2099 				NEIGH_VAR_SET(p, GC_STALETIME,
2100 					      nla_get_msecs(tbp[i]));
2101 				break;
2102 			case NDTPA_DELAY_PROBE_TIME:
2103 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2104 					      nla_get_msecs(tbp[i]));
2105 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2106 				break;
2107 			case NDTPA_RETRANS_TIME:
2108 				NEIGH_VAR_SET(p, RETRANS_TIME,
2109 					      nla_get_msecs(tbp[i]));
2110 				break;
2111 			case NDTPA_ANYCAST_DELAY:
2112 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2113 					      nla_get_msecs(tbp[i]));
2114 				break;
2115 			case NDTPA_PROXY_DELAY:
2116 				NEIGH_VAR_SET(p, PROXY_DELAY,
2117 					      nla_get_msecs(tbp[i]));
2118 				break;
2119 			case NDTPA_LOCKTIME:
2120 				NEIGH_VAR_SET(p, LOCKTIME,
2121 					      nla_get_msecs(tbp[i]));
2122 				break;
2123 			}
2124 		}
2125 	}
2126 
2127 	err = -ENOENT;
2128 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2129 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2130 	    !net_eq(net, &init_net))
2131 		goto errout_tbl_lock;
2132 
2133 	if (tb[NDTA_THRESH1])
2134 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2135 
2136 	if (tb[NDTA_THRESH2])
2137 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2138 
2139 	if (tb[NDTA_THRESH3])
2140 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2141 
2142 	if (tb[NDTA_GC_INTERVAL])
2143 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2144 
2145 	err = 0;
2146 
2147 errout_tbl_lock:
2148 	write_unlock_bh(&tbl->lock);
2149 errout:
2150 	return err;
2151 }
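
/* Worked example for the NDTPA_QUEUE_LEN case above: the legacy
 * attribute counts packets while the kernel stores bytes, so a request
 * for N packets is stored as N * SKB_TRUESIZE(ETH_FRAME_LEN).
 * SKB_TRUESIZE() adds the aligned sk_buff and skb_shared_info overhead
 * on top of the 1514-byte frame, so each "packet" is budgeted at
 * roughly 2 KiB on a typical configuration.
 */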
2152 
2153 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2154 {
2155 	struct net *net = sock_net(skb->sk);
2156 	int family, tidx, nidx = 0;
2157 	int tbl_skip = cb->args[0];
2158 	int neigh_skip = cb->args[1];
2159 	struct neigh_table *tbl;
2160 
2161 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2162 
2163 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2164 		struct neigh_parms *p;
2165 
2166 		tbl = neigh_tables[tidx];
2167 		if (!tbl)
2168 			continue;
2169 
2170 		if (tidx < tbl_skip || (family && tbl->family != family))
2171 			continue;
2172 
2173 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2174 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2175 				       NLM_F_MULTI) < 0)
2176 			break;
2177 
2178 		nidx = 0;
2179 		p = list_next_entry(&tbl->parms, list);
2180 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2181 			if (!net_eq(neigh_parms_net(p), net))
2182 				continue;
2183 
2184 			if (nidx < neigh_skip)
2185 				goto next;
2186 
2187 			if (neightbl_fill_param_info(skb, tbl, p,
2188 						     NETLINK_CB(cb->skb).portid,
2189 						     cb->nlh->nlmsg_seq,
2190 						     RTM_NEWNEIGHTBL,
2191 						     NLM_F_MULTI) < 0)
2192 				goto out;
2193 		next:
2194 			nidx++;
2195 		}
2196 
2197 		neigh_skip = 0;
2198 	}
2199 out:
2200 	cb->args[0] = tidx;
2201 	cb->args[1] = nidx;
2202 
2203 	return skb->len;
2204 }
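
/* Dump-resume note: neightbl_dump_info() keeps its position in the
 * netlink callback scratch space -- cb->args[0] holds the table index
 * and cb->args[1] the per-table parms index -- so a dump that fills
 * one skb picks up where it left off on the next recvmsg().
 */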
2205 
2206 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2207 			   u32 pid, u32 seq, int type, unsigned int flags)
2208 {
2209 	unsigned long now = jiffies;
2210 	struct nda_cacheinfo ci;
2211 	struct nlmsghdr *nlh;
2212 	struct ndmsg *ndm;
2213 
2214 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2215 	if (nlh == NULL)
2216 		return -EMSGSIZE;
2217 
2218 	ndm = nlmsg_data(nlh);
2219 	ndm->ndm_family	 = neigh->ops->family;
2220 	ndm->ndm_pad1    = 0;
2221 	ndm->ndm_pad2    = 0;
2222 	ndm->ndm_flags	 = neigh->flags;
2223 	ndm->ndm_type	 = neigh->type;
2224 	ndm->ndm_ifindex = neigh->dev->ifindex;
2225 
2226 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2227 		goto nla_put_failure;
2228 
2229 	read_lock_bh(&neigh->lock);
2230 	ndm->ndm_state	 = neigh->nud_state;
2231 	if (neigh->nud_state & NUD_VALID) {
2232 		char haddr[MAX_ADDR_LEN];
2233 
2234 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2235 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2236 			read_unlock_bh(&neigh->lock);
2237 			goto nla_put_failure;
2238 		}
2239 	}
2240 
2241 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2242 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2243 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2244 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2245 	read_unlock_bh(&neigh->lock);
2246 
2247 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2248 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2249 		goto nla_put_failure;
2250 
2251 	nlmsg_end(skb, nlh);
2252 	return 0;
2253 
2254 nla_put_failure:
2255 	nlmsg_cancel(skb, nlh);
2256 	return -EMSGSIZE;
2257 }
2258 
2259 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2260 			    u32 pid, u32 seq, int type, unsigned int flags,
2261 			    struct neigh_table *tbl)
2262 {
2263 	struct nlmsghdr *nlh;
2264 	struct ndmsg *ndm;
2265 
2266 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2267 	if (nlh == NULL)
2268 		return -EMSGSIZE;
2269 
2270 	ndm = nlmsg_data(nlh);
2271 	ndm->ndm_family	 = tbl->family;
2272 	ndm->ndm_pad1    = 0;
2273 	ndm->ndm_pad2    = 0;
2274 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2275 	ndm->ndm_type	 = RTN_UNICAST;
2276 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2277 	ndm->ndm_state	 = NUD_NONE;
2278 
2279 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2280 		goto nla_put_failure;
2281 
2282 	nlmsg_end(skb, nlh);
2283 	return 0;
2284 
2285 nla_put_failure:
2286 	nlmsg_cancel(skb, nlh);
2287 	return -EMSGSIZE;
2288 }
2289 
2290 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2291 {
2292 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2293 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2294 }
2295 
2296 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2297 {
2298 	struct net_device *master;
2299 
2300 	if (!master_idx)
2301 		return false;
2302 
2303 	master = netdev_master_upper_dev_get(dev);
2304 	if (!master || master->ifindex != master_idx)
2305 		return true;
2306 
2307 	return false;
2308 }
2309 
2310 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2311 {
2312 	if (filter_idx && dev->ifindex != filter_idx)
2313 		return true;
2314 
2315 	return false;
2316 }
2317 
2318 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2319 			    struct netlink_callback *cb)
2320 {
2321 	struct net *net = sock_net(skb->sk);
2322 	const struct nlmsghdr *nlh = cb->nlh;
2323 	struct nlattr *tb[NDA_MAX + 1];
2324 	struct neighbour *n;
2325 	int rc, h, s_h = cb->args[1];
2326 	int idx, s_idx = idx = cb->args[2];
2327 	struct neigh_hash_table *nht;
2328 	int filter_master_idx = 0, filter_idx = 0;
2329 	unsigned int flags = NLM_F_MULTI;
2330 	int err;
2331 
2332 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
2333 	if (!err) {
2334 		if (tb[NDA_IFINDEX]) {
2335 			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2336 				return -EINVAL;
2337 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2338 		}
2339 		if (tb[NDA_MASTER]) {
2340 			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2341 				return -EINVAL;
2342 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2343 		}
2344 		if (filter_idx || filter_master_idx)
2345 			flags |= NLM_F_DUMP_FILTERED;
2346 	}
2347 
2348 	rcu_read_lock_bh();
2349 	nht = rcu_dereference_bh(tbl->nht);
2350 
2351 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2352 		if (h > s_h)
2353 			s_idx = 0;
2354 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2355 		     n != NULL;
2356 		     n = rcu_dereference_bh(n->next)) {
2357 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2358 				goto next;
2359 			if (neigh_ifindex_filtered(n->dev, filter_idx) ||
2360 			    neigh_master_filtered(n->dev, filter_master_idx))
2361 				goto next;
2362 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2363 					    cb->nlh->nlmsg_seq,
2364 					    RTM_NEWNEIGH,
2365 					    flags) < 0) {
2366 				rc = -1;
2367 				goto out;
2368 			}
2369 next:
2370 			idx++;
2371 		}
2372 	}
2373 	rc = skb->len;
2374 out:
2375 	rcu_read_unlock_bh();
2376 	cb->args[1] = h;
2377 	cb->args[2] = idx;
2378 	return rc;
2379 }
2380 
2381 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2382 			     struct netlink_callback *cb)
2383 {
2384 	struct pneigh_entry *n;
2385 	struct net *net = sock_net(skb->sk);
2386 	int rc, h, s_h = cb->args[3];
2387 	int idx, s_idx = idx = cb->args[4];
2388 
2389 	read_lock_bh(&tbl->lock);
2390 
2391 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2392 		if (h > s_h)
2393 			s_idx = 0;
2394 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2395 			if (idx < s_idx || pneigh_net(n) != net)
2396 				goto next;
2397 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2398 					    cb->nlh->nlmsg_seq,
2399 					    RTM_NEWNEIGH,
2400 					    NLM_F_MULTI, tbl) < 0) {
2401 				read_unlock_bh(&tbl->lock);
2402 				rc = -1;
2403 				goto out;
2404 			}
2405 		next:
2406 			idx++;
2407 		}
2408 	}
2409 
2410 	read_unlock_bh(&tbl->lock);
2411 	rc = skb->len;
2412 out:
2413 	cb->args[3] = h;
2414 	cb->args[4] = idx;
2415 	return rc;
2417 }
2418 
2419 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2420 {
2421 	struct neigh_table *tbl;
2422 	int t, family, s_t;
2423 	int proxy = 0;
2424 	int err;
2425 
2426 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2427 
2428 	/* Check whether a full ndmsg structure is present; the family
2429 	 * member sits at the same offset in both ndmsg and rtgenmsg.
2430 	 */
2431 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2432 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2433 		proxy = 1;
2434 
2435 	s_t = cb->args[0];
2436 
2437 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2438 		tbl = neigh_tables[t];
2439 
2440 		if (!tbl)
2441 			continue;
2442 		if (t < s_t || (family && tbl->family != family))
2443 			continue;
2444 		if (t > s_t)
2445 			memset(&cb->args[1], 0, sizeof(cb->args) -
2446 						sizeof(cb->args[0]));
2447 		if (proxy)
2448 			err = pneigh_dump_table(tbl, skb, cb);
2449 		else
2450 			err = neigh_dump_table(tbl, skb, cb);
2451 		if (err < 0)
2452 			break;
2453 	}
2454 
2455 	cb->args[0] = t;
2456 	return skb->len;
2457 }
2458 
2459 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2460 {
2461 	int chain;
2462 	struct neigh_hash_table *nht;
2463 
2464 	rcu_read_lock_bh();
2465 	nht = rcu_dereference_bh(tbl->nht);
2466 
2467 	read_lock(&tbl->lock); /* avoid resizes */
2468 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2469 		struct neighbour *n;
2470 
2471 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2472 		     n != NULL;
2473 		     n = rcu_dereference_bh(n->next))
2474 			cb(n, cookie);
2475 	}
2476 	read_unlock(&tbl->lock);
2477 	rcu_read_unlock_bh();
2478 }
2479 EXPORT_SYMBOL(neigh_for_each);
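
/* Hypothetical usage sketch for neigh_for_each(): the callback runs
 * under rcu_read_lock_bh() and read_lock(&tbl->lock), so it must not
 * sleep or call back into the neighbour core.
 */
#if 0
static void count_entry(struct neighbour *n, void *cookie)
{
	(*(int *)cookie)++;
}

static int count_neighbours(struct neigh_table *tbl)
{
	int count = 0;

	neigh_for_each(tbl, count_entry, &count);
	return count;
}
#endif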
2480 
2481 /* The tbl->lock must be held as a writer and BH disabled. */
2482 void __neigh_for_each_release(struct neigh_table *tbl,
2483 			      int (*cb)(struct neighbour *))
2484 {
2485 	int chain;
2486 	struct neigh_hash_table *nht;
2487 
2488 	nht = rcu_dereference_protected(tbl->nht,
2489 					lockdep_is_held(&tbl->lock));
2490 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2491 		struct neighbour *n;
2492 		struct neighbour __rcu **np;
2493 
2494 		np = &nht->hash_buckets[chain];
2495 		while ((n = rcu_dereference_protected(*np,
2496 					lockdep_is_held(&tbl->lock))) != NULL) {
2497 			int release;
2498 
2499 			write_lock(&n->lock);
2500 			release = cb(n);
2501 			if (release) {
2502 				rcu_assign_pointer(*np,
2503 					rcu_dereference_protected(n->next,
2504 						lockdep_is_held(&tbl->lock)));
2505 				n->dead = 1;
2506 			} else
2507 				np = &n->next;
2508 			write_unlock(&n->lock);
2509 			if (release)
2510 				neigh_cleanup_and_release(n);
2511 		}
2512 	}
2513 }
2514 EXPORT_SYMBOL(__neigh_for_each_release);
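
/* Hypothetical callback for __neigh_for_each_release(): returning
 * non-zero asks for the entry to be unlinked, marked dead and
 * released.  It is invoked with both tbl->lock and n->lock held for
 * writing, BH disabled.
 */
#if 0
static int release_non_permanent(struct neighbour *n)
{
	return !(n->nud_state & NUD_PERMANENT);
}
#endif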
2515 
2516 int neigh_xmit(int index, struct net_device *dev,
2517 	       const void *addr, struct sk_buff *skb)
2518 {
2519 	int err = -EAFNOSUPPORT;
2520 	if (likely(index < NEIGH_NR_TABLES)) {
2521 		struct neigh_table *tbl;
2522 		struct neighbour *neigh;
2523 
2524 		tbl = neigh_tables[index];
2525 		if (!tbl)
2526 			goto out;
2527 		rcu_read_lock_bh();
2528 		neigh = __neigh_lookup_noref(tbl, addr, dev);
2529 		if (!neigh)
2530 			neigh = __neigh_create(tbl, addr, dev, false);
2531 		err = PTR_ERR(neigh);
2532 		if (IS_ERR(neigh)) {
2533 			rcu_read_unlock_bh();
2534 			goto out_kfree_skb;
2535 		}
2536 		err = neigh->output(neigh, skb);
2537 		rcu_read_unlock_bh();
2538 	} else if (index == NEIGH_LINK_TABLE) {
2540 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2541 				      addr, NULL, skb->len);
2542 		if (err < 0)
2543 			goto out_kfree_skb;
2544 		err = dev_queue_xmit(skb);
2545 	}
2546 out:
2547 	return err;
2548 out_kfree_skb:
2549 	kfree_skb(skb);
2550 	goto out;
2551 }
2552 EXPORT_SYMBOL(neigh_xmit);
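
/* Hedged usage sketch for neigh_xmit(): hand an skb to an IPv4 next
 * hop via the ARP table.  "nexthop" is the next-hop address in network
 * byte order and skb->protocol must already be set; if neighbour
 * creation or header construction fails, the skb is freed internally.
 */
#if 0
static int xmit_ipv4(struct net_device *dev, const __be32 *nexthop,
		     struct sk_buff *skb)
{
	return neigh_xmit(NEIGH_ARP_TABLE, dev, nexthop, skb);
}
#endif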
2553 
2554 #ifdef CONFIG_PROC_FS
2555 
2556 static struct neighbour *neigh_get_first(struct seq_file *seq)
2557 {
2558 	struct neigh_seq_state *state = seq->private;
2559 	struct net *net = seq_file_net(seq);
2560 	struct neigh_hash_table *nht = state->nht;
2561 	struct neighbour *n = NULL;
2562 	int bucket = state->bucket;
2563 
2564 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2565 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2566 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2567 
2568 		while (n) {
2569 			if (!net_eq(dev_net(n->dev), net))
2570 				goto next;
2571 			if (state->neigh_sub_iter) {
2572 				loff_t fakep = 0;
2573 				void *v;
2574 
2575 				v = state->neigh_sub_iter(state, n, &fakep);
2576 				if (!v)
2577 					goto next;
2578 			}
2579 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2580 				break;
2581 			if (n->nud_state & ~NUD_NOARP)
2582 				break;
2583 next:
2584 			n = rcu_dereference_bh(n->next);
2585 		}
2586 
2587 		if (n)
2588 			break;
2589 	}
2590 	state->bucket = bucket;
2591 
2592 	return n;
2593 }
2594 
2595 static struct neighbour *neigh_get_next(struct seq_file *seq,
2596 					struct neighbour *n,
2597 					loff_t *pos)
2598 {
2599 	struct neigh_seq_state *state = seq->private;
2600 	struct net *net = seq_file_net(seq);
2601 	struct neigh_hash_table *nht = state->nht;
2602 
2603 	if (state->neigh_sub_iter) {
2604 		void *v = state->neigh_sub_iter(state, n, pos);
2605 		if (v)
2606 			return n;
2607 	}
2608 	n = rcu_dereference_bh(n->next);
2609 
2610 	while (1) {
2611 		while (n) {
2612 			if (!net_eq(dev_net(n->dev), net))
2613 				goto next;
2614 			if (state->neigh_sub_iter) {
2615 				void *v = state->neigh_sub_iter(state, n, pos);
2616 				if (v)
2617 					return n;
2618 				goto next;
2619 			}
2620 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2621 				break;
2622 
2623 			if (n->nud_state & ~NUD_NOARP)
2624 				break;
2625 next:
2626 			n = rcu_dereference_bh(n->next);
2627 		}
2628 
2629 		if (n)
2630 			break;
2631 
2632 		if (++state->bucket >= (1 << nht->hash_shift))
2633 			break;
2634 
2635 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2636 	}
2637 
2638 	if (n && pos)
2639 		--(*pos);
2640 	return n;
2641 }
2642 
2643 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2644 {
2645 	struct neighbour *n = neigh_get_first(seq);
2646 
2647 	if (n) {
2648 		--(*pos);
2649 		while (*pos) {
2650 			n = neigh_get_next(seq, n, pos);
2651 			if (!n)
2652 				break;
2653 		}
2654 	}
2655 	return *pos ? NULL : n;
2656 }
2657 
2658 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2659 {
2660 	struct neigh_seq_state *state = seq->private;
2661 	struct net *net = seq_file_net(seq);
2662 	struct neigh_table *tbl = state->tbl;
2663 	struct pneigh_entry *pn = NULL;
2664 	int bucket = state->bucket;
2665 
2666 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2667 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2668 		pn = tbl->phash_buckets[bucket];
2669 		while (pn && !net_eq(pneigh_net(pn), net))
2670 			pn = pn->next;
2671 		if (pn)
2672 			break;
2673 	}
2674 	state->bucket = bucket;
2675 
2676 	return pn;
2677 }
2678 
2679 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2680 					    struct pneigh_entry *pn,
2681 					    loff_t *pos)
2682 {
2683 	struct neigh_seq_state *state = seq->private;
2684 	struct net *net = seq_file_net(seq);
2685 	struct neigh_table *tbl = state->tbl;
2686 
2687 	do {
2688 		pn = pn->next;
2689 	} while (pn && !net_eq(pneigh_net(pn), net));
2690 
2691 	while (!pn) {
2692 		if (++state->bucket > PNEIGH_HASHMASK)
2693 			break;
2694 		pn = tbl->phash_buckets[state->bucket];
2695 		while (pn && !net_eq(pneigh_net(pn), net))
2696 			pn = pn->next;
2697 		if (pn)
2698 			break;
2699 	}
2700 
2701 	if (pn && pos)
2702 		--(*pos);
2703 
2704 	return pn;
2705 }
2706 
2707 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2708 {
2709 	struct pneigh_entry *pn = pneigh_get_first(seq);
2710 
2711 	if (pn) {
2712 		--(*pos);
2713 		while (*pos) {
2714 			pn = pneigh_get_next(seq, pn, pos);
2715 			if (!pn)
2716 				break;
2717 		}
2718 	}
2719 	return *pos ? NULL : pn;
2720 }
2721 
2722 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2723 {
2724 	struct neigh_seq_state *state = seq->private;
2725 	void *rc;
2726 	loff_t idxpos = *pos;
2727 
2728 	rc = neigh_get_idx(seq, &idxpos);
2729 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2730 		rc = pneigh_get_idx(seq, &idxpos);
2731 
2732 	return rc;
2733 }
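
/* Position note: the seq_file offset space is shared -- neighbour
 * entries come first and, once neigh_get_idx() exhausts them, the
 * remaining offset indexes into the proxy (pneigh) entries.  That is
 * why both _get_idx() helpers decrement *pos as they walk.
 */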
2734 
2735 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2736 	__acquires(rcu_bh)
2737 {
2738 	struct neigh_seq_state *state = seq->private;
2739 
2740 	state->tbl = tbl;
2741 	state->bucket = 0;
2742 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2743 
2744 	rcu_read_lock_bh();
2745 	state->nht = rcu_dereference_bh(tbl->nht);
2746 
2747 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2748 }
2749 EXPORT_SYMBOL(neigh_seq_start);
2750 
2751 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2752 {
2753 	struct neigh_seq_state *state;
2754 	void *rc;
2755 
2756 	if (v == SEQ_START_TOKEN) {
2757 		rc = neigh_get_first(seq);
2758 		goto out;
2759 	}
2760 
2761 	state = seq->private;
2762 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2763 		rc = neigh_get_next(seq, v, NULL);
2764 		if (rc)
2765 			goto out;
2766 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2767 			rc = pneigh_get_first(seq);
2768 	} else {
2769 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2770 		rc = pneigh_get_next(seq, v, NULL);
2771 	}
2772 out:
2773 	++(*pos);
2774 	return rc;
2775 }
2776 EXPORT_SYMBOL(neigh_seq_next);
2777 
2778 void neigh_seq_stop(struct seq_file *seq, void *v)
2779 	__releases(rcu_bh)
2780 {
2781 	rcu_read_unlock_bh();
2782 }
2783 EXPORT_SYMBOL(neigh_seq_stop);
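
/* Hedged sketch of how a protocol wires these helpers into its own
 * /proc iterator (ARP does essentially this; the ->show method below
 * is hypothetical).
 */
#if 0
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations proto_seq_ops = {
	.start	= proto_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= proto_seq_show,
};
#endif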
2784 
2785 /* statistics via seq_file */
2786 
2787 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2788 {
2789 	struct neigh_table *tbl = seq->private;
2790 	int cpu;
2791 
2792 	if (*pos == 0)
2793 		return SEQ_START_TOKEN;
2794 
2795 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2796 		if (!cpu_possible(cpu))
2797 			continue;
2798 		*pos = cpu+1;
2799 		return per_cpu_ptr(tbl->stats, cpu);
2800 	}
2801 	return NULL;
2802 }
2803 
2804 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2805 {
2806 	struct neigh_table *tbl = seq->private;
2807 	int cpu;
2808 
2809 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2810 		if (!cpu_possible(cpu))
2811 			continue;
2812 		*pos = cpu+1;
2813 		return per_cpu_ptr(tbl->stats, cpu);
2814 	}
2815 	return NULL;
2816 }
2817 
2818 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2819 {
2820 	/* Nothing to release: the per-CPU stat iteration takes no locks. */
2821 }
2822 
2823 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2824 {
2825 	struct neigh_table *tbl = seq->private;
2826 	struct neigh_statistics *st = v;
2827 
2828 	if (v == SEQ_START_TOKEN) {
2829 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2830 		return 0;
2831 	}
2832 
2833 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2834 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2835 		   atomic_read(&tbl->entries),
2836 
2837 		   st->allocs,
2838 		   st->destroys,
2839 		   st->hash_grows,
2840 
2841 		   st->lookups,
2842 		   st->hits,
2843 
2844 		   st->res_failed,
2845 
2846 		   st->rcv_probes_mcast,
2847 		   st->rcv_probes_ucast,
2848 
2849 		   st->periodic_gc_runs,
2850 		   st->forced_gc_runs,
2851 		   st->unres_discards,
2852 		   st->table_fulls
2853 		   );
2854 
2855 	return 0;
2856 }
2857 
2858 static const struct seq_operations neigh_stat_seq_ops = {
2859 	.start	= neigh_stat_seq_start,
2860 	.next	= neigh_stat_seq_next,
2861 	.stop	= neigh_stat_seq_stop,
2862 	.show	= neigh_stat_seq_show,
2863 };
2864 
2865 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2866 {
2867 	int ret = seq_open(file, &neigh_stat_seq_ops);
2868 
2869 	if (!ret) {
2870 		struct seq_file *sf = file->private_data;
2871 		sf->private = PDE_DATA(inode);
2872 	}
2873 	return ret;
2874 }
2875 
2876 static const struct file_operations neigh_stat_seq_fops = {
2877 	.open 	 = neigh_stat_seq_open,
2878 	.read	 = seq_read,
2879 	.llseek	 = seq_lseek,
2880 	.release = seq_release,
2881 };
2882 
2883 #endif /* CONFIG_PROC_FS */
2884 
2885 static inline size_t neigh_nlmsg_size(void)
2886 {
2887 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2888 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2889 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2890 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2891 	       + nla_total_size(4); /* NDA_PROBES */
2892 }
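
/* Worked size, assuming MAX_ADDR_LEN == 32: NLMSG_ALIGN(12) for the
 * ndmsg header, plus nla_total_size(32) twice (NDA_DST, NDA_LLADDR),
 * nla_total_size(16) for NDA_CACHEINFO and nla_total_size(4) for
 * NDA_PROBES: 12 + 36 + 36 + 20 + 8 = 112 bytes, an upper bound for
 * any single neighbour message regardless of address family.
 */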
2893 
2894 static void __neigh_notify(struct neighbour *n, int type, int flags,
2895 			   u32 pid)
2896 {
2897 	struct net *net = dev_net(n->dev);
2898 	struct sk_buff *skb;
2899 	int err = -ENOBUFS;
2900 
2901 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2902 	if (skb == NULL)
2903 		goto errout;
2904 
2905 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
2906 	if (err < 0) {
2907 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2908 		WARN_ON(err == -EMSGSIZE);
2909 		kfree_skb(skb);
2910 		goto errout;
2911 	}
2912 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2913 	return;
2914 errout:
2915 	if (err < 0)
2916 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2917 }
2918 
2919 void neigh_app_ns(struct neighbour *n)
2920 {
2921 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
2922 }
2923 EXPORT_SYMBOL(neigh_app_ns);
2924 
2925 #ifdef CONFIG_SYSCTL
2926 static int zero;
2927 static int int_max = INT_MAX;
2928 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2929 
2930 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2931 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2932 {
2933 	int size, ret;
2934 	struct ctl_table tmp = *ctl;
2935 
2936 	tmp.extra1 = &zero;
2937 	tmp.extra2 = &unres_qlen_max;
2938 	tmp.data = &size;
2939 
2940 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2941 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2942 
2943 	if (write && !ret)
2944 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2945 	return ret;
2946 }
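
/* Round-trip note: the division above truncates, so a qlen_bytes
 * value that is not an exact multiple of SKB_TRUESIZE(ETH_FRAME_LEN)
 * reads back as fewer packets, and writing that packet count straight
 * back shrinks the byte limit to the truncated multiple.
 */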
2947 
2948 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2949 						   int family)
2950 {
2951 	switch (family) {
2952 	case AF_INET:
2953 		return __in_dev_arp_parms_get_rcu(dev);
2954 	case AF_INET6:
2955 		return __in6_dev_nd_parms_get_rcu(dev);
2956 	}
2957 	return NULL;
2958 }
2959 
2960 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2961 				  int index)
2962 {
2963 	struct net_device *dev;
2964 	int family = neigh_parms_family(p);
2965 
2966 	rcu_read_lock();
2967 	for_each_netdev_rcu(net, dev) {
2968 		struct neigh_parms *dst_p =
2969 				neigh_get_dev_parms_rcu(dev, family);
2970 
2971 		if (dst_p && !test_bit(index, dst_p->data_state))
2972 			dst_p->data[index] = p->data[index];
2973 	}
2974 	rcu_read_unlock();
2975 }
2976 
2977 static void neigh_proc_update(struct ctl_table *ctl, int write)
2978 {
2979 	struct net_device *dev = ctl->extra1;
2980 	struct neigh_parms *p = ctl->extra2;
2981 	struct net *net = neigh_parms_net(p);
2982 	int index = (int *) ctl->data - p->data;
2983 
2984 	if (!write)
2985 		return;
2986 
2987 	set_bit(index, p->data_state);
2988 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2989 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2990 	if (!dev) /* NULL dev means this is default value */
2991 		neigh_copy_dflt_parms(net, p, index);
2992 }
2993 
2994 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2995 					   void __user *buffer,
2996 					   size_t *lenp, loff_t *ppos)
2997 {
2998 	struct ctl_table tmp = *ctl;
2999 	int ret;
3000 
3001 	tmp.extra1 = &zero;
3002 	tmp.extra2 = &int_max;
3003 
3004 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3005 	neigh_proc_update(ctl, write);
3006 	return ret;
3007 }
3008 
3009 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3010 			void __user *buffer, size_t *lenp, loff_t *ppos)
3011 {
3012 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3013 
3014 	neigh_proc_update(ctl, write);
3015 	return ret;
3016 }
3017 EXPORT_SYMBOL(neigh_proc_dointvec);
3018 
3019 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3020 				void __user *buffer,
3021 				size_t *lenp, loff_t *ppos)
3022 {
3023 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3024 
3025 	neigh_proc_update(ctl, write);
3026 	return ret;
3027 }
3028 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3029 
3030 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3031 					      void __user *buffer,
3032 					      size_t *lenp, loff_t *ppos)
3033 {
3034 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3035 
3036 	neigh_proc_update(ctl, write);
3037 	return ret;
3038 }
3039 
3040 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3041 				   void __user *buffer,
3042 				   size_t *lenp, loff_t *ppos)
3043 {
3044 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3045 
3046 	neigh_proc_update(ctl, write);
3047 	return ret;
3048 }
3049 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3050 
3051 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3052 					  void __user *buffer,
3053 					  size_t *lenp, loff_t *ppos)
3054 {
3055 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3056 
3057 	neigh_proc_update(ctl, write);
3058 	return ret;
3059 }
3060 
3061 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3062 					  void __user *buffer,
3063 					  size_t *lenp, loff_t *ppos)
3064 {
3065 	struct neigh_parms *p = ctl->extra2;
3066 	int ret;
3067 
3068 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3069 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3070 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3071 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3072 	else
3073 		ret = -1;
3074 
3075 	if (write && ret == 0) {
3076 		/* Update reachable_time as well; otherwise the change only
3077 		 * takes effect the next time neigh_periodic_work recomputes
3078 		 * it.
3079 		 */
3080 		p->reachable_time =
3081 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3082 	}
3083 	return ret;
3084 }
3085 
3086 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3087 	(&((struct neigh_parms *) 0)->data[index])
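
/* NEIGH_PARMS_DATA_OFFSET() is a hand-rolled offsetof(): the template
 * below stores only the offset of data[index] within struct
 * neigh_parms, and neigh_sysctl_register() later rebases each entry
 * onto a real parms instance via "t->neigh_vars[i].data += (long) p".
 */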
3088 
3089 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3090 	[NEIGH_VAR_ ## attr] = { \
3091 		.procname	= name, \
3092 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3093 		.maxlen		= sizeof(int), \
3094 		.mode		= mval, \
3095 		.proc_handler	= proc, \
3096 	}
3097 
3098 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3099 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3100 
3101 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3102 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3103 
3104 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3105 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3106 
3107 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3108 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3109 
3110 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3111 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3112 
3113 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3114 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3115 
3116 static struct neigh_sysctl_table {
3117 	struct ctl_table_header *sysctl_header;
3118 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3119 } neigh_sysctl_template __read_mostly = {
3120 	.neigh_vars = {
3121 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3122 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3123 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3124 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3125 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3126 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3127 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3128 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3129 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3130 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3131 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3132 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3133 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3134 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3135 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3136 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3137 		[NEIGH_VAR_GC_INTERVAL] = {
3138 			.procname	= "gc_interval",
3139 			.maxlen		= sizeof(int),
3140 			.mode		= 0644,
3141 			.proc_handler	= proc_dointvec_jiffies,
3142 		},
3143 		[NEIGH_VAR_GC_THRESH1] = {
3144 			.procname	= "gc_thresh1",
3145 			.maxlen		= sizeof(int),
3146 			.mode		= 0644,
3147 			.extra1 	= &zero,
3148 			.extra2		= &int_max,
3149 			.proc_handler	= proc_dointvec_minmax,
3150 		},
3151 		[NEIGH_VAR_GC_THRESH2] = {
3152 			.procname	= "gc_thresh2",
3153 			.maxlen		= sizeof(int),
3154 			.mode		= 0644,
3155 			.extra1 	= &zero,
3156 			.extra2		= &int_max,
3157 			.proc_handler	= proc_dointvec_minmax,
3158 		},
3159 		[NEIGH_VAR_GC_THRESH3] = {
3160 			.procname	= "gc_thresh3",
3161 			.maxlen		= sizeof(int),
3162 			.mode		= 0644,
3163 			.extra1 	= &zero,
3164 			.extra2		= &int_max,
3165 			.proc_handler	= proc_dointvec_minmax,
3166 		},
3167 		{},
3168 	},
3169 };
3170 
3171 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3172 			  proc_handler *handler)
3173 {
3174 	int i;
3175 	struct neigh_sysctl_table *t;
3176 	const char *dev_name_source;
3177 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3178 	char *p_name;
3179 
3180 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3181 	if (!t)
3182 		goto err;
3183 
3184 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3185 		t->neigh_vars[i].data += (long) p;
3186 		t->neigh_vars[i].extra1 = dev;
3187 		t->neigh_vars[i].extra2 = p;
3188 	}
3189 
3190 	if (dev) {
3191 		dev_name_source = dev->name;
3192 		/* Terminate the table early */
3193 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3194 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3195 	} else {
3196 		struct neigh_table *tbl = p->tbl;
3197 		dev_name_source = "default";
3198 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3199 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3200 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3201 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3202 	}
3203 
3204 	if (handler) {
3205 		/* RetransTime */
3206 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3207 		/* ReachableTime */
3208 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3209 		/* RetransTime (in milliseconds)*/
3210 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3211 		/* ReachableTime (in milliseconds) */
3212 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3213 	} else {
3214 		/* These handlers update p->reachable_time after
3215 		 * base_reachable_time(_ms) is set, so the new interval takes
3216 		 * effect on the next neighbour update instead of waiting for
3217 		 * neigh_periodic_work to recompute it (which can take many
3218 		 * minutes).  Any handler that replaces them should do the same.
3219 		 */
3220 		/* ReachableTime */
3221 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3222 			neigh_proc_base_reachable_time;
3223 		/* ReachableTime (in milliseconds) */
3224 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3225 			neigh_proc_base_reachable_time;
3226 	}
3227 
3228 	/* Don't export sysctls to unprivileged users */
3229 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3230 		t->neigh_vars[0].procname = NULL;
3231 
3232 	switch (neigh_parms_family(p)) {
3233 	case AF_INET:
3234 	      p_name = "ipv4";
3235 	      break;
3236 	case AF_INET6:
3237 	      p_name = "ipv6";
3238 	      break;
3239 	default:
3240 	      BUG();
3241 	}
3242 
3243 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3244 		p_name, dev_name_source);
3245 	t->sysctl_header =
3246 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3247 	if (!t->sysctl_header)
3248 		goto free;
3249 
3250 	p->sysctl_table = t;
3251 	return 0;
3252 
3253 free:
3254 	kfree(t);
3255 err:
3256 	return -ENOBUFS;
3257 }
3258 EXPORT_SYMBOL(neigh_sysctl_register);
3259 
3260 void neigh_sysctl_unregister(struct neigh_parms *p)
3261 {
3262 	if (p->sysctl_table) {
3263 		struct neigh_sysctl_table *t = p->sysctl_table;
3264 		p->sysctl_table = NULL;
3265 		unregister_net_sysctl_table(t->sysctl_header);
3266 		kfree(t);
3267 	}
3268 }
3269 EXPORT_SYMBOL(neigh_sysctl_unregister);
3270 
3271 #endif	/* CONFIG_SYSCTL */
3272 
3273 static int __init neigh_init(void)
3274 {
3275 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3276 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3277 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
3278 
3279 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3280 		      0);
3281 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3282 
3283 	return 0;
3284 }
3285 
3286 subsys_initcall(neigh_init);
3287 
3288