xref: /openbmc/linux/net/core/neighbour.c (revision aa0dc6a7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Generic address resolution entity
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
8  *
9  *	Fixes:
10  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
11  *	Harald Welte		Add neighbour cache statistics like rtstat
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/slab.h>
17 #include <linux/kmemleak.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/arp.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <net/netevent.h>
34 #include <net/netlink.h>
35 #include <linux/rtnetlink.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/log2.h>
39 #include <linux/inetdevice.h>
40 #include <net/addrconf.h>
41 
42 #include <trace/events/neigh.h>
43 
44 #define NEIGH_DEBUG 1
45 #define neigh_dbg(level, fmt, ...)		\
46 do {						\
47 	if (level <= NEIGH_DEBUG)		\
48 		pr_debug(fmt, ##__VA_ARGS__);	\
49 } while (0)
50 
51 #define PNEIGH_HASHMASK		0xF
52 
53 static void neigh_timer_handler(struct timer_list *t);
54 static void __neigh_notify(struct neighbour *n, int type, int flags,
55 			   u32 pid);
56 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58 				    struct net_device *dev);
59 
60 #ifdef CONFIG_PROC_FS
61 static const struct seq_operations neigh_stat_seq_ops;
62 #endif
63 
64 /*
65    Neighbour hash table buckets are protected by the rwlock tbl->lock.
66 
67    - All scans of and updates to hash buckets MUST be made under this lock.
68    - NOTHING clever should be done under this lock: no callbacks
69      into protocol backends, no attempts to send anything to the network.
70      Doing so will deadlock if the backend/driver wants to use the
71      neighbour cache.
72    - If an entry requires some non-trivial action, increase
73      its reference count and release the table lock.
74 
75    Neighbour entries are protected:
76    - by their reference count.
77    - by the rwlock neigh->lock.
78 
79    The reference count prevents destruction.
80 
81    neigh->lock mainly serializes the link-layer address data and its
82    validity state. The same lock also protects other entry fields:
83     - the timer
84     - the resolution queue
85 
86    Again, nothing clever shall be done under neigh->lock;
87    the most complicated operation we allow is dev->hard_header.
88    dev->hard_header is assumed to be simple and to make no
89    callbacks into neighbour tables.
90  */
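
/* A minimal sketch of the rule above (illustrative, not a real call site):
 * to do non-trivial work on an entry found during a table scan, take a
 * reference under tbl->lock, drop the lock, and only then act:
 *
 *	write_lock_bh(&tbl->lock);
 *	n = <entry found in a hash bucket>;
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	<transmit, call into the driver, etc.>;
 *	neigh_release(n);
 */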
91 
92 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
93 {
94 	kfree_skb(skb);
95 	return -ENETDOWN;
96 }
97 
98 static void neigh_cleanup_and_release(struct neighbour *neigh)
99 {
100 	trace_neigh_cleanup_and_release(neigh, 0);
101 	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
102 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
103 	neigh_release(neigh);
104 }
105 
106 /*
107  * Returns a value uniformly distributed in the interval
108  * (1/2)*base ... (3/2)*base. This corresponds to the default IPv6
109  * settings and is not overridable, because it is a really reasonable choice.
110  */
111 
112 unsigned long neigh_rand_reach_time(unsigned long base)
113 {
114 	return base ? (prandom_u32() % base) + (base >> 1) : 0;
115 }
116 EXPORT_SYMBOL(neigh_rand_reach_time);
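
/* For example, with a base_reachable_time of 30 seconds (base = 30 * HZ),
 * the result lies in [15 * HZ, 45 * HZ), i.e. reachable_time is
 * re-randomized to between 15 and 45 seconds.
 */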
117 
118 static void neigh_mark_dead(struct neighbour *n)
119 {
120 	n->dead = 1;
121 	if (!list_empty(&n->gc_list)) {
122 		list_del_init(&n->gc_list);
123 		atomic_dec(&n->tbl->gc_entries);
124 	}
125 }
126 
127 static void neigh_update_gc_list(struct neighbour *n)
128 {
129 	bool on_gc_list, exempt_from_gc;
130 
131 	write_lock_bh(&n->tbl->lock);
132 	write_lock(&n->lock);
133 
134 	if (n->dead)
135 		goto out;
136 
137 	/* remove from the gc list if the new state is permanent or the entry
138 	 * is externally learned; otherwise the entry belongs on the gc list
139 	 */
140 	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
141 			 n->flags & NTF_EXT_LEARNED;
142 	on_gc_list = !list_empty(&n->gc_list);
143 
144 	if (exempt_from_gc && on_gc_list) {
145 		list_del_init(&n->gc_list);
146 		atomic_dec(&n->tbl->gc_entries);
147 	} else if (!exempt_from_gc && !on_gc_list) {
148 		/* add entries to the tail; cleaning removes from the front */
149 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
150 		atomic_inc(&n->tbl->gc_entries);
151 	}
152 
153 out:
154 	write_unlock(&n->lock);
155 	write_unlock_bh(&n->tbl->lock);
156 }
157 
158 static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
159 				     int *notify)
160 {
161 	bool rc = false;
162 	u8 ndm_flags;
163 
164 	if (!(flags & NEIGH_UPDATE_F_ADMIN))
165 		return rc;
166 
167 	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
168 	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
169 		if (ndm_flags & NTF_EXT_LEARNED)
170 			neigh->flags |= NTF_EXT_LEARNED;
171 		else
172 			neigh->flags &= ~NTF_EXT_LEARNED;
173 		rc = true;
174 		*notify = 1;
175 	}
176 
177 	return rc;
178 }
179 
180 static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
181 		      struct neigh_table *tbl)
182 {
183 	bool retval = false;
184 
185 	write_lock(&n->lock);
186 	if (refcount_read(&n->refcnt) == 1) {
187 		struct neighbour *neigh;
188 
189 		neigh = rcu_dereference_protected(n->next,
190 						  lockdep_is_held(&tbl->lock));
191 		rcu_assign_pointer(*np, neigh);
192 		neigh_mark_dead(n);
193 		retval = true;
194 	}
195 	write_unlock(&n->lock);
196 	if (retval)
197 		neigh_cleanup_and_release(n);
198 	return retval;
199 }
200 
201 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
202 {
203 	struct neigh_hash_table *nht;
204 	void *pkey = ndel->primary_key;
205 	u32 hash_val;
206 	struct neighbour *n;
207 	struct neighbour __rcu **np;
208 
209 	nht = rcu_dereference_protected(tbl->nht,
210 					lockdep_is_held(&tbl->lock));
211 	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
212 	hash_val = hash_val >> (32 - nht->hash_shift);
213 
214 	np = &nht->hash_buckets[hash_val];
215 	while ((n = rcu_dereference_protected(*np,
216 					      lockdep_is_held(&tbl->lock)))) {
217 		if (n == ndel)
218 			return neigh_del(n, np, tbl);
219 		np = &n->next;
220 	}
221 	return false;
222 }
223 
224 static int neigh_forced_gc(struct neigh_table *tbl)
225 {
226 	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
227 	unsigned long tref = jiffies - 5 * HZ;
228 	struct neighbour *n, *tmp;
229 	int shrunk = 0;
230 
231 	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
232 
233 	write_lock_bh(&tbl->lock);
234 
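	/* Walk the gc_list from the front: entries are appended at the tail,
	 * so the oldest candidates are seen first. Unreferenced entries that
	 * are FAILED, NOARP, multicast, or not updated for more than five
	 * seconds are dropped, up to max_clean of them. */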
235 	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
236 		if (refcount_read(&n->refcnt) == 1) {
237 			bool remove = false;
238 
239 			write_lock(&n->lock);
240 			if ((n->nud_state == NUD_FAILED) ||
241 			    (n->nud_state == NUD_NOARP) ||
242 			    (tbl->is_multicast &&
243 			     tbl->is_multicast(n->primary_key)) ||
244 			    time_after(tref, n->updated))
245 				remove = true;
246 			write_unlock(&n->lock);
247 
248 			if (remove && neigh_remove_one(n, tbl))
249 				shrunk++;
250 			if (shrunk >= max_clean)
251 				break;
252 		}
253 	}
254 
255 	tbl->last_flush = jiffies;
256 
257 	write_unlock_bh(&tbl->lock);
258 
259 	return shrunk;
260 }
261 
262 static void neigh_add_timer(struct neighbour *n, unsigned long when)
263 {
264 	neigh_hold(n);
265 	if (unlikely(mod_timer(&n->timer, when))) {
266 		printk("NEIGH: BUG, double timer add, state is %x\n",
267 		       n->nud_state);
268 		dump_stack();
269 	}
270 }
271 
272 static int neigh_del_timer(struct neighbour *n)
273 {
274 	if ((n->nud_state & NUD_IN_TIMER) &&
275 	    del_timer(&n->timer)) {
276 		neigh_release(n);
277 		return 1;
278 	}
279 	return 0;
280 }
281 
282 static void pneigh_queue_purge(struct sk_buff_head *list)
283 {
284 	struct sk_buff *skb;
285 
286 	while ((skb = skb_dequeue(list)) != NULL) {
287 		dev_put(skb->dev);
288 		kfree_skb(skb);
289 	}
290 }
291 
292 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
293 			    bool skip_perm)
294 {
295 	int i;
296 	struct neigh_hash_table *nht;
297 
298 	nht = rcu_dereference_protected(tbl->nht,
299 					lockdep_is_held(&tbl->lock));
300 
301 	for (i = 0; i < (1 << nht->hash_shift); i++) {
302 		struct neighbour *n;
303 		struct neighbour __rcu **np = &nht->hash_buckets[i];
304 
305 		while ((n = rcu_dereference_protected(*np,
306 					lockdep_is_held(&tbl->lock))) != NULL) {
307 			if (dev && n->dev != dev) {
308 				np = &n->next;
309 				continue;
310 			}
311 			if (skip_perm && n->nud_state & NUD_PERMANENT) {
312 				np = &n->next;
313 				continue;
314 			}
315 			rcu_assign_pointer(*np,
316 				   rcu_dereference_protected(n->next,
317 						lockdep_is_held(&tbl->lock)));
318 			write_lock(&n->lock);
319 			neigh_del_timer(n);
320 			neigh_mark_dead(n);
321 			if (refcount_read(&n->refcnt) != 1) {
322 				/* The most unpleasant situation:
323 				   we must destroy the neighbour entry,
324 				   but someone still uses it.
325 
326 				   Destruction will be delayed until the
327 				   last user releases it, but we must kill
328 				   timers etc. and move the entry to a
329 				   safe state.
330 				 */
331 				__skb_queue_purge(&n->arp_queue);
332 				n->arp_queue_len_bytes = 0;
333 				n->output = neigh_blackhole;
334 				if (n->nud_state & NUD_VALID)
335 					n->nud_state = NUD_NOARP;
336 				else
337 					n->nud_state = NUD_NONE;
338 				neigh_dbg(2, "neigh %p is stray\n", n);
339 			}
340 			write_unlock(&n->lock);
341 			neigh_cleanup_and_release(n);
342 		}
343 	}
344 }
345 
346 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
347 {
348 	write_lock_bh(&tbl->lock);
349 	neigh_flush_dev(tbl, dev, false);
350 	write_unlock_bh(&tbl->lock);
351 }
352 EXPORT_SYMBOL(neigh_changeaddr);
353 
354 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
355 			  bool skip_perm)
356 {
357 	write_lock_bh(&tbl->lock);
358 	neigh_flush_dev(tbl, dev, skip_perm);
359 	pneigh_ifdown_and_unlock(tbl, dev);
360 
361 	del_timer_sync(&tbl->proxy_timer);
362 	pneigh_queue_purge(&tbl->proxy_queue);
363 	return 0;
364 }
365 
366 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
367 {
368 	__neigh_ifdown(tbl, dev, true);
369 	return 0;
370 }
371 EXPORT_SYMBOL(neigh_carrier_down);
372 
373 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
374 {
375 	__neigh_ifdown(tbl, dev, false);
376 	return 0;
377 }
378 EXPORT_SYMBOL(neigh_ifdown);
379 
380 static struct neighbour *neigh_alloc(struct neigh_table *tbl,
381 				     struct net_device *dev,
382 				     bool exempt_from_gc)
383 {
384 	struct neighbour *n = NULL;
385 	unsigned long now = jiffies;
386 	int entries;
387 
388 	if (exempt_from_gc)
389 		goto do_alloc;
390 
391 	entries = atomic_inc_return(&tbl->gc_entries) - 1;
392 	if (entries >= tbl->gc_thresh3 ||
393 	    (entries >= tbl->gc_thresh2 &&
394 	     time_after(now, tbl->last_flush + 5 * HZ))) {
395 		if (!neigh_forced_gc(tbl) &&
396 		    entries >= tbl->gc_thresh3) {
397 			net_info_ratelimited("%s: neighbor table overflow!\n",
398 					     tbl->id);
399 			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
400 			goto out_entries;
401 		}
402 	}
403 
404 do_alloc:
405 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
406 	if (!n)
407 		goto out_entries;
408 
409 	__skb_queue_head_init(&n->arp_queue);
410 	rwlock_init(&n->lock);
411 	seqlock_init(&n->ha_lock);
412 	n->updated	  = n->used = now;
413 	n->nud_state	  = NUD_NONE;
414 	n->output	  = neigh_blackhole;
415 	seqlock_init(&n->hh.hh_lock);
416 	n->parms	  = neigh_parms_clone(&tbl->parms);
417 	timer_setup(&n->timer, neigh_timer_handler, 0);
418 
419 	NEIGH_CACHE_STAT_INC(tbl, allocs);
420 	n->tbl		  = tbl;
421 	refcount_set(&n->refcnt, 1);
422 	n->dead		  = 1;
423 	INIT_LIST_HEAD(&n->gc_list);
424 
425 	atomic_inc(&tbl->entries);
426 out:
427 	return n;
428 
429 out_entries:
430 	if (!exempt_from_gc)
431 		atomic_dec(&tbl->gc_entries);
432 	goto out;
433 }
434 
435 static void neigh_get_hash_rnd(u32 *x)
436 {
437 	*x = get_random_u32() | 1;
438 }
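
/* The per-table hash functions mix the key with hash_rnd by multiplication;
 * forcing the value odd keeps the multiplier invertible modulo 2^32, so no
 * key bits are discarded by the mix. (Assumed rationale for the "| 1".)
 */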
439 
440 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
441 {
442 	size_t size = (1 << shift) * sizeof(struct neighbour *);
443 	struct neigh_hash_table *ret;
444 	struct neighbour __rcu **buckets;
445 	int i;
446 
447 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
448 	if (!ret)
449 		return NULL;
450 	if (size <= PAGE_SIZE) {
451 		buckets = kzalloc(size, GFP_ATOMIC);
452 	} else {
453 		buckets = (struct neighbour __rcu **)
454 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
455 					   get_order(size));
456 		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
457 	}
458 	if (!buckets) {
459 		kfree(ret);
460 		return NULL;
461 	}
462 	ret->hash_buckets = buckets;
463 	ret->hash_shift = shift;
464 	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
465 		neigh_get_hash_rnd(&ret->hash_rnd[i]);
466 	return ret;
467 }
468 
469 static void neigh_hash_free_rcu(struct rcu_head *head)
470 {
471 	struct neigh_hash_table *nht = container_of(head,
472 						    struct neigh_hash_table,
473 						    rcu);
474 	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
475 	struct neighbour __rcu **buckets = nht->hash_buckets;
476 
477 	if (size <= PAGE_SIZE) {
478 		kfree(buckets);
479 	} else {
480 		kmemleak_free(buckets);
481 		free_pages((unsigned long)buckets, get_order(size));
482 	}
483 	kfree(nht);
484 }
485 
486 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
487 						unsigned long new_shift)
488 {
489 	unsigned int i, hash;
490 	struct neigh_hash_table *new_nht, *old_nht;
491 
492 	NEIGH_CACHE_STAT_INC(tbl, hash_grows);
493 
494 	old_nht = rcu_dereference_protected(tbl->nht,
495 					    lockdep_is_held(&tbl->lock));
496 	new_nht = neigh_hash_alloc(new_shift);
497 	if (!new_nht)
498 		return old_nht;
499 
500 	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
501 		struct neighbour *n, *next;
502 
503 		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
504 						   lockdep_is_held(&tbl->lock));
505 		     n != NULL;
506 		     n = next) {
507 			hash = tbl->hash(n->primary_key, n->dev,
508 					 new_nht->hash_rnd);
509 
510 			hash >>= (32 - new_nht->hash_shift);
511 			next = rcu_dereference_protected(n->next,
512 						lockdep_is_held(&tbl->lock));
513 
514 			rcu_assign_pointer(n->next,
515 					   rcu_dereference_protected(
516 						new_nht->hash_buckets[hash],
517 						lockdep_is_held(&tbl->lock)));
518 			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
519 		}
520 	}
521 
522 	rcu_assign_pointer(tbl->nht, new_nht);
523 	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
524 	return new_nht;
525 }
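
/* Readers may still be traversing the old table under RCU while the grow is
 * in progress; entries are re-chained into the new buckets under tbl->lock,
 * and the old bucket array is only freed after an RCU grace period.
 */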
526 
527 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
528 			       struct net_device *dev)
529 {
530 	struct neighbour *n;
531 
532 	NEIGH_CACHE_STAT_INC(tbl, lookups);
533 
534 	rcu_read_lock_bh();
535 	n = __neigh_lookup_noref(tbl, pkey, dev);
536 	if (n) {
537 		if (!refcount_inc_not_zero(&n->refcnt))
538 			n = NULL;
539 		NEIGH_CACHE_STAT_INC(tbl, hits);
540 	}
541 
542 	rcu_read_unlock_bh();
543 	return n;
544 }
545 EXPORT_SYMBOL(neigh_lookup);
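
/* Usage sketch (hypothetical caller; next_hop is an assumed variable): the
 * entry returned by neigh_lookup() carries a reference that the caller must
 * drop:
 *
 *	struct neighbour *n = neigh_lookup(&arp_tbl, &next_hop, dev);
 *	if (n) {
 *		... inspect n->nud_state, n->ha ...
 *		neigh_release(n);
 *	}
 */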
546 
547 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
548 				     const void *pkey)
549 {
550 	struct neighbour *n;
551 	unsigned int key_len = tbl->key_len;
552 	u32 hash_val;
553 	struct neigh_hash_table *nht;
554 
555 	NEIGH_CACHE_STAT_INC(tbl, lookups);
556 
557 	rcu_read_lock_bh();
558 	nht = rcu_dereference_bh(tbl->nht);
559 	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
560 
561 	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
562 	     n != NULL;
563 	     n = rcu_dereference_bh(n->next)) {
564 		if (!memcmp(n->primary_key, pkey, key_len) &&
565 		    net_eq(dev_net(n->dev), net)) {
566 			if (!refcount_inc_not_zero(&n->refcnt))
567 				n = NULL;
568 			NEIGH_CACHE_STAT_INC(tbl, hits);
569 			break;
570 		}
571 	}
572 
573 	rcu_read_unlock_bh();
574 	return n;
575 }
576 EXPORT_SYMBOL(neigh_lookup_nodev);
577 
578 static struct neighbour *___neigh_create(struct neigh_table *tbl,
579 					 const void *pkey,
580 					 struct net_device *dev,
581 					 bool exempt_from_gc, bool want_ref)
582 {
583 	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
584 	u32 hash_val;
585 	unsigned int key_len = tbl->key_len;
586 	int error;
587 	struct neigh_hash_table *nht;
588 
589 	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
590 
591 	if (!n) {
592 		rc = ERR_PTR(-ENOBUFS);
593 		goto out;
594 	}
595 
596 	memcpy(n->primary_key, pkey, key_len);
597 	n->dev = dev;
598 	dev_hold(dev);
599 
600 	/* Protocol specific setup. */
601 	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
602 		rc = ERR_PTR(error);
603 		goto out_neigh_release;
604 	}
605 
606 	if (dev->netdev_ops->ndo_neigh_construct) {
607 		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
608 		if (error < 0) {
609 			rc = ERR_PTR(error);
610 			goto out_neigh_release;
611 		}
612 	}
613 
614 	/* Device specific setup. */
615 	if (n->parms->neigh_setup &&
616 	    (error = n->parms->neigh_setup(n)) < 0) {
617 		rc = ERR_PTR(error);
618 		goto out_neigh_release;
619 	}
620 
621 	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
622 
623 	write_lock_bh(&tbl->lock);
624 	nht = rcu_dereference_protected(tbl->nht,
625 					lockdep_is_held(&tbl->lock));
626 
627 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
628 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
629 
630 	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
631 
632 	if (n->parms->dead) {
633 		rc = ERR_PTR(-EINVAL);
634 		goto out_tbl_unlock;
635 	}
636 
637 	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
638 					    lockdep_is_held(&tbl->lock));
639 	     n1 != NULL;
640 	     n1 = rcu_dereference_protected(n1->next,
641 			lockdep_is_held(&tbl->lock))) {
642 		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
643 			if (want_ref)
644 				neigh_hold(n1);
645 			rc = n1;
646 			goto out_tbl_unlock;
647 		}
648 	}
649 
650 	n->dead = 0;
651 	if (!exempt_from_gc)
652 		list_add_tail(&n->gc_list, &n->tbl->gc_list);
653 
654 	if (want_ref)
655 		neigh_hold(n);
656 	rcu_assign_pointer(n->next,
657 			   rcu_dereference_protected(nht->hash_buckets[hash_val],
658 						     lockdep_is_held(&tbl->lock)));
659 	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
660 	write_unlock_bh(&tbl->lock);
661 	neigh_dbg(2, "neigh %p is created\n", n);
662 	rc = n;
663 out:
664 	return rc;
665 out_tbl_unlock:
666 	write_unlock_bh(&tbl->lock);
667 out_neigh_release:
668 	if (!exempt_from_gc)
669 		atomic_dec(&tbl->gc_entries);
670 	neigh_release(n);
671 	goto out;
672 }
673 
674 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
675 				 struct net_device *dev, bool want_ref)
676 {
677 	return ___neigh_create(tbl, pkey, dev, false, want_ref);
678 }
679 EXPORT_SYMBOL(__neigh_create);
680 
681 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
682 {
683 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
684 	hash_val ^= (hash_val >> 16);
685 	hash_val ^= hash_val >> 8;
686 	hash_val ^= hash_val >> 4;
687 	hash_val &= PNEIGH_HASHMASK;
688 	return hash_val;
689 }
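
/* Folds the last four bytes of the key (the whole IPv4 address, or the low
 * 32 bits of an IPv6 address) down to a 4-bit proxy-bucket index in
 * 0..PNEIGH_HASHMASK.
 */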
690 
691 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
692 					      struct net *net,
693 					      const void *pkey,
694 					      unsigned int key_len,
695 					      struct net_device *dev)
696 {
697 	while (n) {
698 		if (!memcmp(n->key, pkey, key_len) &&
699 		    net_eq(pneigh_net(n), net) &&
700 		    (n->dev == dev || !n->dev))
701 			return n;
702 		n = n->next;
703 	}
704 	return NULL;
705 }
706 
707 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
708 		struct net *net, const void *pkey, struct net_device *dev)
709 {
710 	unsigned int key_len = tbl->key_len;
711 	u32 hash_val = pneigh_hash(pkey, key_len);
712 
713 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
714 				 net, pkey, key_len, dev);
715 }
716 EXPORT_SYMBOL_GPL(__pneigh_lookup);
717 
718 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
719 				    struct net *net, const void *pkey,
720 				    struct net_device *dev, int creat)
721 {
722 	struct pneigh_entry *n;
723 	unsigned int key_len = tbl->key_len;
724 	u32 hash_val = pneigh_hash(pkey, key_len);
725 
726 	read_lock_bh(&tbl->lock);
727 	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
728 			      net, pkey, key_len, dev);
729 	read_unlock_bh(&tbl->lock);
730 
731 	if (n || !creat)
732 		goto out;
733 
734 	ASSERT_RTNL();
735 
736 	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
737 	if (!n)
738 		goto out;
739 
740 	n->protocol = 0;
741 	write_pnet(&n->net, net);
742 	memcpy(n->key, pkey, key_len);
743 	n->dev = dev;
744 	if (dev)
745 		dev_hold(dev);
746 
747 	if (tbl->pconstructor && tbl->pconstructor(n)) {
748 		if (dev)
749 			dev_put(dev);
750 		kfree(n);
751 		n = NULL;
752 		goto out;
753 	}
754 
755 	write_lock_bh(&tbl->lock);
756 	n->next = tbl->phash_buckets[hash_val];
757 	tbl->phash_buckets[hash_val] = n;
758 	write_unlock_bh(&tbl->lock);
759 out:
760 	return n;
761 }
762 EXPORT_SYMBOL(pneigh_lookup);
763 
764 
765 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
766 		  struct net_device *dev)
767 {
768 	struct pneigh_entry *n, **np;
769 	unsigned int key_len = tbl->key_len;
770 	u32 hash_val = pneigh_hash(pkey, key_len);
771 
772 	write_lock_bh(&tbl->lock);
773 	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
774 	     np = &n->next) {
775 		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
776 		    net_eq(pneigh_net(n), net)) {
777 			*np = n->next;
778 			write_unlock_bh(&tbl->lock);
779 			if (tbl->pdestructor)
780 				tbl->pdestructor(n);
781 			if (n->dev)
782 				dev_put(n->dev);
783 			kfree(n);
784 			return 0;
785 		}
786 	}
787 	write_unlock_bh(&tbl->lock);
788 	return -ENOENT;
789 }
790 
791 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
792 				    struct net_device *dev)
793 {
794 	struct pneigh_entry *n, **np, *freelist = NULL;
795 	u32 h;
796 
797 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
798 		np = &tbl->phash_buckets[h];
799 		while ((n = *np) != NULL) {
800 			if (!dev || n->dev == dev) {
801 				*np = n->next;
802 				n->next = freelist;
803 				freelist = n;
804 				continue;
805 			}
806 			np = &n->next;
807 		}
808 	}
809 	write_unlock_bh(&tbl->lock);
810 	while ((n = freelist)) {
811 		freelist = n->next;
812 		n->next = NULL;
813 		if (tbl->pdestructor)
814 			tbl->pdestructor(n);
815 		if (n->dev)
816 			dev_put(n->dev);
817 		kfree(n);
818 	}
819 	return -ENOENT;
820 }
821 
822 static void neigh_parms_destroy(struct neigh_parms *parms);
823 
824 static inline void neigh_parms_put(struct neigh_parms *parms)
825 {
826 	if (refcount_dec_and_test(&parms->refcnt))
827 		neigh_parms_destroy(parms);
828 }
829 
830 /*
831  *	The neighbour must already be out of the table.
832  *
833  */
834 void neigh_destroy(struct neighbour *neigh)
835 {
836 	struct net_device *dev = neigh->dev;
837 
838 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
839 
840 	if (!neigh->dead) {
841 		pr_warn("Destroying alive neighbour %p\n", neigh);
842 		dump_stack();
843 		return;
844 	}
845 
846 	if (neigh_del_timer(neigh))
847 		pr_warn("Impossible event\n");
848 
849 	write_lock_bh(&neigh->lock);
850 	__skb_queue_purge(&neigh->arp_queue);
851 	write_unlock_bh(&neigh->lock);
852 	neigh->arp_queue_len_bytes = 0;
853 
854 	if (dev->netdev_ops->ndo_neigh_destroy)
855 		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
856 
857 	dev_put(dev);
858 	neigh_parms_put(neigh->parms);
859 
860 	neigh_dbg(2, "neigh %p is destroyed\n", neigh);
861 
862 	atomic_dec(&neigh->tbl->entries);
863 	kfree_rcu(neigh, rcu);
864 }
865 EXPORT_SYMBOL(neigh_destroy);
866 
867 /* Neighbour state is suspicious:
868    disable the fast path.
869 
870    Called with the neigh write-locked.
871  */
872 static void neigh_suspect(struct neighbour *neigh)
873 {
874 	neigh_dbg(2, "neigh %p is suspected\n", neigh);
875 
876 	neigh->output = neigh->ops->output;
877 }
878 
879 /* Neighbour state is OK:
880    enable the fast path.
881 
882    Called with the neigh write-locked.
883  */
884 static void neigh_connect(struct neighbour *neigh)
885 {
886 	neigh_dbg(2, "neigh %p is connected\n", neigh);
887 
888 	neigh->output = neigh->ops->connected_output;
889 }
890 
891 static void neigh_periodic_work(struct work_struct *work)
892 {
893 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
894 	struct neighbour *n;
895 	struct neighbour __rcu **np;
896 	unsigned int i;
897 	struct neigh_hash_table *nht;
898 
899 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
900 
901 	write_lock_bh(&tbl->lock);
902 	nht = rcu_dereference_protected(tbl->nht,
903 					lockdep_is_held(&tbl->lock));
904 
905 	/*
906 	 *	periodically recompute ReachableTime from random function
907 	 */
908 
909 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
910 		struct neigh_parms *p;
911 		tbl->last_rand = jiffies;
912 		list_for_each_entry(p, &tbl->parms_list, list)
913 			p->reachable_time =
914 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
915 	}
916 
917 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
918 		goto out;
919 
920 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
921 		np = &nht->hash_buckets[i];
922 
923 		while ((n = rcu_dereference_protected(*np,
924 				lockdep_is_held(&tbl->lock))) != NULL) {
925 			unsigned int state;
926 
927 			write_lock(&n->lock);
928 
929 			state = n->nud_state;
930 			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
931 			    (n->flags & NTF_EXT_LEARNED)) {
932 				write_unlock(&n->lock);
933 				goto next_elt;
934 			}
935 
936 			if (time_before(n->used, n->confirmed))
937 				n->used = n->confirmed;
938 
939 			if (refcount_read(&n->refcnt) == 1 &&
940 			    (state == NUD_FAILED ||
941 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
942 				*np = n->next;
943 				neigh_mark_dead(n);
944 				write_unlock(&n->lock);
945 				neigh_cleanup_and_release(n);
946 				continue;
947 			}
948 			write_unlock(&n->lock);
949 
950 next_elt:
951 			np = &n->next;
952 		}
953 		/*
954 		 * It's fine to release lock here, even if hash table
955 		 * grows while we are preempted.
956 		 */
957 		write_unlock_bh(&tbl->lock);
958 		cond_resched();
959 		write_lock_bh(&tbl->lock);
960 		nht = rcu_dereference_protected(tbl->nht,
961 						lockdep_is_held(&tbl->lock));
962 	}
963 out:
964 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
965 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
966 	 * BASE_REACHABLE_TIME.
967 	 */
968 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
969 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
970 	write_unlock_bh(&tbl->lock);
971 }
972 
973 static __inline__ int neigh_max_probes(struct neighbour *n)
974 {
975 	struct neigh_parms *p = n->parms;
976 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
977 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
978 	        NEIGH_VAR(p, MCAST_PROBES));
979 }
980 
981 static void neigh_invalidate(struct neighbour *neigh)
982 	__releases(neigh->lock)
983 	__acquires(neigh->lock)
984 {
985 	struct sk_buff *skb;
986 
987 	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
988 	neigh_dbg(2, "neigh %p is failed\n", neigh);
989 	neigh->updated = jiffies;
990 
991 	/* This is a very delicate spot. report_unreachable is a complicated
992 	   routine; in particular, it can hit the same neighbour entry!
993 
994 	   Hence we try to be careful and avoid a dead loop. --ANK
995 	 */
996 	while (neigh->nud_state == NUD_FAILED &&
997 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
998 		write_unlock(&neigh->lock);
999 		neigh->ops->error_report(neigh, skb);
1000 		write_lock(&neigh->lock);
1001 	}
1002 	__skb_queue_purge(&neigh->arp_queue);
1003 	neigh->arp_queue_len_bytes = 0;
1004 }
1005 
1006 static void neigh_probe(struct neighbour *neigh)
1007 	__releases(neigh->lock)
1008 {
1009 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1010 	/* keep skb alive even if arp_queue overflows */
1011 	if (skb)
1012 		skb = skb_clone(skb, GFP_ATOMIC);
1013 	write_unlock(&neigh->lock);
1014 	if (neigh->ops->solicit)
1015 		neigh->ops->solicit(neigh, skb);
1016 	atomic_inc(&neigh->probes);
1017 	consume_skb(skb);
1018 }
1019 
1020 /* Called when a timer expires for a neighbour entry. */
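/* Timer-driven state transitions, in brief: REACHABLE ages into DELAY (if
 * recently used) or STALE; DELAY re-confirms to REACHABLE or escalates to
 * PROBE; PROBE and INCOMPLETE retransmit solicitations until
 * neigh_max_probes() is exceeded, at which point the entry becomes FAILED.
 */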
1021 
1022 static void neigh_timer_handler(struct timer_list *t)
1023 {
1024 	unsigned long now, next;
1025 	struct neighbour *neigh = from_timer(neigh, t, timer);
1026 	unsigned int state;
1027 	int notify = 0;
1028 
1029 	write_lock(&neigh->lock);
1030 
1031 	state = neigh->nud_state;
1032 	now = jiffies;
1033 	next = now + HZ;
1034 
1035 	if (!(state & NUD_IN_TIMER))
1036 		goto out;
1037 
1038 	if (state & NUD_REACHABLE) {
1039 		if (time_before_eq(now,
1040 				   neigh->confirmed + neigh->parms->reachable_time)) {
1041 			neigh_dbg(2, "neigh %p is still alive\n", neigh);
1042 			next = neigh->confirmed + neigh->parms->reachable_time;
1043 		} else if (time_before_eq(now,
1044 					  neigh->used +
1045 					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1046 			neigh_dbg(2, "neigh %p is delayed\n", neigh);
1047 			neigh->nud_state = NUD_DELAY;
1048 			neigh->updated = jiffies;
1049 			neigh_suspect(neigh);
1050 			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1051 		} else {
1052 			neigh_dbg(2, "neigh %p is suspected\n", neigh);
1053 			neigh->nud_state = NUD_STALE;
1054 			neigh->updated = jiffies;
1055 			neigh_suspect(neigh);
1056 			notify = 1;
1057 		}
1058 	} else if (state & NUD_DELAY) {
1059 		if (time_before_eq(now,
1060 				   neigh->confirmed +
1061 				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1062 			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1063 			neigh->nud_state = NUD_REACHABLE;
1064 			neigh->updated = jiffies;
1065 			neigh_connect(neigh);
1066 			notify = 1;
1067 			next = neigh->confirmed + neigh->parms->reachable_time;
1068 		} else {
1069 			neigh_dbg(2, "neigh %p is probed\n", neigh);
1070 			neigh->nud_state = NUD_PROBE;
1071 			neigh->updated = jiffies;
1072 			atomic_set(&neigh->probes, 0);
1073 			notify = 1;
1074 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1075 					 HZ/100);
1076 		}
1077 	} else {
1078 		/* NUD_PROBE|NUD_INCOMPLETE */
1079 		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1080 	}
1081 
1082 	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1083 	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1084 		neigh->nud_state = NUD_FAILED;
1085 		notify = 1;
1086 		neigh_invalidate(neigh);
1087 		goto out;
1088 	}
1089 
1090 	if (neigh->nud_state & NUD_IN_TIMER) {
1091 		if (time_before(next, jiffies + HZ/100))
1092 			next = jiffies + HZ/100;
1093 		if (!mod_timer(&neigh->timer, next))
1094 			neigh_hold(neigh);
1095 	}
1096 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1097 		neigh_probe(neigh);
1098 	} else {
1099 out:
1100 		write_unlock(&neigh->lock);
1101 	}
1102 
1103 	if (notify)
1104 		neigh_update_notify(neigh, 0);
1105 
1106 	trace_neigh_timer_handler(neigh, 0);
1107 
1108 	neigh_release(neigh);
1109 }
1110 
1111 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1112 {
1113 	int rc;
1114 	bool immediate_probe = false;
1115 
1116 	write_lock_bh(&neigh->lock);
1117 
1118 	rc = 0;
1119 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1120 		goto out_unlock_bh;
1121 	if (neigh->dead)
1122 		goto out_dead;
1123 
1124 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1125 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1126 		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
1127 			unsigned long next, now = jiffies;
1128 
1129 			atomic_set(&neigh->probes,
1130 				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
1131 			neigh_del_timer(neigh);
1132 			neigh->nud_state     = NUD_INCOMPLETE;
1133 			neigh->updated = now;
1134 			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1135 					 HZ/100);
1136 			neigh_add_timer(neigh, next);
1137 			immediate_probe = true;
1138 		} else {
1139 			neigh->nud_state = NUD_FAILED;
1140 			neigh->updated = jiffies;
1141 			write_unlock_bh(&neigh->lock);
1142 
1143 			kfree_skb(skb);
1144 			return 1;
1145 		}
1146 	} else if (neigh->nud_state & NUD_STALE) {
1147 		neigh_dbg(2, "neigh %p is delayed\n", neigh);
1148 		neigh_del_timer(neigh);
1149 		neigh->nud_state = NUD_DELAY;
1150 		neigh->updated = jiffies;
1151 		neigh_add_timer(neigh, jiffies +
1152 				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1153 	}
1154 
1155 	if (neigh->nud_state == NUD_INCOMPLETE) {
1156 		if (skb) {
1157 			while (neigh->arp_queue_len_bytes + skb->truesize >
1158 			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1159 				struct sk_buff *buff;
1160 
1161 				buff = __skb_dequeue(&neigh->arp_queue);
1162 				if (!buff)
1163 					break;
1164 				neigh->arp_queue_len_bytes -= buff->truesize;
1165 				kfree_skb(buff);
1166 				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1167 			}
1168 			skb_dst_force(skb);
1169 			__skb_queue_tail(&neigh->arp_queue, skb);
1170 			neigh->arp_queue_len_bytes += skb->truesize;
1171 		}
1172 		rc = 1;
1173 	}
1174 out_unlock_bh:
1175 	if (immediate_probe)
1176 		neigh_probe(neigh);
1177 	else
1178 		write_unlock(&neigh->lock);
1179 	local_bh_enable();
1180 	trace_neigh_event_send_done(neigh, rc);
1181 	return rc;
1182 
1183 out_dead:
1184 	if (neigh->nud_state & NUD_STALE)
1185 		goto out_unlock_bh;
1186 	write_unlock_bh(&neigh->lock);
1187 	kfree_skb(skb);
1188 	trace_neigh_event_send_dead(neigh, 1);
1189 	return 1;
1190 }
1191 EXPORT_SYMBOL(__neigh_event_send);
1192 
1193 static void neigh_update_hhs(struct neighbour *neigh)
1194 {
1195 	struct hh_cache *hh;
1196 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1197 		= NULL;
1198 
1199 	if (neigh->dev->header_ops)
1200 		update = neigh->dev->header_ops->cache_update;
1201 
1202 	if (update) {
1203 		hh = &neigh->hh;
1204 		if (READ_ONCE(hh->hh_len)) {
1205 			write_seqlock_bh(&hh->hh_lock);
1206 			update(hh, neigh->dev, neigh->ha);
1207 			write_sequnlock_bh(&hh->hh_lock);
1208 		}
1209 	}
1210 }
1211 
1212 
1213 
1214 /* Generic update routine.
1215    -- lladdr is the new lladdr, or NULL if none is supplied.
1216    -- new    is the new state.
1217    -- flags
1218 	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1219 				if it is different.
1220 	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1221 				lladdr instead of overriding it
1222 				if it is different.
1223 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1224 
1225 	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
1226 				NTF_ROUTER flag.
1227 	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to
1228 				be a router.
1229 
1230    Caller MUST hold a reference on the entry.
1231  */
1232 
1233 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1234 			  u8 new, u32 flags, u32 nlmsg_pid,
1235 			  struct netlink_ext_ack *extack)
1236 {
1237 	bool ext_learn_change = false;
1238 	u8 old;
1239 	int err;
1240 	int notify = 0;
1241 	struct net_device *dev;
1242 	int update_isrouter = 0;
1243 
1244 	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1245 
1246 	write_lock_bh(&neigh->lock);
1247 
1248 	dev    = neigh->dev;
1249 	old    = neigh->nud_state;
1250 	err    = -EPERM;
1251 
1252 	if (neigh->dead) {
1253 		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
1254 		new = old;
1255 		goto out;
1256 	}
1257 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1258 	    (old & (NUD_NOARP | NUD_PERMANENT)))
1259 		goto out;
1260 
1261 	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
1262 
1263 	if (!(new & NUD_VALID)) {
1264 		neigh_del_timer(neigh);
1265 		if (old & NUD_CONNECTED)
1266 			neigh_suspect(neigh);
1267 		neigh->nud_state = new;
1268 		err = 0;
1269 		notify = old & NUD_VALID;
1270 		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1271 		    (new & NUD_FAILED)) {
1272 			neigh_invalidate(neigh);
1273 			notify = 1;
1274 		}
1275 		goto out;
1276 	}
1277 
1278 	/* Compare new lladdr with cached one */
1279 	if (!dev->addr_len) {
1280 		/* First case: device needs no address. */
1281 		lladdr = neigh->ha;
1282 	} else if (lladdr) {
1283 		/* The second case: if something is already cached
1284 		   and a new address is proposed:
1285 		   - compare new & old
1286 		   - if they are different, check override flag
1287 		 */
1288 		if ((old & NUD_VALID) &&
1289 		    !memcmp(lladdr, neigh->ha, dev->addr_len))
1290 			lladdr = neigh->ha;
1291 	} else {
1292 		/* No address is supplied; if we know something,
1293 		   use it, otherwise discard the request.
1294 		 */
1295 		err = -EINVAL;
1296 		if (!(old & NUD_VALID)) {
1297 			NL_SET_ERR_MSG(extack, "No link layer address given");
1298 			goto out;
1299 		}
1300 		lladdr = neigh->ha;
1301 	}
1302 
1303 	/* Update the confirmed timestamp for the neighbour entry after we
1304 	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
1305 	 */
1306 	if (new & NUD_CONNECTED)
1307 		neigh->confirmed = jiffies;
1308 
1309 	/* If the entry was valid and the address is unchanged,
1310 	   do not change the entry state if the new one is STALE.
1311 	 */
1312 	err = 0;
1313 	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1314 	if (old & NUD_VALID) {
1315 		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1316 			update_isrouter = 0;
1317 			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1318 			    (old & NUD_CONNECTED)) {
1319 				lladdr = neigh->ha;
1320 				new = NUD_STALE;
1321 			} else
1322 				goto out;
1323 		} else {
1324 			if (lladdr == neigh->ha && new == NUD_STALE &&
1325 			    !(flags & NEIGH_UPDATE_F_ADMIN))
1326 				new = old;
1327 		}
1328 	}
1329 
1330 	/* Update the timestamp only once we know we will make a change to
1331 	 * the neighbour entry. Otherwise we risk moving the locktime window
1332 	 * with noop updates and ignoring relevant ARP updates.
1333 	 */
1334 	if (new != old || lladdr != neigh->ha)
1335 		neigh->updated = jiffies;
1336 
1337 	if (new != old) {
1338 		neigh_del_timer(neigh);
1339 		if (new & NUD_PROBE)
1340 			atomic_set(&neigh->probes, 0);
1341 		if (new & NUD_IN_TIMER)
1342 			neigh_add_timer(neigh, (jiffies +
1343 						((new & NUD_REACHABLE) ?
1344 						 neigh->parms->reachable_time :
1345 						 0)));
1346 		neigh->nud_state = new;
1347 		notify = 1;
1348 	}
1349 
1350 	if (lladdr != neigh->ha) {
1351 		write_seqlock(&neigh->ha_lock);
1352 		memcpy(&neigh->ha, lladdr, dev->addr_len);
1353 		write_sequnlock(&neigh->ha_lock);
1354 		neigh_update_hhs(neigh);
1355 		if (!(new & NUD_CONNECTED))
1356 			neigh->confirmed = jiffies -
1357 				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1358 		notify = 1;
1359 	}
1360 	if (new == old)
1361 		goto out;
1362 	if (new & NUD_CONNECTED)
1363 		neigh_connect(neigh);
1364 	else
1365 		neigh_suspect(neigh);
1366 	if (!(old & NUD_VALID)) {
1367 		struct sk_buff *skb;
1368 
1369 		/* Again: avoid a dead loop if something goes wrong */
1370 
1371 		while (neigh->nud_state & NUD_VALID &&
1372 		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1373 			struct dst_entry *dst = skb_dst(skb);
1374 			struct neighbour *n2, *n1 = neigh;
1375 			write_unlock_bh(&neigh->lock);
1376 
1377 			rcu_read_lock();
1378 
1379 			/* Why not just use 'neigh' as-is?  The problem is that
1380 			 * things such as shaper, eql, and sch_teql can end up
1381 			 * using alternative, different, neigh objects to output
1382 			 * the packet in the output path.  So what we need to do
1383 			 * here is re-lookup the top-level neigh in the path so
1384 			 * we can reinject the packet there.
1385 			 */
1386 			n2 = NULL;
1387 			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
1388 				n2 = dst_neigh_lookup_skb(dst, skb);
1389 				if (n2)
1390 					n1 = n2;
1391 			}
1392 			n1->output(n1, skb);
1393 			if (n2)
1394 				neigh_release(n2);
1395 			rcu_read_unlock();
1396 
1397 			write_lock_bh(&neigh->lock);
1398 		}
1399 		__skb_queue_purge(&neigh->arp_queue);
1400 		neigh->arp_queue_len_bytes = 0;
1401 	}
1402 out:
1403 	if (update_isrouter)
1404 		neigh_update_is_router(neigh, flags, &notify);
1405 	write_unlock_bh(&neigh->lock);
1406 
1407 	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
1408 		neigh_update_gc_list(neigh);
1409 
1410 	if (notify)
1411 		neigh_update_notify(neigh, nlmsg_pid);
1412 
1413 	trace_neigh_update_done(neigh, err);
1414 
1415 	return err;
1416 }
1417 
1418 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1419 		 u32 flags, u32 nlmsg_pid)
1420 {
1421 	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1422 }
1423 EXPORT_SYMBOL(neigh_update);
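
/* Usage sketch (hypothetical values): an ARP receive path confirming a
 * resolved address does roughly
 *
 *	neigh_update(n, sha, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE, 0);
 *
 * where sha is the sender hardware address taken from the packet. The
 * caller must already hold a reference on n.
 */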
1424 
1425 /* Update the neigh to listen temporarily for probe responses, even if it is
1426  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1427  */
1428 void __neigh_set_probe_once(struct neighbour *neigh)
1429 {
1430 	if (neigh->dead)
1431 		return;
1432 	neigh->updated = jiffies;
1433 	if (!(neigh->nud_state & NUD_FAILED))
1434 		return;
1435 	neigh->nud_state = NUD_INCOMPLETE;
1436 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1437 	neigh_add_timer(neigh,
1438 			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1439 				      HZ/100));
1440 }
1441 EXPORT_SYMBOL(__neigh_set_probe_once);
1442 
1443 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1444 				 u8 *lladdr, void *saddr,
1445 				 struct net_device *dev)
1446 {
1447 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1448 						 lladdr || !dev->addr_len);
1449 	if (neigh)
1450 		neigh_update(neigh, lladdr, NUD_STALE,
1451 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1452 	return neigh;
1453 }
1454 EXPORT_SYMBOL(neigh_event_ns);
1455 
1456 /* Takes n->lock itself, so it must be called without n->lock held. */
1457 static void neigh_hh_init(struct neighbour *n)
1458 {
1459 	struct net_device *dev = n->dev;
1460 	__be16 prot = n->tbl->protocol;
1461 	struct hh_cache	*hh = &n->hh;
1462 
1463 	write_lock_bh(&n->lock);
1464 
1465 	/* Only one thread can come in here and initialize the
1466 	 * hh_cache entry.
1467 	 */
1468 	if (!hh->hh_len)
1469 		dev->header_ops->cache(n, hh, prot);
1470 
1471 	write_unlock_bh(&n->lock);
1472 }
1473 
1474 /* Slow and careful. */
1475 
1476 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1477 {
1478 	int rc = 0;
1479 
1480 	if (!neigh_event_send(neigh, skb)) {
1481 		int err;
1482 		struct net_device *dev = neigh->dev;
1483 		unsigned int seq;
1484 
1485 		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1486 			neigh_hh_init(neigh);
1487 
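		/* Build the header against a consistent snapshot of neigh->ha:
		 * retry if ha_lock's sequence shows it changed under us. */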
1488 		do {
1489 			__skb_pull(skb, skb_network_offset(skb));
1490 			seq = read_seqbegin(&neigh->ha_lock);
1491 			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1492 					      neigh->ha, NULL, skb->len);
1493 		} while (read_seqretry(&neigh->ha_lock, seq));
1494 
1495 		if (err >= 0)
1496 			rc = dev_queue_xmit(skb);
1497 		else
1498 			goto out_kfree_skb;
1499 	}
1500 out:
1501 	return rc;
1502 out_kfree_skb:
1503 	rc = -EINVAL;
1504 	kfree_skb(skb);
1505 	goto out;
1506 }
1507 EXPORT_SYMBOL(neigh_resolve_output);
1508 
1509 /* As fast as possible without hh cache */
1510 
1511 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1512 {
1513 	struct net_device *dev = neigh->dev;
1514 	unsigned int seq;
1515 	int err;
1516 
1517 	do {
1518 		__skb_pull(skb, skb_network_offset(skb));
1519 		seq = read_seqbegin(&neigh->ha_lock);
1520 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1521 				      neigh->ha, NULL, skb->len);
1522 	} while (read_seqretry(&neigh->ha_lock, seq));
1523 
1524 	if (err >= 0)
1525 		err = dev_queue_xmit(skb);
1526 	else {
1527 		err = -EINVAL;
1528 		kfree_skb(skb);
1529 	}
1530 	return err;
1531 }
1532 EXPORT_SYMBOL(neigh_connected_output);
1533 
1534 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1535 {
1536 	return dev_queue_xmit(skb);
1537 }
1538 EXPORT_SYMBOL(neigh_direct_output);
1539 
1540 static void neigh_proxy_process(struct timer_list *t)
1541 {
1542 	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1543 	long sched_next = 0;
1544 	unsigned long now = jiffies;
1545 	struct sk_buff *skb, *n;
1546 
1547 	spin_lock(&tbl->proxy_queue.lock);
1548 
1549 	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1550 		long tdif = NEIGH_CB(skb)->sched_next - now;
1551 
1552 		if (tdif <= 0) {
1553 			struct net_device *dev = skb->dev;
1554 
1555 			__skb_unlink(skb, &tbl->proxy_queue);
1556 			if (tbl->proxy_redo && netif_running(dev)) {
1557 				rcu_read_lock();
1558 				tbl->proxy_redo(skb);
1559 				rcu_read_unlock();
1560 			} else {
1561 				kfree_skb(skb);
1562 			}
1563 
1564 			dev_put(dev);
1565 		} else if (!sched_next || tdif < sched_next)
1566 			sched_next = tdif;
1567 	}
1568 	del_timer(&tbl->proxy_timer);
1569 	if (sched_next)
1570 		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1571 	spin_unlock(&tbl->proxy_queue.lock);
1572 }
1573 
1574 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1575 		    struct sk_buff *skb)
1576 {
1577 	unsigned long sched_next = jiffies +
1578 			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
1579 
1580 	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1581 		kfree_skb(skb);
1582 		return;
1583 	}
1584 
1585 	NEIGH_CB(skb)->sched_next = sched_next;
1586 	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1587 
1588 	spin_lock(&tbl->proxy_queue.lock);
1589 	if (del_timer(&tbl->proxy_timer)) {
1590 		if (time_before(tbl->proxy_timer.expires, sched_next))
1591 			sched_next = tbl->proxy_timer.expires;
1592 	}
1593 	skb_dst_drop(skb);
1594 	dev_hold(skb->dev);
1595 	__skb_queue_tail(&tbl->proxy_queue, skb);
1596 	mod_timer(&tbl->proxy_timer, sched_next);
1597 	spin_unlock(&tbl->proxy_queue.lock);
1598 }
1599 EXPORT_SYMBOL(pneigh_enqueue);
1600 
1601 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1602 						      struct net *net, int ifindex)
1603 {
1604 	struct neigh_parms *p;
1605 
1606 	list_for_each_entry(p, &tbl->parms_list, list) {
1607 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1608 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1609 			return p;
1610 	}
1611 
1612 	return NULL;
1613 }
1614 
1615 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1616 				      struct neigh_table *tbl)
1617 {
1618 	struct neigh_parms *p;
1619 	struct net *net = dev_net(dev);
1620 	const struct net_device_ops *ops = dev->netdev_ops;
1621 
1622 	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1623 	if (p) {
1624 		p->tbl		  = tbl;
1625 		refcount_set(&p->refcnt, 1);
1626 		p->reachable_time =
1627 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
1628 		dev_hold(dev);
1629 		p->dev = dev;
1630 		write_pnet(&p->net, net);
1631 		p->sysctl_table = NULL;
1632 
1633 		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1634 			dev_put(dev);
1635 			kfree(p);
1636 			return NULL;
1637 		}
1638 
1639 		write_lock_bh(&tbl->lock);
1640 		list_add(&p->list, &tbl->parms.list);
1641 		write_unlock_bh(&tbl->lock);
1642 
1643 		neigh_parms_data_state_cleanall(p);
1644 	}
1645 	return p;
1646 }
1647 EXPORT_SYMBOL(neigh_parms_alloc);
1648 
1649 static void neigh_rcu_free_parms(struct rcu_head *head)
1650 {
1651 	struct neigh_parms *parms =
1652 		container_of(head, struct neigh_parms, rcu_head);
1653 
1654 	neigh_parms_put(parms);
1655 }
1656 
1657 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1658 {
1659 	if (!parms || parms == &tbl->parms)
1660 		return;
1661 	write_lock_bh(&tbl->lock);
1662 	list_del(&parms->list);
1663 	parms->dead = 1;
1664 	write_unlock_bh(&tbl->lock);
1665 	if (parms->dev)
1666 		dev_put(parms->dev);
1667 	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1668 }
1669 EXPORT_SYMBOL(neigh_parms_release);
1670 
1671 static void neigh_parms_destroy(struct neigh_parms *parms)
1672 {
1673 	kfree(parms);
1674 }
1675 
1676 static struct lock_class_key neigh_table_proxy_queue_class;
1677 
1678 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1679 
1680 void neigh_table_init(int index, struct neigh_table *tbl)
1681 {
1682 	unsigned long now = jiffies;
1683 	unsigned long phsize;
1684 
1685 	INIT_LIST_HEAD(&tbl->parms_list);
1686 	INIT_LIST_HEAD(&tbl->gc_list);
1687 	list_add(&tbl->parms.list, &tbl->parms_list);
1688 	write_pnet(&tbl->parms.net, &init_net);
1689 	refcount_set(&tbl->parms.refcnt, 1);
1690 	tbl->parms.reachable_time =
1691 			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1692 
1693 	tbl->stats = alloc_percpu(struct neigh_statistics);
1694 	if (!tbl->stats)
1695 		panic("cannot create neighbour cache statistics");
1696 
1697 #ifdef CONFIG_PROC_FS
1698 	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1699 			      &neigh_stat_seq_ops, tbl))
1700 		panic("cannot create neighbour proc dir entry");
1701 #endif
1702 
1703 	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1704 
1705 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1706 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1707 
1708 	if (!tbl->nht || !tbl->phash_buckets)
1709 		panic("cannot allocate neighbour cache hashes");
1710 
1711 	if (!tbl->entry_size)
1712 		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1713 					tbl->key_len, NEIGH_PRIV_ALIGN);
1714 	else
1715 		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1716 
1717 	rwlock_init(&tbl->lock);
1718 	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1719 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1720 			tbl->parms.reachable_time);
1721 	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1722 	skb_queue_head_init_class(&tbl->proxy_queue,
1723 			&neigh_table_proxy_queue_class);
1724 
1725 	tbl->last_flush = now;
1726 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
1727 
1728 	neigh_tables[index] = tbl;
1729 }
1730 EXPORT_SYMBOL(neigh_table_init);
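
/* Protocols register their tables at init time; e.g. arp_init() calls
 * neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl). */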
1731 
1732 int neigh_table_clear(int index, struct neigh_table *tbl)
1733 {
1734 	neigh_tables[index] = NULL;
1735 	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1736 	cancel_delayed_work_sync(&tbl->gc_work);
1737 	del_timer_sync(&tbl->proxy_timer);
1738 	pneigh_queue_purge(&tbl->proxy_queue);
1739 	neigh_ifdown(tbl, NULL);
1740 	if (atomic_read(&tbl->entries))
1741 		pr_crit("neighbour leakage\n");
1742 
1743 	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1744 		 neigh_hash_free_rcu);
1745 	tbl->nht = NULL;
1746 
1747 	kfree(tbl->phash_buckets);
1748 	tbl->phash_buckets = NULL;
1749 
1750 	remove_proc_entry(tbl->id, init_net.proc_net_stat);
1751 
1752 	free_percpu(tbl->stats);
1753 	tbl->stats = NULL;
1754 
1755 	return 0;
1756 }
1757 EXPORT_SYMBOL(neigh_table_clear);
1758 
1759 static struct neigh_table *neigh_find_table(int family)
1760 {
1761 	struct neigh_table *tbl = NULL;
1762 
1763 	switch (family) {
1764 	case AF_INET:
1765 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1766 		break;
1767 	case AF_INET6:
1768 		tbl = neigh_tables[NEIGH_ND_TABLE];
1769 		break;
1770 	case AF_DECnet:
1771 		tbl = neigh_tables[NEIGH_DN_TABLE];
1772 		break;
1773 	}
1774 
1775 	return tbl;
1776 }
1777 
1778 const struct nla_policy nda_policy[NDA_MAX+1] = {
1779 	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
1780 	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1781 	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
1782 	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
1783 	[NDA_PROBES]		= { .type = NLA_U32 },
1784 	[NDA_VLAN]		= { .type = NLA_U16 },
1785 	[NDA_PORT]		= { .type = NLA_U16 },
1786 	[NDA_VNI]		= { .type = NLA_U32 },
1787 	[NDA_IFINDEX]		= { .type = NLA_U32 },
1788 	[NDA_MASTER]		= { .type = NLA_U32 },
1789 	[NDA_PROTOCOL]		= { .type = NLA_U8 },
1790 	[NDA_NH_ID]		= { .type = NLA_U32 },
1791 	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
1792 };
1793 
1794 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1795 			struct netlink_ext_ack *extack)
1796 {
1797 	struct net *net = sock_net(skb->sk);
1798 	struct ndmsg *ndm;
1799 	struct nlattr *dst_attr;
1800 	struct neigh_table *tbl;
1801 	struct neighbour *neigh;
1802 	struct net_device *dev = NULL;
1803 	int err = -EINVAL;
1804 
1805 	ASSERT_RTNL();
1806 	if (nlmsg_len(nlh) < sizeof(*ndm))
1807 		goto out;
1808 
1809 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1810 	if (!dst_attr) {
1811 		NL_SET_ERR_MSG(extack, "Network address not specified");
1812 		goto out;
1813 	}
1814 
1815 	ndm = nlmsg_data(nlh);
1816 	if (ndm->ndm_ifindex) {
1817 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1818 		if (dev == NULL) {
1819 			err = -ENODEV;
1820 			goto out;
1821 		}
1822 	}
1823 
1824 	tbl = neigh_find_table(ndm->ndm_family);
1825 	if (tbl == NULL)
1826 		return -EAFNOSUPPORT;
1827 
1828 	if (nla_len(dst_attr) < (int)tbl->key_len) {
1829 		NL_SET_ERR_MSG(extack, "Invalid network address");
1830 		goto out;
1831 	}
1832 
1833 	if (ndm->ndm_flags & NTF_PROXY) {
1834 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1835 		goto out;
1836 	}
1837 
1838 	if (dev == NULL)
1839 		goto out;
1840 
1841 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1842 	if (neigh == NULL) {
1843 		err = -ENOENT;
1844 		goto out;
1845 	}
1846 
1847 	err = __neigh_update(neigh, NULL, NUD_FAILED,
1848 			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1849 			     NETLINK_CB(skb).portid, extack);
1850 	write_lock_bh(&tbl->lock);
1851 	neigh_release(neigh);
1852 	neigh_remove_one(neigh, tbl);
1853 	write_unlock_bh(&tbl->lock);
1854 
1855 out:
1856 	return err;
1857 }
1858 
1859 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1860 		     struct netlink_ext_ack *extack)
1861 {
1862 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1863 		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1864 	struct net *net = sock_net(skb->sk);
1865 	struct ndmsg *ndm;
1866 	struct nlattr *tb[NDA_MAX+1];
1867 	struct neigh_table *tbl;
1868 	struct net_device *dev = NULL;
1869 	struct neighbour *neigh;
1870 	void *dst, *lladdr;
1871 	u8 protocol = 0;
1872 	int err;
1873 
1874 	ASSERT_RTNL();
1875 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1876 				     nda_policy, extack);
1877 	if (err < 0)
1878 		goto out;
1879 
1880 	err = -EINVAL;
1881 	if (!tb[NDA_DST]) {
1882 		NL_SET_ERR_MSG(extack, "Network address not specified");
1883 		goto out;
1884 	}
1885 
1886 	ndm = nlmsg_data(nlh);
1887 	if (ndm->ndm_ifindex) {
1888 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1889 		if (dev == NULL) {
1890 			err = -ENODEV;
1891 			goto out;
1892 		}
1893 
1894 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
1895 			NL_SET_ERR_MSG(extack, "Invalid link address");
1896 			goto out;
1897 		}
1898 	}
1899 
1900 	tbl = neigh_find_table(ndm->ndm_family);
1901 	if (tbl == NULL)
1902 		return -EAFNOSUPPORT;
1903 
1904 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1905 		NL_SET_ERR_MSG(extack, "Invalid network address");
1906 		goto out;
1907 	}
1908 
1909 	dst = nla_data(tb[NDA_DST]);
1910 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1911 
1912 	if (tb[NDA_PROTOCOL])
1913 		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
1914 
1915 	if (ndm->ndm_flags & NTF_PROXY) {
1916 		struct pneigh_entry *pn;
1917 
1918 		err = -ENOBUFS;
1919 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
1920 		if (pn) {
1921 			pn->flags = ndm->ndm_flags;
1922 			if (protocol)
1923 				pn->protocol = protocol;
1924 			err = 0;
1925 		}
1926 		goto out;
1927 	}
1928 
1929 	if (!dev) {
1930 		NL_SET_ERR_MSG(extack, "Device not specified");
1931 		goto out;
1932 	}
1933 
1934 	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1935 		err = -EINVAL;
1936 		goto out;
1937 	}
1938 
1939 	neigh = neigh_lookup(tbl, dst, dev);
1940 	if (neigh == NULL) {
1941 		bool exempt_from_gc;
1942 
1943 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1944 			err = -ENOENT;
1945 			goto out;
1946 		}
1947 
1948 		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
1949 				 ndm->ndm_flags & NTF_EXT_LEARNED;
1950 		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
1951 		if (IS_ERR(neigh)) {
1952 			err = PTR_ERR(neigh);
1953 			goto out;
1954 		}
1955 	} else {
1956 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
1957 			err = -EEXIST;
1958 			neigh_release(neigh);
1959 			goto out;
1960 		}
1961 
1962 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1963 			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
1964 				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
1965 	}
1966 
1967 	if (protocol)
1968 		neigh->protocol = protocol;
1969 
1970 	if (ndm->ndm_flags & NTF_EXT_LEARNED)
1971 		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
1972 
1973 	if (ndm->ndm_flags & NTF_ROUTER)
1974 		flags |= NEIGH_UPDATE_F_ISROUTER;
1975 
1976 	if (ndm->ndm_flags & NTF_USE) {
1977 		neigh_event_send(neigh, NULL);
1978 		err = 0;
1979 	} else
1980 		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
1981 				     NETLINK_CB(skb).portid, extack);
1982 
1983 	neigh_release(neigh);
1984 
1985 out:
1986 	return err;
1987 }
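
/* Illustration (as iproute2 typically encodes these commands; not part of
 * this file's logic): the nlmsg_flags handled above map as
 *
 *	ip neigh add ...	NLM_F_CREATE | NLM_F_EXCL
 *	ip neigh change ...	NLM_F_REPLACE (entry must already exist)
 *	ip neigh replace ...	NLM_F_CREATE | NLM_F_REPLACE
 *
 * Without NLM_F_REPLACE an existing entry keeps its link-layer address,
 * since the OVERRIDE flags are cleared above.
 */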
1988 
1989 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1990 {
1991 	struct nlattr *nest;
1992 
1993 	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
1994 	if (nest == NULL)
1995 		return -ENOBUFS;
1996 
1997 	if ((parms->dev &&
1998 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1999 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2000 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2001 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2002 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2003 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2004 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2005 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2006 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2007 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2008 			NEIGH_VAR(parms, UCAST_PROBES)) ||
2009 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2010 			NEIGH_VAR(parms, MCAST_PROBES)) ||
2011 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2012 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2013 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2014 			  NDTPA_PAD) ||
2015 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2016 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2017 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2018 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2019 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2020 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2021 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2022 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2023 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2024 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2025 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2026 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2027 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2028 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
2029 		goto nla_put_failure;
2030 	return nla_nest_end(skb, nest);
2031 
2032 nla_put_failure:
2033 	nla_nest_cancel(skb, nest);
2034 	return -EMSGSIZE;
2035 }
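
/* Note: the deprecated QUEUE_LEN attribute is only an approximation.  It
 * is reported above as QUEUE_LEN_BYTES / SKB_TRUESIZE(ETH_FRAME_LEN) and
 * converted back with the inverse in neightbl_set(), so values written
 * through NDTPA_QUEUE_LEN round-trip only up to that granularity.
 */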
2036 
2037 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2038 			      u32 pid, u32 seq, int type, int flags)
2039 {
2040 	struct nlmsghdr *nlh;
2041 	struct ndtmsg *ndtmsg;
2042 
2043 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2044 	if (nlh == NULL)
2045 		return -EMSGSIZE;
2046 
2047 	ndtmsg = nlmsg_data(nlh);
2048 
2049 	read_lock_bh(&tbl->lock);
2050 	ndtmsg->ndtm_family = tbl->family;
2051 	ndtmsg->ndtm_pad1   = 0;
2052 	ndtmsg->ndtm_pad2   = 0;
2053 
2054 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2055 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2056 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2057 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2058 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2059 		goto nla_put_failure;
2060 	{
2061 		unsigned long now = jiffies;
2062 		long flush_delta = now - tbl->last_flush;
2063 		long rand_delta = now - tbl->last_rand;
2064 		struct neigh_hash_table *nht;
2065 		struct ndt_config ndc = {
2066 			.ndtc_key_len		= tbl->key_len,
2067 			.ndtc_entry_size	= tbl->entry_size,
2068 			.ndtc_entries		= atomic_read(&tbl->entries),
2069 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2070 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2071 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
2072 		};
2073 
2074 		rcu_read_lock_bh();
2075 		nht = rcu_dereference_bh(tbl->nht);
2076 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2077 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2078 		rcu_read_unlock_bh();
2079 
2080 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2081 			goto nla_put_failure;
2082 	}
2083 
2084 	{
2085 		int cpu;
2086 		struct ndt_stats ndst;
2087 
2088 		memset(&ndst, 0, sizeof(ndst));
2089 
2090 		for_each_possible_cpu(cpu) {
2091 			struct neigh_statistics	*st;
2092 
2093 			st = per_cpu_ptr(tbl->stats, cpu);
2094 			ndst.ndts_allocs		+= st->allocs;
2095 			ndst.ndts_destroys		+= st->destroys;
2096 			ndst.ndts_hash_grows		+= st->hash_grows;
2097 			ndst.ndts_res_failed		+= st->res_failed;
2098 			ndst.ndts_lookups		+= st->lookups;
2099 			ndst.ndts_hits			+= st->hits;
2100 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
2101 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
2102 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
2103 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
2104 			ndst.ndts_table_fulls		+= st->table_fulls;
2105 		}
2106 
2107 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2108 				  NDTA_PAD))
2109 			goto nla_put_failure;
2110 	}
2111 
2112 	BUG_ON(tbl->parms.dev);
2113 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2114 		goto nla_put_failure;
2115 
2116 	read_unlock_bh(&tbl->lock);
2117 	nlmsg_end(skb, nlh);
2118 	return 0;
2119 
2120 nla_put_failure:
2121 	read_unlock_bh(&tbl->lock);
2122 	nlmsg_cancel(skb, nlh);
2123 	return -EMSGSIZE;
2124 }
2125 
2126 static int neightbl_fill_param_info(struct sk_buff *skb,
2127 				    struct neigh_table *tbl,
2128 				    struct neigh_parms *parms,
2129 				    u32 pid, u32 seq, int type,
2130 				    unsigned int flags)
2131 {
2132 	struct ndtmsg *ndtmsg;
2133 	struct nlmsghdr *nlh;
2134 
2135 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2136 	if (nlh == NULL)
2137 		return -EMSGSIZE;
2138 
2139 	ndtmsg = nlmsg_data(nlh);
2140 
2141 	read_lock_bh(&tbl->lock);
2142 	ndtmsg->ndtm_family = tbl->family;
2143 	ndtmsg->ndtm_pad1   = 0;
2144 	ndtmsg->ndtm_pad2   = 0;
2145 
2146 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2147 	    neightbl_fill_parms(skb, parms) < 0)
2148 		goto errout;
2149 
2150 	read_unlock_bh(&tbl->lock);
2151 	nlmsg_end(skb, nlh);
2152 	return 0;
2153 errout:
2154 	read_unlock_bh(&tbl->lock);
2155 	nlmsg_cancel(skb, nlh);
2156 	return -EMSGSIZE;
2157 }
2158 
2159 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2160 	[NDTA_NAME]		= { .type = NLA_STRING },
2161 	[NDTA_THRESH1]		= { .type = NLA_U32 },
2162 	[NDTA_THRESH2]		= { .type = NLA_U32 },
2163 	[NDTA_THRESH3]		= { .type = NLA_U32 },
2164 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2165 	[NDTA_PARMS]		= { .type = NLA_NESTED },
2166 };
2167 
2168 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2169 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2170 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2171 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2172 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2173 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2174 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2175 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2176 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2177 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2178 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2179 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2180 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2181 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2182 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2183 };
2184 
2185 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2186 			struct netlink_ext_ack *extack)
2187 {
2188 	struct net *net = sock_net(skb->sk);
2189 	struct neigh_table *tbl;
2190 	struct ndtmsg *ndtmsg;
2191 	struct nlattr *tb[NDTA_MAX+1];
2192 	bool found = false;
2193 	int err, tidx;
2194 
2195 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2196 				     nl_neightbl_policy, extack);
2197 	if (err < 0)
2198 		goto errout;
2199 
2200 	if (tb[NDTA_NAME] == NULL) {
2201 		err = -EINVAL;
2202 		goto errout;
2203 	}
2204 
2205 	ndtmsg = nlmsg_data(nlh);
2206 
2207 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2208 		tbl = neigh_tables[tidx];
2209 		if (!tbl)
2210 			continue;
2211 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2212 			continue;
2213 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2214 			found = true;
2215 			break;
2216 		}
2217 	}
2218 
2219 	if (!found)
2220 		return -ENOENT;
2221 
2222 	/*
2223 	 * We acquire tbl->lock to be nice to the periodic timers and
2224 	 * make sure they always see a consistent set of values.
2225 	 */
2226 	write_lock_bh(&tbl->lock);
2227 
2228 	if (tb[NDTA_PARMS]) {
2229 		struct nlattr *tbp[NDTPA_MAX+1];
2230 		struct neigh_parms *p;
2231 		int i, ifindex = 0;
2232 
2233 		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2234 						  tb[NDTA_PARMS],
2235 						  nl_ntbl_parm_policy, extack);
2236 		if (err < 0)
2237 			goto errout_tbl_lock;
2238 
2239 		if (tbp[NDTPA_IFINDEX])
2240 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2241 
2242 		p = lookup_neigh_parms(tbl, net, ifindex);
2243 		if (p == NULL) {
2244 			err = -ENOENT;
2245 			goto errout_tbl_lock;
2246 		}
2247 
2248 		for (i = 1; i <= NDTPA_MAX; i++) {
2249 			if (tbp[i] == NULL)
2250 				continue;
2251 
2252 			switch (i) {
2253 			case NDTPA_QUEUE_LEN:
2254 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2255 					      nla_get_u32(tbp[i]) *
2256 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2257 				break;
2258 			case NDTPA_QUEUE_LENBYTES:
2259 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2260 					      nla_get_u32(tbp[i]));
2261 				break;
2262 			case NDTPA_PROXY_QLEN:
2263 				NEIGH_VAR_SET(p, PROXY_QLEN,
2264 					      nla_get_u32(tbp[i]));
2265 				break;
2266 			case NDTPA_APP_PROBES:
2267 				NEIGH_VAR_SET(p, APP_PROBES,
2268 					      nla_get_u32(tbp[i]));
2269 				break;
2270 			case NDTPA_UCAST_PROBES:
2271 				NEIGH_VAR_SET(p, UCAST_PROBES,
2272 					      nla_get_u32(tbp[i]));
2273 				break;
2274 			case NDTPA_MCAST_PROBES:
2275 				NEIGH_VAR_SET(p, MCAST_PROBES,
2276 					      nla_get_u32(tbp[i]));
2277 				break;
2278 			case NDTPA_MCAST_REPROBES:
2279 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2280 					      nla_get_u32(tbp[i]));
2281 				break;
2282 			case NDTPA_BASE_REACHABLE_TIME:
2283 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2284 					      nla_get_msecs(tbp[i]));
2285 				/* update reachable_time as well; otherwise the change
2286 				 * only takes effect the next time neigh_periodic_work
2287 				 * recomputes it (which can take several minutes)
2288 				 */
2289 				p->reachable_time =
2290 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2291 				break;
2292 			case NDTPA_GC_STALETIME:
2293 				NEIGH_VAR_SET(p, GC_STALETIME,
2294 					      nla_get_msecs(tbp[i]));
2295 				break;
2296 			case NDTPA_DELAY_PROBE_TIME:
2297 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2298 					      nla_get_msecs(tbp[i]));
2299 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2300 				break;
2301 			case NDTPA_RETRANS_TIME:
2302 				NEIGH_VAR_SET(p, RETRANS_TIME,
2303 					      nla_get_msecs(tbp[i]));
2304 				break;
2305 			case NDTPA_ANYCAST_DELAY:
2306 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2307 					      nla_get_msecs(tbp[i]));
2308 				break;
2309 			case NDTPA_PROXY_DELAY:
2310 				NEIGH_VAR_SET(p, PROXY_DELAY,
2311 					      nla_get_msecs(tbp[i]));
2312 				break;
2313 			case NDTPA_LOCKTIME:
2314 				NEIGH_VAR_SET(p, LOCKTIME,
2315 					      nla_get_msecs(tbp[i]));
2316 				break;
2317 			}
2318 		}
2319 	}
2320 
2321 	err = -ENOENT;
2322 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2323 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2324 	    !net_eq(net, &init_net))
2325 		goto errout_tbl_lock;
2326 
2327 	if (tb[NDTA_THRESH1])
2328 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2329 
2330 	if (tb[NDTA_THRESH2])
2331 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2332 
2333 	if (tb[NDTA_THRESH3])
2334 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2335 
2336 	if (tb[NDTA_GC_INTERVAL])
2337 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2338 
2339 	err = 0;
2340 
2341 errout_tbl_lock:
2342 	write_unlock_bh(&tbl->lock);
2343 errout:
2344 	return err;
2345 }
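
/* Illustration (hedged: the exact iproute2 spelling may vary by version):
 * this handler backs "ip ntable change", e.g.
 *
 *	ip ntable change name arp_cache thresh1 256
 *	ip ntable change name arp_cache dev eth0 queue 8
 *
 * Per-device parameters arrive nested in NDTA_PARMS with NDTPA_IFINDEX
 * set; the gc_thresh and gc_interval knobs are global and, as checked
 * above, accepted only from init_net.
 */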
2346 
2347 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2348 				    struct netlink_ext_ack *extack)
2349 {
2350 	struct ndtmsg *ndtm;
2351 
2352 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2353 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2354 		return -EINVAL;
2355 	}
2356 
2357 	ndtm = nlmsg_data(nlh);
2358 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2359 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2360 		return -EINVAL;
2361 	}
2362 
2363 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2364 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2365 		return -EINVAL;
2366 	}
2367 
2368 	return 0;
2369 }
2370 
2371 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2372 {
2373 	const struct nlmsghdr *nlh = cb->nlh;
2374 	struct net *net = sock_net(skb->sk);
2375 	int family, tidx, nidx = 0;
2376 	int tbl_skip = cb->args[0];
2377 	int neigh_skip = cb->args[1];
2378 	struct neigh_table *tbl;
2379 
2380 	if (cb->strict_check) {
2381 		int err = neightbl_valid_dump_info(nlh, cb->extack);
2382 
2383 		if (err < 0)
2384 			return err;
2385 	}
2386 
2387 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2388 
2389 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2390 		struct neigh_parms *p;
2391 
2392 		tbl = neigh_tables[tidx];
2393 		if (!tbl)
2394 			continue;
2395 
2396 		if (tidx < tbl_skip || (family && tbl->family != family))
2397 			continue;
2398 
2399 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2400 				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2401 				       NLM_F_MULTI) < 0)
2402 			break;
2403 
2404 		nidx = 0;
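		/* start after tbl->parms, the default set, which was already
		 * reported via neightbl_fill_info() above
		 */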
2405 		p = list_next_entry(&tbl->parms, list);
2406 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2407 			if (!net_eq(neigh_parms_net(p), net))
2408 				continue;
2409 
2410 			if (nidx < neigh_skip)
2411 				goto next;
2412 
2413 			if (neightbl_fill_param_info(skb, tbl, p,
2414 						     NETLINK_CB(cb->skb).portid,
2415 						     nlh->nlmsg_seq,
2416 						     RTM_NEWNEIGHTBL,
2417 						     NLM_F_MULTI) < 0)
2418 				goto out;
2419 		next:
2420 			nidx++;
2421 		}
2422 
2423 		neigh_skip = 0;
2424 	}
2425 out:
2426 	cb->args[0] = tidx;
2427 	cb->args[1] = nidx;
2428 
2429 	return skb->len;
2430 }
2431 
2432 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2433 			   u32 pid, u32 seq, int type, unsigned int flags)
2434 {
2435 	unsigned long now = jiffies;
2436 	struct nda_cacheinfo ci;
2437 	struct nlmsghdr *nlh;
2438 	struct ndmsg *ndm;
2439 
2440 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2441 	if (nlh == NULL)
2442 		return -EMSGSIZE;
2443 
2444 	ndm = nlmsg_data(nlh);
2445 	ndm->ndm_family	 = neigh->ops->family;
2446 	ndm->ndm_pad1    = 0;
2447 	ndm->ndm_pad2    = 0;
2448 	ndm->ndm_flags	 = neigh->flags;
2449 	ndm->ndm_type	 = neigh->type;
2450 	ndm->ndm_ifindex = neigh->dev->ifindex;
2451 
2452 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2453 		goto nla_put_failure;
2454 
2455 	read_lock_bh(&neigh->lock);
2456 	ndm->ndm_state	 = neigh->nud_state;
2457 	if (neigh->nud_state & NUD_VALID) {
2458 		char haddr[MAX_ADDR_LEN];
2459 
2460 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2461 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2462 			read_unlock_bh(&neigh->lock);
2463 			goto nla_put_failure;
2464 		}
2465 	}
2466 
2467 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2468 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2469 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2470 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2471 	read_unlock_bh(&neigh->lock);
2472 
2473 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2474 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2475 		goto nla_put_failure;
2476 
2477 	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2478 		goto nla_put_failure;
2479 
2480 	nlmsg_end(skb, nlh);
2481 	return 0;
2482 
2483 nla_put_failure:
2484 	nlmsg_cancel(skb, nlh);
2485 	return -EMSGSIZE;
2486 }
2487 
2488 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2489 			    u32 pid, u32 seq, int type, unsigned int flags,
2490 			    struct neigh_table *tbl)
2491 {
2492 	struct nlmsghdr *nlh;
2493 	struct ndmsg *ndm;
2494 
2495 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2496 	if (nlh == NULL)
2497 		return -EMSGSIZE;
2498 
2499 	ndm = nlmsg_data(nlh);
2500 	ndm->ndm_family	 = tbl->family;
2501 	ndm->ndm_pad1    = 0;
2502 	ndm->ndm_pad2    = 0;
2503 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2504 	ndm->ndm_type	 = RTN_UNICAST;
2505 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2506 	ndm->ndm_state	 = NUD_NONE;
2507 
2508 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2509 		goto nla_put_failure;
2510 
2511 	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2512 		goto nla_put_failure;
2513 
2514 	nlmsg_end(skb, nlh);
2515 	return 0;
2516 
2517 nla_put_failure:
2518 	nlmsg_cancel(skb, nlh);
2519 	return -EMSGSIZE;
2520 }
2521 
2522 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2523 {
2524 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2525 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2526 }
2527 
2528 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2529 {
2530 	struct net_device *master;
2531 
2532 	if (!master_idx)
2533 		return false;
2534 
2535 	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2536 	if (!master || master->ifindex != master_idx)
2537 		return true;
2538 
2539 	return false;
2540 }
2541 
2542 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2543 {
2544 	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2545 		return true;
2546 
2547 	return false;
2548 }
2549 
2550 struct neigh_dump_filter {
2551 	int master_idx;
2552 	int dev_idx;
2553 };
2554 
2555 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2556 			    struct netlink_callback *cb,
2557 			    struct neigh_dump_filter *filter)
2558 {
2559 	struct net *net = sock_net(skb->sk);
2560 	struct neighbour *n;
2561 	int rc, h, s_h = cb->args[1];
2562 	int idx, s_idx = idx = cb->args[2];
2563 	struct neigh_hash_table *nht;
2564 	unsigned int flags = NLM_F_MULTI;
2565 
2566 	if (filter->dev_idx || filter->master_idx)
2567 		flags |= NLM_F_DUMP_FILTERED;
2568 
2569 	rcu_read_lock_bh();
2570 	nht = rcu_dereference_bh(tbl->nht);
2571 
2572 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2573 		if (h > s_h)
2574 			s_idx = 0;
2575 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2576 		     n != NULL;
2577 		     n = rcu_dereference_bh(n->next)) {
2578 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2579 				goto next;
2580 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2581 			    neigh_master_filtered(n->dev, filter->master_idx))
2582 				goto next;
2583 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2584 					    cb->nlh->nlmsg_seq,
2585 					    RTM_NEWNEIGH,
2586 					    flags) < 0) {
2587 				rc = -1;
2588 				goto out;
2589 			}
2590 next:
2591 			idx++;
2592 		}
2593 	}
2594 	rc = skb->len;
2595 out:
2596 	rcu_read_unlock_bh();
2597 	cb->args[1] = h;
2598 	cb->args[2] = idx;
2599 	return rc;
2600 }
2601 
2602 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2603 			     struct netlink_callback *cb,
2604 			     struct neigh_dump_filter *filter)
2605 {
2606 	struct pneigh_entry *n;
2607 	struct net *net = sock_net(skb->sk);
2608 	int rc, h, s_h = cb->args[3];
2609 	int idx, s_idx = idx = cb->args[4];
2610 	unsigned int flags = NLM_F_MULTI;
2611 
2612 	if (filter->dev_idx || filter->master_idx)
2613 		flags |= NLM_F_DUMP_FILTERED;
2614 
2615 	read_lock_bh(&tbl->lock);
2616 
2617 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2618 		if (h > s_h)
2619 			s_idx = 0;
2620 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2621 			if (idx < s_idx || pneigh_net(n) != net)
2622 				goto next;
2623 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2624 			    neigh_master_filtered(n->dev, filter->master_idx))
2625 				goto next;
2626 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2627 					    cb->nlh->nlmsg_seq,
2628 					    RTM_NEWNEIGH, flags, tbl) < 0) {
2629 				read_unlock_bh(&tbl->lock);
2630 				rc = -1;
2631 				goto out;
2632 			}
2633 		next:
2634 			idx++;
2635 		}
2636 	}
2637 
2638 	read_unlock_bh(&tbl->lock);
2639 	rc = skb->len;
2640 out:
2641 	cb->args[3] = h;
2642 	cb->args[4] = idx;
2643 	return rc;
2645 }
2646 
2647 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2648 				bool strict_check,
2649 				struct neigh_dump_filter *filter,
2650 				struct netlink_ext_ack *extack)
2651 {
2652 	struct nlattr *tb[NDA_MAX + 1];
2653 	int err, i;
2654 
2655 	if (strict_check) {
2656 		struct ndmsg *ndm;
2657 
2658 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2659 			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2660 			return -EINVAL;
2661 		}
2662 
2663 		ndm = nlmsg_data(nlh);
2664 		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2665 		    ndm->ndm_state || ndm->ndm_type) {
2666 			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2667 			return -EINVAL;
2668 		}
2669 
2670 		if (ndm->ndm_flags & ~NTF_PROXY) {
2671 			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2672 			return -EINVAL;
2673 		}
2674 
2675 		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2676 						    tb, NDA_MAX, nda_policy,
2677 						    extack);
2678 	} else {
2679 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2680 					     NDA_MAX, nda_policy, extack);
2681 	}
2682 	if (err < 0)
2683 		return err;
2684 
2685 	for (i = 0; i <= NDA_MAX; ++i) {
2686 		if (!tb[i])
2687 			continue;
2688 
2689 		/* all new attributes should require strict_check */
2690 		switch (i) {
2691 		case NDA_IFINDEX:
2692 			filter->dev_idx = nla_get_u32(tb[i]);
2693 			break;
2694 		case NDA_MASTER:
2695 			filter->master_idx = nla_get_u32(tb[i]);
2696 			break;
2697 		default:
2698 			if (strict_check) {
2699 				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2700 				return -EINVAL;
2701 			}
2702 		}
2703 	}
2704 
2705 	return 0;
2706 }
2707 
2708 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2709 {
2710 	const struct nlmsghdr *nlh = cb->nlh;
2711 	struct neigh_dump_filter filter = {};
2712 	struct neigh_table *tbl;
2713 	int t, family, s_t;
2714 	int proxy = 0;
2715 	int err;
2716 
2717 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2718 
2719 	/* check whether a full ndmsg structure is present; the family
2720 	 * member sits at the same offset in both structures
2721 	 */
2722 	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2723 	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2724 		proxy = 1;
2725 
2726 	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2727 	if (err < 0 && cb->strict_check)
2728 		return err;
2729 
2730 	s_t = cb->args[0];
2731 
2732 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2733 		tbl = neigh_tables[t];
2734 
2735 		if (!tbl)
2736 			continue;
2737 		if (t < s_t || (family && tbl->family != family))
2738 			continue;
2739 		if (t > s_t)
2740 			memset(&cb->args[1], 0, sizeof(cb->args) -
2741 						sizeof(cb->args[0]));
2742 		if (proxy)
2743 			err = pneigh_dump_table(tbl, skb, cb, &filter);
2744 		else
2745 			err = neigh_dump_table(tbl, skb, cb, &filter);
2746 		if (err < 0)
2747 			break;
2748 	}
2749 
2750 	cb->args[0] = t;
2751 	return skb->len;
2752 }
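
/* Resume state shared by the dump handlers above: cb->args[0] holds the
 * table index, args[1]/args[2] the hash bucket and chain position used by
 * neigh_dump_table(), and args[3]/args[4] the equivalents used by
 * pneigh_dump_table().
 */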
2753 
2754 static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2755 			       struct neigh_table **tbl,
2756 			       void **dst, int *dev_idx, u8 *ndm_flags,
2757 			       struct netlink_ext_ack *extack)
2758 {
2759 	struct nlattr *tb[NDA_MAX + 1];
2760 	struct ndmsg *ndm;
2761 	int err, i;
2762 
2763 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2764 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2765 		return -EINVAL;
2766 	}
2767 
2768 	ndm = nlmsg_data(nlh);
2769 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2770 	    ndm->ndm_type) {
2771 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2772 		return -EINVAL;
2773 	}
2774 
2775 	if (ndm->ndm_flags & ~NTF_PROXY) {
2776 		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2777 		return -EINVAL;
2778 	}
2779 
2780 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2781 					    NDA_MAX, nda_policy, extack);
2782 	if (err < 0)
2783 		return err;
2784 
2785 	*ndm_flags = ndm->ndm_flags;
2786 	*dev_idx = ndm->ndm_ifindex;
2787 	*tbl = neigh_find_table(ndm->ndm_family);
2788 	if (*tbl == NULL) {
2789 		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2790 		return -EAFNOSUPPORT;
2791 	}
2792 
2793 	for (i = 0; i <= NDA_MAX; ++i) {
2794 		if (!tb[i])
2795 			continue;
2796 
2797 		switch (i) {
2798 		case NDA_DST:
2799 			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2800 				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2801 				return -EINVAL;
2802 			}
2803 			*dst = nla_data(tb[i]);
2804 			break;
2805 		default:
2806 			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2807 			return -EINVAL;
2808 		}
2809 	}
2810 
2811 	return 0;
2812 }
2813 
2814 static inline size_t neigh_nlmsg_size(void)
2815 {
2816 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2817 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2818 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2819 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2820 	       + nla_total_size(4)  /* NDA_PROBES */
2821 	       + nla_total_size(1); /* NDA_PROTOCOL */
2822 }
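
/* This is a worst-case bound: both addresses are sized at MAX_ADDR_LEN,
 * so a single entry always fits and -EMSGSIZE from neigh_fill_info()
 * indicates a bug (see the WARN_ON() in __neigh_notify()).
 */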
2823 
2824 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2825 			   u32 pid, u32 seq)
2826 {
2827 	struct sk_buff *skb;
2828 	int err = 0;
2829 
2830 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2831 	if (!skb)
2832 		return -ENOBUFS;
2833 
2834 	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2835 	if (err) {
2836 		kfree_skb(skb);
2837 		goto errout;
2838 	}
2839 
2840 	err = rtnl_unicast(skb, net, pid);
2841 errout:
2842 	return err;
2843 }
2844 
2845 static inline size_t pneigh_nlmsg_size(void)
2846 {
2847 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2848 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2849 	       + nla_total_size(1); /* NDA_PROTOCOL */
2850 }
2851 
2852 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2853 			    u32 pid, u32 seq, struct neigh_table *tbl)
2854 {
2855 	struct sk_buff *skb;
2856 	int err = 0;
2857 
2858 	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2859 	if (!skb)
2860 		return -ENOBUFS;
2861 
2862 	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2863 	if (err) {
2864 		kfree_skb(skb);
2865 		goto errout;
2866 	}
2867 
2868 	err = rtnl_unicast(skb, net, pid);
2869 errout:
2870 	return err;
2871 }
2872 
2873 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2874 		     struct netlink_ext_ack *extack)
2875 {
2876 	struct net *net = sock_net(in_skb->sk);
2877 	struct net_device *dev = NULL;
2878 	struct neigh_table *tbl = NULL;
2879 	struct neighbour *neigh;
2880 	void *dst = NULL;
2881 	u8 ndm_flags = 0;
2882 	int dev_idx = 0;
2883 	int err;
2884 
2885 	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2886 				  extack);
2887 	if (err < 0)
2888 		return err;
2889 
2890 	if (dev_idx) {
2891 		dev = __dev_get_by_index(net, dev_idx);
2892 		if (!dev) {
2893 			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
2894 			return -ENODEV;
2895 		}
2896 	}
2897 
2898 	if (!dst) {
2899 		NL_SET_ERR_MSG(extack, "Network address not specified");
2900 		return -EINVAL;
2901 	}
2902 
2903 	if (ndm_flags & NTF_PROXY) {
2904 		struct pneigh_entry *pn;
2905 
2906 		pn = pneigh_lookup(tbl, net, dst, dev, 0);
2907 		if (!pn) {
2908 			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
2909 			return -ENOENT;
2910 		}
2911 		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
2912 					nlh->nlmsg_seq, tbl);
2913 	}
2914 
2915 	if (!dev) {
2916 		NL_SET_ERR_MSG(extack, "No device specified");
2917 		return -EINVAL;
2918 	}
2919 
2920 	neigh = neigh_lookup(tbl, dst, dev);
2921 	if (!neigh) {
2922 		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
2923 		return -ENOENT;
2924 	}
2925 
2926 	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
2927 			      nlh->nlmsg_seq);
2928 
2929 	neigh_release(neigh);
2930 
2931 	return err;
2932 }
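
/* Illustration (sketch of the request this handler serves): a lookup such
 * as
 *
 *	ip neigh get 192.0.2.1 dev eth0
 *
 * arrives as RTM_GETNEIGH with NDA_DST and ndm_ifindex set; adding "proxy"
 * sets NTF_PROXY in ndm_flags and is answered from the pneigh table
 * instead.
 */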
2933 
2934 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2935 {
2936 	int chain;
2937 	struct neigh_hash_table *nht;
2938 
2939 	rcu_read_lock_bh();
2940 	nht = rcu_dereference_bh(tbl->nht);
2941 
2942 	read_lock(&tbl->lock); /* avoid resizes */
2943 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2944 		struct neighbour *n;
2945 
2946 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2947 		     n != NULL;
2948 		     n = rcu_dereference_bh(n->next))
2949 			cb(n, cookie);
2950 	}
2951 	read_unlock(&tbl->lock);
2952 	rcu_read_unlock_bh();
2953 }
2954 EXPORT_SYMBOL(neigh_for_each);
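
/* Usage sketch (count_cb and the counter are hypothetical, shown only to
 * illustrate the callback contract; cb runs under tbl->lock and RCU-BH,
 * so it must not sleep):
 *
 *	static void count_cb(struct neighbour *n, void *cookie)
 *	{
 *		(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int count = 0;
 *	neigh_for_each(&arp_tbl, count_cb, &count);
 */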
2955 
2956 /* The tbl->lock must be held as a writer and BH disabled. */
2957 void __neigh_for_each_release(struct neigh_table *tbl,
2958 			      int (*cb)(struct neighbour *))
2959 {
2960 	int chain;
2961 	struct neigh_hash_table *nht;
2962 
2963 	nht = rcu_dereference_protected(tbl->nht,
2964 					lockdep_is_held(&tbl->lock));
2965 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2966 		struct neighbour *n;
2967 		struct neighbour __rcu **np;
2968 
2969 		np = &nht->hash_buckets[chain];
2970 		while ((n = rcu_dereference_protected(*np,
2971 					lockdep_is_held(&tbl->lock))) != NULL) {
2972 			int release;
2973 
2974 			write_lock(&n->lock);
2975 			release = cb(n);
2976 			if (release) {
2977 				rcu_assign_pointer(*np,
2978 					rcu_dereference_protected(n->next,
2979 						lockdep_is_held(&tbl->lock)));
2980 				neigh_mark_dead(n);
2981 			} else
2982 				np = &n->next;
2983 			write_unlock(&n->lock);
2984 			if (release)
2985 				neigh_cleanup_and_release(n);
2986 		}
2987 	}
2988 }
2989 EXPORT_SYMBOL(__neigh_for_each_release);
2990 
2991 int neigh_xmit(int index, struct net_device *dev,
2992 	       const void *addr, struct sk_buff *skb)
2993 {
2994 	int err = -EAFNOSUPPORT;
2995 	if (likely(index < NEIGH_NR_TABLES)) {
2996 		struct neigh_table *tbl;
2997 		struct neighbour *neigh;
2998 
2999 		tbl = neigh_tables[index];
3000 		if (!tbl)
3001 			goto out;
3002 		rcu_read_lock_bh();
3003 		if (index == NEIGH_ARP_TABLE) {
3004 			u32 key = *((u32 *)addr);
3005 
3006 			neigh = __ipv4_neigh_lookup_noref(dev, key);
3007 		} else {
3008 			neigh = __neigh_lookup_noref(tbl, addr, dev);
3009 		}
3010 		if (!neigh)
3011 			neigh = __neigh_create(tbl, addr, dev, false);
3012 		err = PTR_ERR(neigh);
3013 		if (IS_ERR(neigh)) {
3014 			rcu_read_unlock_bh();
3015 			goto out_kfree_skb;
3016 		}
3017 		err = neigh->output(neigh, skb);
3018 		rcu_read_unlock_bh();
3019 	} else if (index == NEIGH_LINK_TABLE) {
3021 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3022 				      addr, NULL, skb->len);
3023 		if (err < 0)
3024 			goto out_kfree_skb;
3025 		err = dev_queue_xmit(skb);
3026 	}
3027 out:
3028 	return err;
3029 out_kfree_skb:
3030 	kfree_skb(skb);
3031 	goto out;
3032 }
3033 EXPORT_SYMBOL(neigh_xmit);
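
/* Usage sketch (variable names are hypothetical): an L3 protocol such as
 * MPLS resolves its nexthop this way, e.g. for an IPv4 via address:
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &via_ipv4_addr, skb);
 *
 * On the NEIGH_LINK_TABLE path no resolution happens at all: "addr" is
 * used directly as the destination hardware address.
 */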
3034 
3035 #ifdef CONFIG_PROC_FS
3036 
3037 static struct neighbour *neigh_get_first(struct seq_file *seq)
3038 {
3039 	struct neigh_seq_state *state = seq->private;
3040 	struct net *net = seq_file_net(seq);
3041 	struct neigh_hash_table *nht = state->nht;
3042 	struct neighbour *n = NULL;
3043 	int bucket;
3044 
3045 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3046 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3047 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3048 
3049 		while (n) {
3050 			if (!net_eq(dev_net(n->dev), net))
3051 				goto next;
3052 			if (state->neigh_sub_iter) {
3053 				loff_t fakep = 0;
3054 				void *v;
3055 
3056 				v = state->neigh_sub_iter(state, n, &fakep);
3057 				if (!v)
3058 					goto next;
3059 			}
3060 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3061 				break;
3062 			if (n->nud_state & ~NUD_NOARP)
3063 				break;
3064 next:
3065 			n = rcu_dereference_bh(n->next);
3066 		}
3067 
3068 		if (n)
3069 			break;
3070 	}
3071 	state->bucket = bucket;
3072 
3073 	return n;
3074 }
3075 
3076 static struct neighbour *neigh_get_next(struct seq_file *seq,
3077 					struct neighbour *n,
3078 					loff_t *pos)
3079 {
3080 	struct neigh_seq_state *state = seq->private;
3081 	struct net *net = seq_file_net(seq);
3082 	struct neigh_hash_table *nht = state->nht;
3083 
3084 	if (state->neigh_sub_iter) {
3085 		void *v = state->neigh_sub_iter(state, n, pos);
3086 		if (v)
3087 			return n;
3088 	}
3089 	n = rcu_dereference_bh(n->next);
3090 
3091 	while (1) {
3092 		while (n) {
3093 			if (!net_eq(dev_net(n->dev), net))
3094 				goto next;
3095 			if (state->neigh_sub_iter) {
3096 				void *v = state->neigh_sub_iter(state, n, pos);
3097 				if (v)
3098 					return n;
3099 				goto next;
3100 			}
3101 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3102 				break;
3103 
3104 			if (n->nud_state & ~NUD_NOARP)
3105 				break;
3106 next:
3107 			n = rcu_dereference_bh(n->next);
3108 		}
3109 
3110 		if (n)
3111 			break;
3112 
3113 		if (++state->bucket >= (1 << nht->hash_shift))
3114 			break;
3115 
3116 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3117 	}
3118 
3119 	if (n && pos)
3120 		--(*pos);
3121 	return n;
3122 }
3123 
3124 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3125 {
3126 	struct neighbour *n = neigh_get_first(seq);
3127 
3128 	if (n) {
3129 		--(*pos);
3130 		while (*pos) {
3131 			n = neigh_get_next(seq, n, pos);
3132 			if (!n)
3133 				break;
3134 		}
3135 	}
3136 	return *pos ? NULL : n;
3137 }
3138 
3139 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3140 {
3141 	struct neigh_seq_state *state = seq->private;
3142 	struct net *net = seq_file_net(seq);
3143 	struct neigh_table *tbl = state->tbl;
3144 	struct pneigh_entry *pn = NULL;
3145 	int bucket;
3146 
3147 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3148 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3149 		pn = tbl->phash_buckets[bucket];
3150 		while (pn && !net_eq(pneigh_net(pn), net))
3151 			pn = pn->next;
3152 		if (pn)
3153 			break;
3154 	}
3155 	state->bucket = bucket;
3156 
3157 	return pn;
3158 }
3159 
3160 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3161 					    struct pneigh_entry *pn,
3162 					    loff_t *pos)
3163 {
3164 	struct neigh_seq_state *state = seq->private;
3165 	struct net *net = seq_file_net(seq);
3166 	struct neigh_table *tbl = state->tbl;
3167 
3168 	do {
3169 		pn = pn->next;
3170 	} while (pn && !net_eq(pneigh_net(pn), net));
3171 
3172 	while (!pn) {
3173 		if (++state->bucket > PNEIGH_HASHMASK)
3174 			break;
3175 		pn = tbl->phash_buckets[state->bucket];
3176 		while (pn && !net_eq(pneigh_net(pn), net))
3177 			pn = pn->next;
3178 		if (pn)
3179 			break;
3180 	}
3181 
3182 	if (pn && pos)
3183 		--(*pos);
3184 
3185 	return pn;
3186 }
3187 
3188 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3189 {
3190 	struct pneigh_entry *pn = pneigh_get_first(seq);
3191 
3192 	if (pn) {
3193 		--(*pos);
3194 		while (*pos) {
3195 			pn = pneigh_get_next(seq, pn, pos);
3196 			if (!pn)
3197 				break;
3198 		}
3199 	}
3200 	return *pos ? NULL : pn;
3201 }
3202 
3203 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3204 {
3205 	struct neigh_seq_state *state = seq->private;
3206 	void *rc;
3207 	loff_t idxpos = *pos;
3208 
3209 	rc = neigh_get_idx(seq, &idxpos);
3210 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3211 		rc = pneigh_get_idx(seq, &idxpos);
3212 
3213 	return rc;
3214 }
3215 
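/* The iterator takes rcu_read_lock_bh() and tbl->lock for reading in
 * neigh_seq_start() and holds both until neigh_seq_stop(), so ->show()
 * implementations built on it must not sleep.
 */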
3216 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3217 	__acquires(tbl->lock)
3218 	__acquires(rcu_bh)
3219 {
3220 	struct neigh_seq_state *state = seq->private;
3221 
3222 	state->tbl = tbl;
3223 	state->bucket = 0;
3224 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3225 
3226 	rcu_read_lock_bh();
3227 	state->nht = rcu_dereference_bh(tbl->nht);
3228 	read_lock(&tbl->lock);
3229 
3230 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3231 }
3232 EXPORT_SYMBOL(neigh_seq_start);
3233 
3234 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3235 {
3236 	struct neigh_seq_state *state;
3237 	void *rc;
3238 
3239 	if (v == SEQ_START_TOKEN) {
3240 		rc = neigh_get_first(seq);
3241 		goto out;
3242 	}
3243 
3244 	state = seq->private;
3245 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3246 		rc = neigh_get_next(seq, v, NULL);
3247 		if (rc)
3248 			goto out;
3249 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3250 			rc = pneigh_get_first(seq);
3251 	} else {
3252 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3253 		rc = pneigh_get_next(seq, v, NULL);
3254 	}
3255 out:
3256 	++(*pos);
3257 	return rc;
3258 }
3259 EXPORT_SYMBOL(neigh_seq_next);
3260 
3261 void neigh_seq_stop(struct seq_file *seq, void *v)
3262 	__releases(tbl->lock)
3263 	__releases(rcu_bh)
3264 {
3265 	struct neigh_seq_state *state = seq->private;
3266 	struct neigh_table *tbl = state->tbl;
3267 
3268 	read_unlock(&tbl->lock);
3269 	rcu_read_unlock_bh();
3270 }
3271 EXPORT_SYMBOL(neigh_seq_stop);
3272 
3273 /* statistics via seq_file */
3274 
3275 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3276 {
3277 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3278 	int cpu;
3279 
3280 	if (*pos == 0)
3281 		return SEQ_START_TOKEN;
3282 
3283 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3284 		if (!cpu_possible(cpu))
3285 			continue;
3286 		*pos = cpu+1;
3287 		return per_cpu_ptr(tbl->stats, cpu);
3288 	}
3289 	return NULL;
3290 }
3291 
3292 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3293 {
3294 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3295 	int cpu;
3296 
3297 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3298 		if (!cpu_possible(cpu))
3299 			continue;
3300 		*pos = cpu+1;
3301 		return per_cpu_ptr(tbl->stats, cpu);
3302 	}
3303 	(*pos)++;
3304 	return NULL;
3305 }
3306 
3307 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3308 {
3309 
3310 }
3311 
3312 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3313 {
3314 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3315 	struct neigh_statistics *st = v;
3316 
3317 	if (v == SEQ_START_TOKEN) {
3318 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3319 		return 0;
3320 	}
3321 
3322 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
3323 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
3324 		   atomic_read(&tbl->entries),
3325 
3326 		   st->allocs,
3327 		   st->destroys,
3328 		   st->hash_grows,
3329 
3330 		   st->lookups,
3331 		   st->hits,
3332 
3333 		   st->res_failed,
3334 
3335 		   st->rcv_probes_mcast,
3336 		   st->rcv_probes_ucast,
3337 
3338 		   st->periodic_gc_runs,
3339 		   st->forced_gc_runs,
3340 		   st->unres_discards,
3341 		   st->table_fulls
3342 		   );
3343 
3344 	return 0;
3345 }
3346 
3347 static const struct seq_operations neigh_stat_seq_ops = {
3348 	.start	= neigh_stat_seq_start,
3349 	.next	= neigh_stat_seq_next,
3350 	.stop	= neigh_stat_seq_stop,
3351 	.show	= neigh_stat_seq_show,
3352 };
3353 #endif /* CONFIG_PROC_FS */
3354 
3355 static void __neigh_notify(struct neighbour *n, int type, int flags,
3356 			   u32 pid)
3357 {
3358 	struct net *net = dev_net(n->dev);
3359 	struct sk_buff *skb;
3360 	int err = -ENOBUFS;
3361 
3362 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3363 	if (skb == NULL)
3364 		goto errout;
3365 
3366 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3367 	if (err < 0) {
3368 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3369 		WARN_ON(err == -EMSGSIZE);
3370 		kfree_skb(skb);
3371 		goto errout;
3372 	}
3373 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3374 	return;
3375 errout:
3376 	if (err < 0)
3377 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3378 }
3379 
3380 void neigh_app_ns(struct neighbour *n)
3381 {
3382 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3383 }
3384 EXPORT_SYMBOL(neigh_app_ns);
3385 
3386 #ifdef CONFIG_SYSCTL
3387 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3388 
3389 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3390 			   void *buffer, size_t *lenp, loff_t *ppos)
3391 {
3392 	int size, ret;
3393 	struct ctl_table tmp = *ctl;
3394 
3395 	tmp.extra1 = SYSCTL_ZERO;
3396 	tmp.extra2 = &unres_qlen_max;
3397 	tmp.data = &size;
3398 
3399 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3400 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3401 
3402 	if (write && !ret)
3403 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3404 	return ret;
3405 }
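
/* Note: this mirrors the NDTPA_QUEUE_LEN handling above; the legacy
 * "unres_qlen" sysctl is stored in bytes, so values written in packets
 * are rounded to multiples of SKB_TRUESIZE(ETH_FRAME_LEN) on the way in.
 */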
3406 
3407 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3408 						   int family)
3409 {
3410 	switch (family) {
3411 	case AF_INET:
3412 		return __in_dev_arp_parms_get_rcu(dev);
3413 	case AF_INET6:
3414 		return __in6_dev_nd_parms_get_rcu(dev);
3415 	}
3416 	return NULL;
3417 }
3418 
3419 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3420 				  int index)
3421 {
3422 	struct net_device *dev;
3423 	int family = neigh_parms_family(p);
3424 
3425 	rcu_read_lock();
3426 	for_each_netdev_rcu(net, dev) {
3427 		struct neigh_parms *dst_p =
3428 				neigh_get_dev_parms_rcu(dev, family);
3429 
3430 		if (dst_p && !test_bit(index, dst_p->data_state))
3431 			dst_p->data[index] = p->data[index];
3432 	}
3433 	rcu_read_unlock();
3434 }
3435 
3436 static void neigh_proc_update(struct ctl_table *ctl, int write)
3437 {
3438 	struct net_device *dev = ctl->extra1;
3439 	struct neigh_parms *p = ctl->extra2;
3440 	struct net *net = neigh_parms_net(p);
3441 	int index = (int *) ctl->data - p->data;
3442 
3443 	if (!write)
3444 		return;
3445 
3446 	set_bit(index, p->data_state);
3447 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3448 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3449 	if (!dev) /* NULL dev means this is a default value */
3450 		neigh_copy_dflt_parms(net, p, index);
3451 }
3452 
3453 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3454 					   void *buffer, size_t *lenp,
3455 					   loff_t *ppos)
3456 {
3457 	struct ctl_table tmp = *ctl;
3458 	int ret;
3459 
3460 	tmp.extra1 = SYSCTL_ZERO;
3461 	tmp.extra2 = SYSCTL_INT_MAX;
3462 
3463 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3464 	neigh_proc_update(ctl, write);
3465 	return ret;
3466 }
3467 
3468 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3469 			size_t *lenp, loff_t *ppos)
3470 {
3471 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3472 
3473 	neigh_proc_update(ctl, write);
3474 	return ret;
3475 }
3476 EXPORT_SYMBOL(neigh_proc_dointvec);
3477 
3478 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3479 				size_t *lenp, loff_t *ppos)
3480 {
3481 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3482 
3483 	neigh_proc_update(ctl, write);
3484 	return ret;
3485 }
3486 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3487 
3488 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3489 					      void *buffer, size_t *lenp,
3490 					      loff_t *ppos)
3491 {
3492 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3493 
3494 	neigh_proc_update(ctl, write);
3495 	return ret;
3496 }
3497 
3498 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3499 				   void *buffer, size_t *lenp, loff_t *ppos)
3500 {
3501 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3502 
3503 	neigh_proc_update(ctl, write);
3504 	return ret;
3505 }
3506 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3507 
3508 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3509 					  void *buffer, size_t *lenp,
3510 					  loff_t *ppos)
3511 {
3512 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3513 
3514 	neigh_proc_update(ctl, write);
3515 	return ret;
3516 }
3517 
3518 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3519 					  void *buffer, size_t *lenp,
3520 					  loff_t *ppos)
3521 {
3522 	struct neigh_parms *p = ctl->extra2;
3523 	int ret;
3524 
3525 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3526 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3527 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3528 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3529 	else
3530 		ret = -1;
3531 
3532 	if (write && ret == 0) {
3533 		/* update reachable_time as well; otherwise the change only
3534 		 * takes effect the next time neigh_periodic_work recomputes
3535 		 * it
3536 		 */
3537 		p->reachable_time =
3538 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3539 	}
3540 	return ret;
3541 }
3542 
3543 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3544 	(&((struct neigh_parms *) 0)->data[index])
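
/* NEIGH_PARMS_DATA_OFFSET() records the offset of data[index] measured
 * from a NULL struct neigh_parms; neigh_sysctl_register() later rebases
 * each .data pointer with "data += (long) p" so it points into the real
 * parms block.
 */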
3545 
3546 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3547 	[NEIGH_VAR_ ## attr] = { \
3548 		.procname	= name, \
3549 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3550 		.maxlen		= sizeof(int), \
3551 		.mode		= mval, \
3552 		.proc_handler	= proc, \
3553 	}
3554 
3555 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3556 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3557 
3558 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3559 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3560 
3561 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3562 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3563 
3564 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3565 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3566 
3567 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3568 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3569 
3570 static struct neigh_sysctl_table {
3571 	struct ctl_table_header *sysctl_header;
3572 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3573 } neigh_sysctl_template __read_mostly = {
3574 	.neigh_vars = {
3575 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3576 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3577 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3578 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3579 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3580 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3581 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3582 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3583 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3584 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3585 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3586 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3587 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3588 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3589 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3590 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3591 		[NEIGH_VAR_GC_INTERVAL] = {
3592 			.procname	= "gc_interval",
3593 			.maxlen		= sizeof(int),
3594 			.mode		= 0644,
3595 			.proc_handler	= proc_dointvec_jiffies,
3596 		},
3597 		[NEIGH_VAR_GC_THRESH1] = {
3598 			.procname	= "gc_thresh1",
3599 			.maxlen		= sizeof(int),
3600 			.mode		= 0644,
3601 			.extra1		= SYSCTL_ZERO,
3602 			.extra2		= SYSCTL_INT_MAX,
3603 			.proc_handler	= proc_dointvec_minmax,
3604 		},
3605 		[NEIGH_VAR_GC_THRESH2] = {
3606 			.procname	= "gc_thresh2",
3607 			.maxlen		= sizeof(int),
3608 			.mode		= 0644,
3609 			.extra1		= SYSCTL_ZERO,
3610 			.extra2		= SYSCTL_INT_MAX,
3611 			.proc_handler	= proc_dointvec_minmax,
3612 		},
3613 		[NEIGH_VAR_GC_THRESH3] = {
3614 			.procname	= "gc_thresh3",
3615 			.maxlen		= sizeof(int),
3616 			.mode		= 0644,
3617 			.extra1		= SYSCTL_ZERO,
3618 			.extra2		= SYSCTL_INT_MAX,
3619 			.proc_handler	= proc_dointvec_minmax,
3620 		},
3621 		{},
3622 	},
3623 };
3624 
3625 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3626 			  proc_handler *handler)
3627 {
3628 	int i;
3629 	struct neigh_sysctl_table *t;
3630 	const char *dev_name_source;
3631 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3632 	char *p_name;
3633 
3634 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3635 	if (!t)
3636 		goto err;
3637 
3638 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3639 		t->neigh_vars[i].data += (long) p;
3640 		t->neigh_vars[i].extra1 = dev;
3641 		t->neigh_vars[i].extra2 = p;
3642 	}
3643 
3644 	if (dev) {
3645 		dev_name_source = dev->name;
3646 		/* Terminate the table early */
3647 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3648 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3649 	} else {
3650 		struct neigh_table *tbl = p->tbl;
3651 		dev_name_source = "default";
3652 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3653 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3654 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3655 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3656 	}
3657 
3658 	if (handler) {
3659 		/* RetransTime */
3660 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3661 		/* ReachableTime */
3662 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3663 		/* RetransTime (in milliseconds)*/
3664 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3665 		/* ReachableTime (in milliseconds) */
3666 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3667 	} else {
3668 		/* These handlers update p->reachable_time after
3669 		 * base_reachable_time(_ms) is set, so the new interval takes
3670 		 * effect on the next neighbour update instead of waiting for
3671 		 * neigh_periodic_work to recompute it (which can take several
3672 		 * minutes).  Any handler replacing them should do the same.
3673 		 */
3674 		/* ReachableTime */
3675 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3676 			neigh_proc_base_reachable_time;
3677 		/* ReachableTime (in milliseconds) */
3678 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3679 			neigh_proc_base_reachable_time;
3680 	}
3681 
3682 	/* Don't export sysctls to unprivileged users */
3683 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3684 		t->neigh_vars[0].procname = NULL;
3685 
3686 	switch (neigh_parms_family(p)) {
3687 	case AF_INET:
3688 	case AF_INET:
3689 		p_name = "ipv4";
3690 		break;
3691 	case AF_INET6:
3692 		p_name = "ipv6";
3693 		break;
3694 	default:
3695 		BUG();
3696 
3697 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3698 		p_name, dev_name_source);
3699 	t->sysctl_header =
3700 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3701 	if (!t->sysctl_header)
3702 		goto free;
3703 
3704 	p->sysctl_table = t;
3705 	return 0;
3706 
3707 free:
3708 	kfree(t);
3709 err:
3710 	return -ENOBUFS;
3711 }
3712 EXPORT_SYMBOL(neigh_sysctl_register);
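
/* Illustration: for the ARP parms of "eth0" this registers entries such
 * as /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms; with a NULL
 * dev ("default") the gc_* knobs are included as well.
 */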
3713 
3714 void neigh_sysctl_unregister(struct neigh_parms *p)
3715 {
3716 	if (p->sysctl_table) {
3717 		struct neigh_sysctl_table *t = p->sysctl_table;
3718 		p->sysctl_table = NULL;
3719 		unregister_net_sysctl_table(t->sysctl_header);
3720 		kfree(t);
3721 	}
3722 }
3723 EXPORT_SYMBOL(neigh_sysctl_unregister);
3724 
3725 #endif	/* CONFIG_SYSCTL */
3726 
3727 static int __init neigh_init(void)
3728 {
3729 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3730 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3731 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3732 
3733 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3734 		      0);
3735 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3736 
3737 	return 0;
3738 }
3739 
3740 subsys_initcall(neigh_init);
3741