xref: /openbmc/linux/net/core/neighbour.c (revision dd3cb467)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Generic address resolution entity
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
8  *
9  *	Fixes:
10  *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
11  *	Harald Welte		Add neighbour cache statistics like rtstat
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/slab.h>
17 #include <linux/kmemleak.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/arp.h>
31 #include <net/dst.h>
32 #include <net/sock.h>
33 #include <net/netevent.h>
34 #include <net/netlink.h>
35 #include <linux/rtnetlink.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/log2.h>
39 #include <linux/inetdevice.h>
40 #include <net/addrconf.h>
41 
42 #include <trace/events/neigh.h>
43 
44 #define NEIGH_DEBUG 1
45 #define neigh_dbg(level, fmt, ...)		\
46 do {						\
47 	if (level <= NEIGH_DEBUG)		\
48 		pr_debug(fmt, ##__VA_ARGS__);	\
49 } while (0)
50 
51 #define PNEIGH_HASHMASK		0xF
52 
53 static void neigh_timer_handler(struct timer_list *t);
54 static void __neigh_notify(struct neighbour *n, int type, int flags,
55 			   u32 pid);
56 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
58 				    struct net_device *dev);
59 
60 #ifdef CONFIG_PROC_FS
61 static const struct seq_operations neigh_stat_seq_ops;
62 #endif
63 
64 /*
65    Neighbour hash table buckets are protected with rwlock tbl->lock.
66 
67    - All the scans/updates to hash buckets MUST be made under this lock.
68    - NOTHING clever should be made under this lock: no callbacks
69      to protocol backends, no attempts to send something to network.
70      It will result in deadlocks, if backend/driver wants to use neighbour
71      cache.
72    - If the entry requires some non-trivial actions, increase
73      its reference count and release table lock.
74 
75    Neighbour entries are protected:
76    - with reference count.
77    - with rwlock neigh->lock
78 
79    Reference count prevents destruction.
80 
81    neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is also used to protect other entry fields:
83     - timer
84     - resolution queue
85 
86    Again, nothing clever shall be made under neigh->lock,
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simple and does
   not make callbacks to neighbour tables.
90  */
91 
/* Fallback output handler installed on dead or failing entries: drop
 * the packet and report the network as unreachable.
 */
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
97 
/* Final teardown for an entry that has been unlinked from its table:
 * emit the tracepoint, send an RTM_DELNEIGH netlink notification and a
 * netevent, then drop the table's reference (which may free the entry).
 */
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}
105 
106 /*
107  * It is random distribution in the interval (1/2)*base...(3/2)*base.
108  * It corresponds to default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
110  */
111 
unsigned long neigh_rand_reach_time(unsigned long base)
{
	unsigned long half_base = base >> 1;

	/* Uniform random value in [base/2, 3*base/2); a zero base
	 * disables randomization and yields zero.
	 */
	if (!base)
		return 0;
	return half_base + (prandom_u32() % base);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
117 
/* Mark an entry dead and take it off the gc and managed lists.
 * Callers hold tbl->lock and n->lock (see neigh_del, neigh_flush_dev).
 */
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		/* gc_entries counts only entries sitting on the gc list */
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}
128 
/* Re-evaluate whether @n belongs on its table's gc list after a state
 * or flags change, and move it on or off the list accordingly.
 * Takes tbl->lock then n->lock; dead entries are left alone since
 * neigh_mark_dead() already detached them.
 */
static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
157 
/* Keep @n's membership on the table's managed list in sync with its
 * NTF_MANAGED flag.  Locking mirrors neigh_update_gc_list(): tbl->lock
 * then n->lock, and dead entries are skipped.
 */
static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
178 
179 static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
180 			       bool *gc_update, bool *managed_update)
181 {
182 	u32 ndm_flags, old_flags = neigh->flags;
183 
184 	if (!(flags & NEIGH_UPDATE_F_ADMIN))
185 		return;
186 
187 	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
188 	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;
189 
190 	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
191 		if (ndm_flags & NTF_EXT_LEARNED)
192 			neigh->flags |= NTF_EXT_LEARNED;
193 		else
194 			neigh->flags &= ~NTF_EXT_LEARNED;
195 		*notify = 1;
196 		*gc_update = true;
197 	}
198 	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
199 		if (ndm_flags & NTF_MANAGED)
200 			neigh->flags |= NTF_MANAGED;
201 		else
202 			neigh->flags &= ~NTF_MANAGED;
203 		*notify = 1;
204 		*managed_update = true;
205 	}
206 }
207 
/* Unlink @n from its hash chain (@np is the slot pointing at it) if the
 * table holds the only reference.  Returns true when the entry was
 * removed and released.  Caller holds tbl->lock; n->lock is taken here
 * so the refcount check and unlink are atomic w.r.t. other writers.
 */
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	/* notification and release happen outside n->lock */
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}
228 
/* Find @ndel on its hash chain and try to remove it via neigh_del().
 * Returns true if the entry was actually removed.  Caller holds
 * tbl->lock (asserted through rcu_dereference_protected()).
 */
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	/* hash functions return a full 32-bit value; keep the top bits */
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
251 
/* Synchronous garbage collection, run from neigh_alloc() when the
 * table is over its thresholds.  Walks the gc list (oldest entries at
 * the front) and drops unreferenced entries that are failed, NOARP,
 * multicast, or untouched for the last 5 seconds, until the entry
 * count is back under gc_thresh2.  Returns the number removed.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	/* how many entries we need to shed to get back to gc_thresh2 */
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
289 
290 static void neigh_add_timer(struct neighbour *n, unsigned long when)
291 {
292 	neigh_hold(n);
293 	if (unlikely(mod_timer(&n->timer, when))) {
294 		printk("NEIGH: BUG, double timer add, state is %x\n",
295 		       n->nud_state);
296 		dump_stack();
297 	}
298 }
299 
300 static int neigh_del_timer(struct neighbour *n)
301 {
302 	if ((n->nud_state & NUD_IN_TIMER) &&
303 	    del_timer(&n->timer)) {
304 		neigh_release(n);
305 		return 1;
306 	}
307 	return 0;
308 }
309 
/* Drop every skb on a proxy queue, releasing the device reference held
 * by each queued skb before freeing it.
 */
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
319 
/* Remove every entry for @dev (or all devices when @dev is NULL) from
 * the hash table; entries in NUD_PERMANENT state are kept when
 * @skip_perm is true.  Caller holds tbl->lock.
 */
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			/* unlink; RCU readers may still hold the entry */
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
373 
/* Flush all of @dev's entries (including permanent ones) after a
 * hardware address change.
 */
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
381 
/* Common device-down path: flush @dev's entries (optionally keeping
 * NUD_PERMANENT ones), drop matching proxy entries, then stop the
 * proxy timer and purge its queue.
 *
 * Note: pneigh_ifdown_and_unlock() releases the tbl->lock taken here.
 */
static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
393 
/* Carrier loss: flush learned entries for @dev but keep NUD_PERMANENT
 * ones.  Always returns 0.
 */
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);
400 
/* Device going down: flush every entry for @dev, permanent ones
 * included.  Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
407 
/* Allocate and minimally initialize a neighbour entry.  Unless
 * @exempt_from_gc, the allocation is charged against gc_entries and a
 * forced garbage collection may be attempted first when the table is
 * above its thresholds.  Returns NULL when over gc_thresh3 and nothing
 * could be reclaimed, or on allocation failure.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	/* charge this entry up front; undone on the failure path */
	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	/* not yet hashed; ___neigh_create() clears dead on insertion */
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
464 
/* Pick a random hash seed; the low bit is forced so the value is
 * always odd (and hence never zero).
 */
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}
469 
/* Allocate a hash table with 2^@shift buckets and fresh hash seeds.
 * Bucket arrays up to a page come from the slab; larger ones use whole
 * pages and are registered manually with kmemleak, since page
 * allocations are not tracked automatically.  Returns NULL on failure.
 */
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
498 
/* RCU callback freeing a retired hash table; the release path mirrors
 * the slab/page split in neigh_hash_alloc().
 */
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}
515 
/* Replace the table's hash with one of 2^@new_shift buckets, rehashing
 * every entry under tbl->lock (held by the caller).  The old table is
 * freed via call_rcu() so lockless readers traversing it stay safe.
 * Returns the new table, or the old one if allocation fails.
 */
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			/* new table has fresh seeds: recompute the hash */
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			/* push onto the head of the new chain */
			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
556 
/* Look up an entry by key and device under RCU, taking a reference on
 * success.  Returns NULL when no entry exists or the entry is being
 * freed concurrently.
 */
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		/* refcnt may already have hit zero; only hand the entry
		 * out if we managed to take a reference.
		 */
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
576 
/* Like neigh_lookup() but matches by key and network namespace only,
 * ignoring the device.  Takes a reference on the returned entry.
 */
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			/* entry may be dying; require a live reference */
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
607 
/* Create and insert a neighbour entry for (@pkey, @dev).
 *
 * Runs the protocol constructor, the driver's ndo_neigh_construct and
 * the per-device neigh_setup hook before inserting under tbl->lock.
 * If a concurrent creator already inserted an entry with the same key
 * and device, that existing entry is returned instead and the new one
 * is released.  When @want_ref, the returned entry carries an extra
 * reference for the caller.  Returns an ERR_PTR on failure.
 */
static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* start out well past the reachability window */
	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	/* parms went away underneath us; refuse the insert */
	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	/* a concurrent creator may have won the race; reuse its entry */
	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}
703 
/* Public entry creation: no extra flags, gc-managed entry. */
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
710 
711 static u32 pneigh_hash(const void *pkey, unsigned int key_len)
712 {
713 	u32 hash_val = *(u32 *)(pkey + key_len - 4);
714 	hash_val ^= (hash_val >> 16);
715 	hash_val ^= hash_val >> 8;
716 	hash_val ^= hash_val >> 4;
717 	hash_val &= PNEIGH_HASHMASK;
718 	return hash_val;
719 }
720 
721 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
722 					      struct net *net,
723 					      const void *pkey,
724 					      unsigned int key_len,
725 					      struct net_device *dev)
726 {
727 	while (n) {
728 		if (!memcmp(n->key, pkey, key_len) &&
729 		    net_eq(pneigh_net(n), net) &&
730 		    (n->dev == dev || !n->dev))
731 			return n;
732 		n = n->next;
733 	}
734 	return NULL;
735 }
736 
737 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
738 		struct net *net, const void *pkey, struct net_device *dev)
739 {
740 	unsigned int key_len = tbl->key_len;
741 	u32 hash_val = pneigh_hash(pkey, key_len);
742 
743 	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
744 				 net, pkey, key_len, dev);
745 }
746 EXPORT_SYMBOL_GPL(__pneigh_lookup);
747 
/* Look up a proxy entry, optionally creating it when @creat is set.
 * Lookup runs under the table read lock; creation sleeps (GFP_KERNEL)
 * and therefore requires the RTNL, which also serializes creators.
 * Returns the entry, or NULL on lookup miss / allocation or
 * constructor failure.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		netdev_put(dev, &n->dev_tracker);
		kfree(n);
		n = NULL;
		goto out;
	}

	/* publish on the chain head under the write lock */
	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
790 
791 
/* Remove the proxy entry matching key, device and netns exactly (no
 * wildcard-device match here, unlike lookup).  The destructor and the
 * device/memory release run after dropping tbl->lock.  Returns 0 on
 * success, -ENOENT when no entry matched.
 */
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			netdev_put(n->dev, &n->dev_tracker);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
816 
/* Unlink all proxy entries for @dev (or every entry when @dev is NULL)
 * onto a private free list, then release tbl->lock - which the CALLER
 * acquired - before running destructors and freeing, since those may
 * not be safe under the lock.
 */
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				/* chain onto the free list for later */
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		netdev_put(n->dev, &n->dev_tracker);
		kfree(n);
	}
	return -ENOENT;
}
846 
847 static void neigh_parms_destroy(struct neigh_parms *parms);
848 
/* Drop a reference on @parms, destroying it on the last put. */
static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
854 
855 /*
856  *	neighbour must already be out of the table;
857  *
858  */
/* Final destructor, reached when the last reference is dropped.  The
 * entry must already be marked dead (i.e. unlinked from its table);
 * a live entry indicates a refcounting bug and is leaked on purpose
 * after a warning.  Frees via kfree_rcu() for lockless readers.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	/* dead entries should have had their timers cancelled already */
	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	netdev_put(dev, &neigh->dev_tracker);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
891 
892 /* Neighbour state is suspicious;
893    disable fast path.
894 
895    Called with write_locked neigh.
896  */
/* Route output through the slow (resolving) path while the entry's
 * reachability is in doubt.  Called with neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}
903 
904 /* Neighbour state is OK;
905    enable fast path.
906 
907    Called with write_locked neigh.
908  */
/* Re-enable the fast connected-output path once the entry is known
 * reachable.  Called with neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
915 
916 static void neigh_periodic_work(struct work_struct *work)
917 {
918 	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
919 	struct neighbour *n;
920 	struct neighbour __rcu **np;
921 	unsigned int i;
922 	struct neigh_hash_table *nht;
923 
924 	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
925 
926 	write_lock_bh(&tbl->lock);
927 	nht = rcu_dereference_protected(tbl->nht,
928 					lockdep_is_held(&tbl->lock));
929 
930 	/*
931 	 *	periodically recompute ReachableTime from random function
932 	 */
933 
934 	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
935 		struct neigh_parms *p;
936 		tbl->last_rand = jiffies;
937 		list_for_each_entry(p, &tbl->parms_list, list)
938 			p->reachable_time =
939 				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
940 	}
941 
942 	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
943 		goto out;
944 
945 	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
946 		np = &nht->hash_buckets[i];
947 
948 		while ((n = rcu_dereference_protected(*np,
949 				lockdep_is_held(&tbl->lock))) != NULL) {
950 			unsigned int state;
951 
952 			write_lock(&n->lock);
953 
954 			state = n->nud_state;
955 			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
956 			    (n->flags & NTF_EXT_LEARNED)) {
957 				write_unlock(&n->lock);
958 				goto next_elt;
959 			}
960 
961 			if (time_before(n->used, n->confirmed))
962 				n->used = n->confirmed;
963 
964 			if (refcount_read(&n->refcnt) == 1 &&
965 			    (state == NUD_FAILED ||
966 			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
967 				*np = n->next;
968 				neigh_mark_dead(n);
969 				write_unlock(&n->lock);
970 				neigh_cleanup_and_release(n);
971 				continue;
972 			}
973 			write_unlock(&n->lock);
974 
975 next_elt:
976 			np = &n->next;
977 		}
978 		/*
979 		 * It's fine to release lock here, even if hash table
980 		 * grows while we are preempted.
981 		 */
982 		write_unlock_bh(&tbl->lock);
983 		cond_resched();
984 		write_lock_bh(&tbl->lock);
985 		nht = rcu_dereference_protected(tbl->nht,
986 						lockdep_is_held(&tbl->lock));
987 	}
988 out:
989 	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
990 	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
991 	 * BASE_REACHABLE_TIME.
992 	 */
993 	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
994 			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
995 	write_unlock_bh(&tbl->lock);
996 }
997 
998 static __inline__ int neigh_max_probes(struct neighbour *n)
999 {
1000 	struct neigh_parms *p = n->parms;
1001 	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
1002 	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
1003 	        NEIGH_VAR(p, MCAST_PROBES));
1004 }
1005 
/* Resolution failed: report unreachability for each queued skb and
 * purge the queue.  neigh->lock is dropped around error_report since
 * that callback may itself touch the neighbour cache (see comment),
 * and the NUD_FAILED re-check guards against state changing meanwhile.
 */
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* It is very thin place. report_unreachable is very complicated
	   routine. Particularly, it can hit the same neighbour entry!

	   So that, we try to be accurate and avoid dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
1030 
/* Send one solicitation for the entry, using a clone of the most
 * recently queued skb as the template.  Drops neigh->lock before
 * calling into the protocol's solicit handler (which may transmit).
 */
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}
1044 
1045 /* Called when a timer expires for a neighbour entry. */
1046 
/* NUD state-machine timer.  Depending on the current state this either
 * keeps the entry REACHABLE, demotes it through DELAY/STALE, promotes
 * DELAY back to REACHABLE on a recent confirmation, or probes until
 * neigh_max_probes() is exhausted and the entry fails.  Re-arms the
 * timer for states that stay in NUD_IN_TIMER and finally drops the
 * reference taken by neigh_add_timer().
 */
static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	/* the timer may have been cancelled after it fired */
	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			/* recently used but unconfirmed: delay-probe soon */
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			/* confirmation arrived while we were delaying */
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		/* clamp to at least HZ/100 in the future */
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		/* neigh_probe() releases neigh->lock */
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
1135 
/* Kick off (or continue) address resolution for @neigh, optionally
 * queueing @skb until the entry becomes valid.
 *
 * Returns 0 when the caller may transmit immediately, 1 when the skb
 * was queued/dropped pending resolution.  @immediate_ok selects whether
 * the first probe may be sent from this context or must wait for the
 * timer.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		       const bool immediate_ok)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	/* Already resolved or resolution in a later stage: nothing to do. */
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			/* Start resolution: seed the probe counter and
			 * arm the retransmit timer.
			 */
			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			if (!immediate_ok) {
				next = now + 1;
			} else {
				immediate_probe = true;
				next = now + max(NEIGH_VAR(neigh->parms,
							   RETRANS_TIME),
						 HZ / 100);
			}
			neigh_add_timer(neigh, next);
		} else {
			/* No probing configured at all: fail straight away. */
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			/* Keep the unresolved queue within its byte budget,
			 * dropping oldest packets first.
			 */
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	/* neigh_probe() releases neigh->lock itself; either way BHs stay
	 * disabled until the explicit local_bh_enable() below.
	 */
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
1223 
1224 static void neigh_update_hhs(struct neighbour *neigh)
1225 {
1226 	struct hh_cache *hh;
1227 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1228 		= NULL;
1229 
1230 	if (neigh->dev->header_ops)
1231 		update = neigh->dev->header_ops->cache_update;
1232 
1233 	if (update) {
1234 		hh = &neigh->hh;
1235 		if (READ_ONCE(hh->hh_len)) {
1236 			write_seqlock_bh(&hh->hh_lock);
1237 			update(hh, neigh->dev, neigh->ha);
1238 			write_sequnlock_bh(&hh->hh_lock);
1239 		}
1240 	}
1241 }
1242 
1243 /* Generic update routine.
1244    -- lladdr is new lladdr or NULL, if it is not supplied.
1245    -- new    is new state.
1246    -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE marks the existing "connected"
				lladdr as suspect instead of overriding it
				if it differs.
1252 	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
1253 	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
1254 	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
1257 	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
1258 				a router.
1259 
1260    Caller MUST hold reference count on the entry.
1261  */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool gc_update = false, managed_update = false;
	int update_isrouter = 0;
	struct net_device *dev;
	int err, notify = 0;
	u8 old;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	/* Only administrative updates may touch NOARP/PERMANENT entries. */
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
	/* USE/MANAGED updates only refresh flags; state (minus PERMANENT)
	 * is kept as-is.
	 */
	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	/* Transition to an invalid state: stop the timer, mark the entry
	 * suspect and, on INCOMPLETE/PROBE -> FAILED, flush its queue.
	 */
	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				/* Keep the old address but downgrade to STALE. */
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		/* Publish the new address under ha_lock, then refresh any
		 * cached hardware headers built from the old one.
		 */
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);
	if (((new ^ old) & NUD_PERMANENT) || gc_update)
		neigh_update_gc_list(neigh);
	if (managed_update)
		neigh_update_managed_list(neigh);
	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);
	trace_neigh_update_done(neigh, err);
	return err;
}
1449 
/* Public wrapper around __neigh_update() for callers that have no
 * netlink extended-ack to report errors through.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
1456 
1457 /* Update the neigh to listen temporarily for probe responses, even if it is
1458  * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1459  */
1460 void __neigh_set_probe_once(struct neighbour *neigh)
1461 {
1462 	if (neigh->dead)
1463 		return;
1464 	neigh->updated = jiffies;
1465 	if (!(neigh->nud_state & NUD_FAILED))
1466 		return;
1467 	neigh->nud_state = NUD_INCOMPLETE;
1468 	atomic_set(&neigh->probes, neigh_max_probes(neigh));
1469 	neigh_add_timer(neigh,
1470 			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1471 				      HZ/100));
1472 }
1473 EXPORT_SYMBOL(__neigh_set_probe_once);
1474 
1475 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1476 				 u8 *lladdr, void *saddr,
1477 				 struct net_device *dev)
1478 {
1479 	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1480 						 lladdr || !dev->addr_len);
1481 	if (neigh)
1482 		neigh_update(neigh, lladdr, NUD_STALE,
1483 			     NEIGH_UPDATE_F_OVERRIDE, 0);
1484 	return neigh;
1485 }
1486 EXPORT_SYMBOL(neigh_event_ns);
1487 
1488 /* called with read_lock_bh(&n->lock); */
1489 static void neigh_hh_init(struct neighbour *n)
1490 {
1491 	struct net_device *dev = n->dev;
1492 	__be16 prot = n->tbl->protocol;
1493 	struct hh_cache	*hh = &n->hh;
1494 
1495 	write_lock_bh(&n->lock);
1496 
1497 	/* Only one thread can come in here and initialize the
1498 	 * hh_cache entry.
1499 	 */
1500 	if (!hh->hh_len)
1501 		dev->header_ops->cache(n, hh, prot);
1502 
1503 	write_unlock_bh(&n->lock);
1504 }
1505 
/* Slow and careful output path: triggers address resolution when
 * needed and only transmits once the entry is usable.
 */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	/* neigh_event_send() returns 0 when we may transmit now; otherwise
	 * the skb was queued (or dropped) pending resolution.
	 */
	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		/* Build the link-layer header from a consistent snapshot of
		 * neigh->ha; retry if a writer changed it underneath us.
		 */
		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
1540 
1541 /* As fast as possible without hh cache */
1542 
1543 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1544 {
1545 	struct net_device *dev = neigh->dev;
1546 	unsigned int seq;
1547 	int err;
1548 
1549 	do {
1550 		__skb_pull(skb, skb_network_offset(skb));
1551 		seq = read_seqbegin(&neigh->ha_lock);
1552 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1553 				      neigh->ha, NULL, skb->len);
1554 	} while (read_seqretry(&neigh->ha_lock, seq));
1555 
1556 	if (err >= 0)
1557 		err = dev_queue_xmit(skb);
1558 	else {
1559 		err = -EINVAL;
1560 		kfree_skb(skb);
1561 	}
1562 	return err;
1563 }
1564 EXPORT_SYMBOL(neigh_connected_output);
1565 
/* Output path for devices needing no link-layer resolution at all:
 * hand the skb straight to the qdisc layer.
 */
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
1571 
1572 static void neigh_managed_work(struct work_struct *work)
1573 {
1574 	struct neigh_table *tbl = container_of(work, struct neigh_table,
1575 					       managed_work.work);
1576 	struct neighbour *neigh;
1577 
1578 	write_lock_bh(&tbl->lock);
1579 	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1580 		neigh_event_send_probe(neigh, NULL, false);
1581 	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
1582 			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
1583 	write_unlock_bh(&tbl->lock);
1584 }
1585 
/* Timer handler for the proxy queue: re-inject every skb whose
 * scheduled delay has elapsed, and re-arm the timer for the earliest
 * remaining one.
 */
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			/* Drop the reference taken in pneigh_enqueue(). */
			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
1619 
/* Queue @skb for delayed proxy processing, randomly jittered up to
 * PROXY_DELAY, dropping it if the proxy queue is already over
 * PROXY_QLEN.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = jiffies +
			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	/* Never push an already-armed timer later than it was. */
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	/* Reference dropped in neigh_proxy_process(). */
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
1646 
1647 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1648 						      struct net *net, int ifindex)
1649 {
1650 	struct neigh_parms *p;
1651 
1652 	list_for_each_entry(p, &tbl->parms_list, list) {
1653 		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1654 		    (!p->dev && !ifindex && net_eq(net, &init_net)))
1655 			return p;
1656 	}
1657 
1658 	return NULL;
1659 }
1660 
/* Allocate per-device neigh parameters for @dev by cloning the table
 * defaults, giving the driver a chance to adjust them via
 * ndo_neigh_setup().  Returns NULL on allocation or setup failure.
 */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		/* Let the driver veto or customize; unwind on failure. */
		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			netdev_put(dev, &p->dev_tracker);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
1694 
/* RCU callback: drop the reference that kept the parms alive until all
 * concurrent readers from before neigh_parms_release() finished.
 */
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
1702 
/* Unlink @parms from @tbl and schedule its final put after an RCU grace
 * period.  The table's built-in default parms are never released.
 */
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	netdev_put(parms->dev, &parms->dev_tracker);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
1715 
/* Final destruction once the refcount drops to zero. */
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
1720 
1721 static struct lock_class_key neigh_table_proxy_queue_class;
1722 
1723 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
1724 
/* One-time initialization of a protocol's neighbour table: default
 * parms, per-CPU stats, hash tables, GC/managed work and the proxy
 * timer.  Allocation failures here are fatal (panic) since the stack
 * cannot run without its neighbour tables.
 */
void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	/* Start with 2^3 = 8 hash buckets; grows on demand. */
	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
1782 
/* Tear down a neighbour table: stop its workers and timers, flush all
 * entries and free the hash tables and statistics.  Always returns 0.
 */
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->managed_work);
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
1810 
1811 static struct neigh_table *neigh_find_table(int family)
1812 {
1813 	struct neigh_table *tbl = NULL;
1814 
1815 	switch (family) {
1816 	case AF_INET:
1817 		tbl = neigh_tables[NEIGH_ARP_TABLE];
1818 		break;
1819 	case AF_INET6:
1820 		tbl = neigh_tables[NEIGH_ND_TABLE];
1821 		break;
1822 	case AF_DECnet:
1823 		tbl = neigh_tables[NEIGH_DN_TABLE];
1824 		break;
1825 	}
1826 
1827 	return tbl;
1828 }
1829 
/* Netlink attribute validation policy for RTM_*NEIGH requests.
 * Strict validation kicks in from NDA_NH_ID onward.
 */
const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
1846 
/* RTM_DELNEIGH handler: delete a (proxy) neighbour entry identified by
 * family, ifindex and NDA_DST.  Runs under the RTNL lock.
 */
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	/* Proxy entries live in a separate table with their own delete. */
	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Administratively fail the entry, then unlink it from the hash
	 * under the table lock; the release drops the lookup reference.
	 */
	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
1911 
/* RTM_NEWNEIGH handler: create or update a (proxy) neighbour entry.
 * Honors NLM_F_CREATE / NLM_F_EXCL / NLM_F_REPLACE semantics and the
 * extended NTF_* flags carried in NDA_FLAGS_EXT.  Runs under RTNL.
 */
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	u32 ndm_flags;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	ndm_flags = ndm->ndm_flags;
	if (tb[NDA_FLAGS_EXT]) {
		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);

		/* Extended flags must fit above the legacy 8-bit flags
		 * inside neigh->flags.
		 */
		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
			      hweight32(NTF_EXT_MASK)));
		ndm_flags |= (ext << NTF_EXT_SHIFT);
	}
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		if (ndm_flags & NTF_MANAGED) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
			goto out;
		}

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
		bool exempt_from_gc = ndm_permanent ||
				      ndm_flags & NTF_EXT_LEARNED;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}
		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
			err = -EINVAL;
			goto out;
		}

		neigh = ___neigh_create(tbl, dst, dev,
					ndm_flags &
					(NTF_EXT_LEARNED | NTF_MANAGED),
					exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		/* Without NLM_F_REPLACE, do not clobber a valid lladdr. */
		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;
	if (ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
	if (ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;
	if (ndm_flags & NTF_MANAGED)
		flags |= NEIGH_UPDATE_F_MANAGED;
	if (ndm_flags & NTF_USE)
		flags |= NEIGH_UPDATE_F_USE;

	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
			     NETLINK_CB(skb).portid, extack);
	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
		neigh_event_send(neigh, NULL);
		err = 0;
	}
	neigh_release(neigh);
out:
	return err;
}
2063 
/* Dump one neigh_parms set as a nested NDTA_PARMS attribute.
 * Returns the nest length on success, -ENOBUFS/-EMSGSIZE when the skb
 * has no room.
 */
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
			  NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
2113 
/* Build one RTM_NEWNEIGHTBL message describing @tbl: name, GC thresholds
 * and interval, an NDTA_CONFIG snapshot, per-CPU statistics summed into
 * NDTA_STATS, and the table's default parms (NDTA_PARMS).
 *
 * Returns 0 on success or -EMSGSIZE if @skb ran out of room (message is
 * cancelled in that case).
 */
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Held across all reads so the dumped values are self-consistent. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		/* NDTA_CONFIG: point-in-time snapshot of table geometry and
		 * activity timestamps (deltas converted to msecs for UAPI).
		 */
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		/* Hash table pointer is RCU-managed; read it under RCU. */
		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		/* NDTA_STATS: per-CPU counters summed into one struct. */
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	/* The table's default parms must not be bound to a device. */
	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2202 
/* Build one RTM_NEWNEIGHTBL message carrying only the table name and a
 * single parms set @parms (used when dumping per-device parameter sets).
 * Returns 0 on success or -EMSGSIZE (message cancelled).
 */
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	/* Lock keeps the parms values consistent while they are copied. */
	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2235 
/* Validation policy for top-level RTM_SETNEIGHTBL attributes
 * (consumed by neightbl_set()).
 */
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};
2244 
2245 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2246 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2247 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2248 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2249 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2250 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2251 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2252 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2253 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2254 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2255 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2256 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2257 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2258 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2259 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2260 	[NDTPA_INTERVAL_PROBE_TIME_MS]	= { .type = NLA_U64, .min = 1 },
2261 };
2262 
/* RTM_SETNEIGHTBL handler: update a neighbour table's GC thresholds and
 * interval and/or one of its parms sets (selected by NDTPA_IFINDEX; 0
 * selects the table default) from a netlink request.
 *
 * The table is selected by NDTA_NAME, optionally constrained by the
 * ndtm_family in the header.  Returns 0 on success or a negative errno
 * (-EINVAL missing name, -ENOENT unknown table/parms).
 */
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	/* Find the table by name (and family, when one was given). */
	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		/* Apply every supplied NDTPA_* attribute to the parms set. */
		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				/* Deprecated: counted in packets; converted
				 * to bytes using a typical frame truesize.
				 */
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				/* let interested parties (e.g. switchdev
				 * drivers) react to the new value
				 */
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_INTERVAL_PROBE_TIME_MS:
				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	/* Table-global GC settings may only be changed from init_net. */
	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
2428 
2429 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2430 				    struct netlink_ext_ack *extack)
2431 {
2432 	struct ndtmsg *ndtm;
2433 
2434 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2435 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2436 		return -EINVAL;
2437 	}
2438 
2439 	ndtm = nlmsg_data(nlh);
2440 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2441 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2442 		return -EINVAL;
2443 	}
2444 
2445 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2446 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2447 		return -EINVAL;
2448 	}
2449 
2450 	return 0;
2451 }
2452 
/* RTM_GETNEIGHTBL dump handler: for each neighbour table emit the table
 * itself followed by its per-device parms sets visible in this netns.
 *
 * Resume state across dump calls: cb->args[0] = table index,
 * cb->args[1] = parms index within that table.
 */
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		/* The default parms (head of parms_list) were emitted by
		 * neightbl_fill_info() above; start from the next entry.
		 */
		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
2513 
/* Build one RTM_NEWNEIGH message for @neigh: ndmsg header, NDA_DST,
 * NDA_LLADDR (only when the entry is NUD_VALID), cache info, probe
 * count and optional protocol/extended flags.
 * Returns 0 on success or -EMSGSIZE (message cancelled).
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	u32 neigh_flags, neigh_flags_ext;
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* ndm_flags is 8 bits; the upper flag bits go out in NDA_FLAGS_EXT. */
	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
	neigh_flags     = neigh->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh_flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	/* State, link-layer address and cache info are read under the
	 * entry lock so they form a consistent snapshot.
	 */
	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2575 
/* Build one RTM_NEWNEIGH message for proxy entry @pn.  Proxy entries
 * have no link-layer address or NUD state machine, so the message is
 * marked NTF_PROXY with state NUD_NONE.
 * Returns 0 on success or -EMSGSIZE (message cancelled).
 */
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	u32 neigh_flags, neigh_flags_ext;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	/* ndm_flags is 8 bits; upper flag bits go out in NDA_FLAGS_EXT. */
	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
	neigh_flags     = pn->flags & NTF_OLD_MASK;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;
	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2615 
/* Announce a neighbour change to in-kernel listeners (netevent chain)
 * and to userspace (rtnetlink RTM_NEWNEIGH).
 */
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
2621 
2622 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2623 {
2624 	struct net_device *master;
2625 
2626 	if (!master_idx)
2627 		return false;
2628 
2629 	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2630 
2631 	/* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another
2632 	 * invalid value for ifindex to denote "no master".
2633 	 */
2634 	if (master_idx == -1)
2635 		return !!master;
2636 
2637 	if (!master || master->ifindex != master_idx)
2638 		return true;
2639 
2640 	return false;
2641 }
2642 
2643 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2644 {
2645 	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2646 		return true;
2647 
2648 	return false;
2649 }
2650 
/* Dump-time filters parsed from an RTM_GETNEIGH request. */
struct neigh_dump_filter {
	int master_idx;		/* NDA_MASTER: restrict to slaves of this device (0 = none) */
	int dev_idx;		/* NDA_IFINDEX: restrict to this device (0 = none) */
};
2655 
/* Dump the neighbour entries of @tbl that belong to the caller's netns
 * and pass @filter, walking the RCU-protected hash table.
 *
 * Resume state: cb->args[1] = bucket, cb->args[2] = index in bucket.
 * Returns skb->len on completion, or -1 when @skb filled up (the dump
 * will be resumed from the recorded position).
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;	/* only skip within the resumed bucket */
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
2702 
/* Dump the proxy (pneigh) entries of @tbl for the caller's netns that
 * pass @filter, under the table read lock.
 *
 * Resume state: cb->args[3] = bucket, cb->args[4] = index in bucket.
 * Returns skb->len on completion, or -1 when @skb filled up.
 */
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;	/* only skip within the resumed bucket */
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH, flags, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;

}
2747 
/* Validate an RTM_GETNEIGH dump request and extract the supported
 * filter attributes (NDA_IFINDEX, NDA_MASTER) into @filter.
 *
 * With @strict_check the header must be a full ndmsg with only
 * NTF_PROXY allowed in ndm_flags, and unknown attributes are rejected;
 * otherwise unknown attributes are silently ignored for backwards
 * compatibility.  Returns 0 or a negative errno.
 */
static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	int err, i;

	if (strict_check) {
		struct ndmsg *ndm;

		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}
2808 
/* RTM_GETNEIGH dump handler: walk all neighbour tables (optionally
 * restricted to one family) and dump either the normal entries or, when
 * the request set NTF_PROXY, the proxy entries.
 *
 * Resume state: cb->args[0] = table index; args[1..4] are the per-table
 * positions used by neigh_dump_table()/pneigh_dump_table().
 */
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	/* Validation errors only abort the dump in strict mode; legacy
	 * requesters keep working with whatever filters were parsed.
	 */
	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
2854 
/* Strictly validate an RTM_GETNEIGH (doit) request and extract the
 * lookup parameters: table (from ndm_family), destination address
 * (NDA_DST, must match the table's key length), ifindex and ndm_flags.
 * Only NTF_PROXY is permitted in ndm_flags; only NDA_DST is permitted
 * among the attributes.  Returns 0 or a negative errno.
 */
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			/* Address length is family-specific; enforce it. */
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}
2914 
/* Worst-case payload size of a neighbour notification/reply message,
 * used to size the reply skb in neigh_get_reply() and for notifications.
 */
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2925 
2926 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2927 			   u32 pid, u32 seq)
2928 {
2929 	struct sk_buff *skb;
2930 	int err = 0;
2931 
2932 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2933 	if (!skb)
2934 		return -ENOBUFS;
2935 
2936 	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2937 	if (err) {
2938 		kfree_skb(skb);
2939 		goto errout;
2940 	}
2941 
2942 	err = rtnl_unicast(skb, net, pid);
2943 errout:
2944 	return err;
2945 }
2946 
/* Worst-case payload size of a proxy-neighbour reply message, used to
 * size the reply skb in pneigh_get_reply().
 */
static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
2954 
2955 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
2956 			    u32 pid, u32 seq, struct neigh_table *tbl)
2957 {
2958 	struct sk_buff *skb;
2959 	int err = 0;
2960 
2961 	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
2962 	if (!skb)
2963 		return -ENOBUFS;
2964 
2965 	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2966 	if (err) {
2967 		kfree_skb(skb);
2968 		goto errout;
2969 	}
2970 
2971 	err = rtnl_unicast(skb, net, pid);
2972 errout:
2973 	return err;
2974 }
2975 
/* RTM_GETNEIGH (doit) handler: look up one neighbour (or, with
 * NTF_PROXY, one proxy entry) by destination address and device and
 * unicast the reply to the requester.  Returns 0 or a negative errno.
 */
static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		/* creat == 0: lookup only, never create an entry here */
		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	/* A device is optional for proxy entries but mandatory here. */
	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	/* neigh_lookup() took a reference; drop it. */
	neigh_release(neigh);

	return err;
}
3036 
/**
 * neigh_for_each - invoke a callback on every neighbour entry in a table
 * @tbl: neighbour table to walk
 * @cb: callback invoked for each entry
 * @cookie: opaque argument passed through to @cb
 *
 * Walks all hash buckets under RCU, additionally holding the table read
 * lock so the hash table cannot be resized during the walk.
 */
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
3058 
/* The tbl->lock must be held as a writer and BH disabled.
 *
 * Walk every entry in @tbl; for each entry for which @cb returns
 * non-zero, unlink it from its hash chain, mark it dead and release it.
 * Entries for which @cb returns 0 are left in place.
 */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		/* np tracks the link pointing at the current entry so an
		 * unlinked entry's predecessor stays in place.
		 */
		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			/* Drop the table's reference outside n->lock. */
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
3093 
/**
 * neigh_xmit - transmit @skb to @addr on @dev via the given table
 * @index: neighbour table index (NEIGH_ARP_TABLE, ... or NEIGH_LINK_TABLE)
 * @dev: output device
 * @addr: destination (protocol address, or link-layer address for
 *	NEIGH_LINK_TABLE)
 * @skb: packet to transmit
 *
 * For a protocol table, look up (creating on demand) the neighbour and
 * hand @skb to its output function.  NEIGH_LINK_TABLE bypasses
 * resolution: the link-layer header is built directly and the packet
 * queued.  On error the skb is freed; returns a negative errno or the
 * output function's result.
 */
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		if (index == NEIGH_ARP_TABLE) {
			/* IPv4 fast path: keyed by the 32-bit address */
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
3137 
3138 #ifdef CONFIG_PROC_FS
3139 
/* Return the first neighbour entry of the /proc seq walk: the first
 * entry in the first non-empty bucket that belongs to this netns and
 * passes the NEIGH_SEQ_SKIP_NOARP filter (and, when present, the
 * protocol-specific sub-iterator).  Caller holds the RCU-BH read lock
 * (taken in the seq start op).  Records the bucket in state->bucket.
 */
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				/* sub-iterator may reject this entry */
				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
3178 
/* Advance the /proc seq walk from entry @n to the next acceptable
 * neighbour, moving on to subsequent buckets as needed and applying the
 * same netns/NOARP/sub-iterator filtering as neigh_get_first().
 * Decrements *pos (when given) for each entry returned; returns NULL
 * at the end of the table.
 */
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		/* sub-iterator may have more to say about the same entry */
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
3226 
3227 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3228 {
3229 	struct neighbour *n = neigh_get_first(seq);
3230 
3231 	if (n) {
3232 		--(*pos);
3233 		while (*pos) {
3234 			n = neigh_get_next(seq, n, pos);
3235 			if (!n)
3236 				break;
3237 		}
3238 	}
3239 	return *pos ? NULL : n;
3240 }
3241 
3242 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3243 {
3244 	struct neigh_seq_state *state = seq->private;
3245 	struct net *net = seq_file_net(seq);
3246 	struct neigh_table *tbl = state->tbl;
3247 	struct pneigh_entry *pn = NULL;
3248 	int bucket;
3249 
3250 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3251 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3252 		pn = tbl->phash_buckets[bucket];
3253 		while (pn && !net_eq(pneigh_net(pn), net))
3254 			pn = pn->next;
3255 		if (pn)
3256 			break;
3257 	}
3258 	state->bucket = bucket;
3259 
3260 	return pn;
3261 }
3262 
/* Return the proxy-neighbour entry following @pn (same netns only), or
 * NULL when the proxy hash is exhausted.  Continues into later buckets
 * via state->bucket.  When @pos is non-NULL it is decremented once if
 * a following entry is found, mirroring neigh_get_next().
 */
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	/* Advance within the current bucket, skipping foreign netns. */
	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
3290 
3291 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3292 {
3293 	struct pneigh_entry *pn = pneigh_get_first(seq);
3294 
3295 	if (pn) {
3296 		--(*pos);
3297 		while (*pos) {
3298 			pn = pneigh_get_next(seq, pn, pos);
3299 			if (!pn)
3300 				break;
3301 		}
3302 	}
3303 	return *pos ? NULL : pn;
3304 }
3305 
3306 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3307 {
3308 	struct neigh_seq_state *state = seq->private;
3309 	void *rc;
3310 	loff_t idxpos = *pos;
3311 
3312 	rc = neigh_get_idx(seq, &idxpos);
3313 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3314 		rc = pneigh_get_idx(seq, &idxpos);
3315 
3316 	return rc;
3317 }
3318 
/* seq_file ->start for protocol neighbour dumps (ARP, NDISC, ...).
 * Initializes the iteration state, then takes rcu_read_lock_bh()
 * followed by tbl->lock - both held until neigh_seq_stop().  Returns
 * SEQ_START_TOKEN for position zero (the header line), otherwise the
 * entry at offset *pos, or NULL past the end.
 */
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	/* IS_PNEIGH is internal iteration state; callers may not set it. */
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
3336 
/* seq_file ->next: advance from @v to the following record.  After the
 * header token comes the first neighbour entry; once the neighbour
 * phase is exhausted the walk switches to proxy entries (unless
 * NEIGH_SEQ_NEIGH_ONLY).  *pos is always incremented, even when NULL
 * is returned, as the seq_file core requires.
 */
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		/* NULL pos: no offset bookkeeping needed while stepping. */
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		/* Can only be in the pneigh phase if it was allowed. */
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
3363 
/* seq_file ->stop: release the locks taken by neigh_seq_start(), in
 * reverse acquisition order (tbl->lock, then rcu_read_lock_bh()).
 */
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
3375 
3376 /* statistics via seq_file */
3377 
/* seq_file ->start for the per-table /proc statistics file.  Position
 * zero is the header (SEQ_START_TOKEN); position N maps to CPU index
 * N-1, from which we return the stats of the first possible CPU at or
 * after it.  *pos is set so the next call resumes after that CPU.
 */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}
3394 
/* seq_file ->next: return the stats of the next possible CPU, or NULL
 * when all CPUs have been shown.  *pos is advanced even on the NULL
 * (end-of-file) return, as the seq_file core requires.
 */
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	(*pos)++;
	return NULL;
}
3409 
/* seq_file ->stop: nothing to release - the stats walk takes no locks
 * and holds no references.
 */
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}
3414 
/* seq_file ->show: print the header for SEQ_START_TOKEN, otherwise one
 * row of counters for the per-CPU stats block @v.  Note "entries" is
 * the table-wide total and is repeated on every row; all the other
 * columns are per-CPU.  Column order must match the header string.
 */
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = pde_data(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
			"%08lx         %08lx         %08lx         "
			"%08lx       %08lx            %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}
3450 
/* seq_file operations for the per-table statistics /proc file. */
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
3457 #endif /* CONFIG_PROC_FS */
3458 
/* Broadcast an RTM_* netlink message describing neighbour @n to the
 * RTNLGRP_NEIGH multicast group of its netns.  Uses GFP_ATOMIC since
 * callers may hold locks / run in softirq context.  On failure the
 * error is recorded via rtnl_set_sk_err() so listeners see ENOBUFS.
 */
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
3483 
/* Ask userspace resolvers for help with @n by multicasting an
 * RTM_GETNEIGH request (used when app_probes is configured).
 */
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);
3489 
3490 #ifdef CONFIG_SYSCTL
/* Cap for the unres_qlen sysctl so that converting it to bytes
 * (qlen * SKB_TRUESIZE(ETH_FRAME_LEN)) cannot overflow an int.
 */
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3492 
/* sysctl handler for the legacy "unres_qlen" knob, which is expressed
 * in packets while the backing variable (unres_qlen_bytes) is in
 * bytes.  Converts bytes -> packets for reads and packets -> bytes for
 * writes, clamping writes to [0, unres_qlen_max] via a shadow table.
 */
static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	/* Present the byte count as an equivalent packet count. */
	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
3510 
3511 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
3512 						   int family)
3513 {
3514 	switch (family) {
3515 	case AF_INET:
3516 		return __in_dev_arp_parms_get_rcu(dev);
3517 	case AF_INET6:
3518 		return __in6_dev_nd_parms_get_rcu(dev);
3519 	}
3520 	return NULL;
3521 }
3522 
/* Propagate a newly written "default" parms value (data[@index] of the
 * table's default neigh_parms @p) to every device in @net - but only
 * to devices whose own copy of that value has never been written
 * explicitly (data_state bit clear), so per-device overrides survive.
 */
static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}
3539 
/* Common post-write bookkeeping for all neigh sysctl handlers: mark
 * the written field as explicitly set, fire the netevent listeners
 * that track delay_first_probe_time, and - for writes to the default
 * parms (ctl->extra1 == NULL dev) - fan the value out to all devices.
 * No-op on reads.
 */
static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	/* Recover which NEIGH_VAR_* slot this ctl_table entry backs. */
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}
3556 
3557 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3558 					   void *buffer, size_t *lenp,
3559 					   loff_t *ppos)
3560 {
3561 	struct ctl_table tmp = *ctl;
3562 	int ret;
3563 
3564 	tmp.extra1 = SYSCTL_ZERO;
3565 	tmp.extra2 = SYSCTL_INT_MAX;
3566 
3567 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3568 	neigh_proc_update(ctl, write);
3569 	return ret;
3570 }
3571 
/* sysctl handler: milliseconds stored as jiffies, with a lower bound
 * of one jiffy so the value can never reach zero (it drives a
 * periodic timer).  Uses a shadow table so the bound only applies to
 * this call; &min is safe to pass as it outlives the handler call.
 */
static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
						   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	int min = msecs_to_jiffies(1);

	tmp.extra1 = &min;
	tmp.extra2 = NULL;

	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}
3587 
3588 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3589 			size_t *lenp, loff_t *ppos)
3590 {
3591 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3592 
3593 	neigh_proc_update(ctl, write);
3594 	return ret;
3595 }
3596 EXPORT_SYMBOL(neigh_proc_dointvec);
3597 
/* sysctl handler: value in seconds stored as jiffies, plus
 * neigh_proc_update() bookkeeping on writes.
 */
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3607 
/* sysctl handler: value in USER_HZ ticks stored as jiffies, plus
 * neigh_proc_update() bookkeeping on writes.
 */
static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void *buffer, size_t *lenp,
					      loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
3617 
/* sysctl handler: value in milliseconds stored as jiffies, plus
 * neigh_proc_update() bookkeeping on writes.
 */
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3627 
/* sysctl handler for the packet-denominated unres_qlen alias: defers
 * to proc_unres_qlen() for the byte/packet conversion, then does the
 * neigh_proc_update() bookkeeping on writes.
 */
static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
3637 
/* sysctl handler shared by "base_reachable_time" (seconds) and
 * "base_reachable_time_ms" (milliseconds); dispatches on the procname.
 * After a successful write it also re-randomizes p->reachable_time so
 * the new base takes effect immediately rather than after the next
 * neigh_periodic_work pass.
 */
static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		/* Installed on an unexpected entry; report failure. */
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}
3662 
/* Offset of data[index] within struct neigh_parms, encoded as a
 * pointer from a NULL base.  neigh_sysctl_register() later adds the
 * real parms address to ->data, turning this back into a valid
 * pointer.
 */
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

/* Build one ctl_table entry named @name at slot NEIGH_VAR_##attr,
 * backed by NEIGH_VAR_##data_attr, with mode @mval and handler @proc.
 */
#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

/* Shorthands pairing each entry with its unit-conversion handler. */
#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)

/* "REUSED" variants expose the same backing variable under a second
 * name/unit (e.g. retrans_time vs retrans_time_ms).
 */
#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3692 
/* Template for the per-parms sysctl directory; kmemdup'ed and patched
 * by neigh_sysctl_register().  The per-parms entries come first; the
 * GC_* entries at the tail are table-global and are zeroed out (table
 * terminated early) for per-device registrations.
 */
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
						       "interval_probe_time_ms"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		/* Table-global knobs: only present in the "default" dir. */
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		{},
	},
};
3749 
/* Register the sysctl directory net/<ipv4|ipv6>/neigh/<dev|default>
 * for the given neigh_parms.
 *
 * @dev:     the device the parms belong to, or NULL for the table's
 *           default parms (which also exposes the gc_* globals).
 * @handler: optional protocol override installed on the four
 *           retrans/base_reachable_time entries.
 *
 * Duplicates neigh_sysctl_template, rebases the per-parms ->data
 * offsets onto @p, and stores the result in p->sysctl_table (freed by
 * neigh_sysctl_unregister()).  Returns 0 or -ENOBUFS.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
	if (!t)
		goto err;

	/* Turn the NULL-based data offsets into real pointers into @p
	 * and stash dev/p for neigh_proc_update().
	 */
	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds)*/
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes)
		 * So any handler that replaces them should do this as well
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	switch (neigh_parms_family(p)) {
	case AF_INET:
	      p_name = "ipv4";
	      break;
	case AF_INET6:
	      p_name = "ipv6";
	      break;
	default:
	      BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
3834 
3835 void neigh_sysctl_unregister(struct neigh_parms *p)
3836 {
3837 	if (p->sysctl_table) {
3838 		struct neigh_sysctl_table *t = p->sysctl_table;
3839 		p->sysctl_table = NULL;
3840 		unregister_net_sysctl_table(t->sysctl_header);
3841 		kfree(t);
3842 	}
3843 }
3844 EXPORT_SYMBOL(neigh_sysctl_unregister);
3845 
3846 #endif	/* CONFIG_SYSCTL */
3847 
/* Register the PF_UNSPEC rtnetlink handlers for neighbour entries
 * (new/del/get+dump) and neighbour tables (dump/set).  Runs at
 * subsys_initcall time so protocol tables can rely on them.
 */
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);
3862