// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)
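
/*
 * Usage sketch (illustrative only; "n" is assumed to be a valid
 * struct neighbour *).  Since level and NEIGH_DEBUG are compile-time
 * constants, the comparison is constant-folded, so with
 * NEIGH_DEBUG == 1:
 *
 *	neigh_dbg(1, "neigh %p state %x\n", n, n->nud_state); // -> pr_debug()
 *	neigh_dbg(3, "noisy trace\n");                        // compiled out
 */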

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - Nothing clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will deadlock if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simple and to make no
   callbacks into the neighbour tables.
 */
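
/*
 * Illustrative sketch of the rule above (an assumed caller, not code
 * from this file): take a reference under the table lock, drop the
 * lock, then do the non-trivial work:
 *
 *	write_lock_bh(&tbl->lock);
 *	n = ...;			// found during a bucket scan
 *	neigh_hold(n);			// pin the entry
 *	write_unlock_bh(&tbl->lock);
 *	...				// heavyweight work, may transmit
 *	neigh_release(n);		// drop the pin when done
 */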

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * Returns a value uniformly distributed over the interval
 * (1/2)*base ... (3/2)*base.  This corresponds to the default IPv6
 * settings and is not overridable, because it is a genuinely
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? get_random_u32_below(base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
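
/*
 * Worked example (illustrative): for base = 30 * HZ,
 * get_random_u32_below(30 * HZ) yields a value in [0, 30*HZ) and
 * (base >> 1) adds 15*HZ, so the result lies in [15*HZ, 45*HZ) --
 * a reachable_time jittered around BASE_REACHABLE_TIME, matching the
 * 0.5..1.5 random factor RFC 4861 suggests for IPv6 NUD.
 */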

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
	if (!list_empty(&n->managed_list))
		list_del_init(&n->managed_list);
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_managed_list(struct neighbour *n)
{
	bool on_managed_list, add_to_managed;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);
	if (n->dead)
		goto out;

	add_to_managed = n->flags & NTF_MANAGED;
	on_managed_list = !list_empty(&n->managed_list);

	if (!add_to_managed && on_managed_list)
		list_del_init(&n->managed_list);
	else if (add_to_managed && !on_managed_list)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
			       bool *gc_update, bool *managed_update)
{
	u32 ndm_flags, old_flags = neigh->flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return;

	ndm_flags  = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0;

	if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		*notify = 1;
		*gc_update = true;
	}
	if ((old_flags ^ ndm_flags) & NTF_MANAGED) {
		if (ndm_flags & NTF_MANAGED)
			neigh->flags |= NTF_MANAGED;
		else
			neigh->flags &= ~NTF_MANAGED;
		*notify = 1;
		*managed_update = true;
	}
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_parms_qlen_dec(struct net_device *dev, int family)
{
	struct neigh_parms *p;

	rcu_read_lock();
	p = neigh_get_dev_parms_rcu(dev, family);
	if (p)
		p->qlen--;
	rcu_read_unlock();
}

static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net,
			       int family)
{
	struct sk_buff_head tmp;
	unsigned long flags;
	struct sk_buff *skb;

	skb_queue_head_init(&tmp);
	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);
	while (skb != NULL) {
		struct sk_buff *skb_next = skb_peek_next(skb, list);
		struct net_device *dev = skb->dev;

		if (net == NULL || net_eq(dev_net(dev), net)) {
			neigh_parms_qlen_dec(dev, family);
			__skb_unlink(skb, list);
			__skb_queue_tail(&tmp, skb);
		}
		skb = skb_next;
	}
	spin_unlock_irqrestore(&list->lock, flags);

	while ((skb = __skb_dequeue(&tmp))) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   Destruction will be delayed until the
				   last user releases us, but we must
				   kill timers etc. and move the entry
				   to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);
	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
			   tbl->family);
	if (skb_queue_empty_lockless(&tbl->proxy_queue))
		del_timer_sync(&tbl->proxy_timer);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     u32 flags, bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->flags	  = flags;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);
	INIT_LIST_HEAD(&n->managed_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
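
/*
 * Usage sketch (illustrative; assumes an IPv4 caller holding a valid
 * "dev", not code from this file).  A successful lookup returns a
 * referenced entry that the caller must release:
 *
 *	struct neighbour *n;
 *	__be32 ip = ...;			// target IPv4 address
 *
 *	n = neigh_lookup(&arp_tbl, &ip, dev);
 *	if (n) {
 *		...				// n is refcounted here
 *		neigh_release(n);		// drop the lookup reference
 *	}
 */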

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *
___neigh_create(struct neigh_table *tbl, const void *pkey,
		struct net_device *dev, u32 flags,
		bool exempt_from_gc, bool want_ref)
{
	u32 hash_val, key_len = tbl->key_len;
	struct neighbour *n1, *rc, *n;
	struct neigh_hash_table *nht;
	int error;

	n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
	if (n->flags & NTF_MANAGED)
		list_add_tail(&n->managed_list, &n->tbl->managed_list);
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
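
/*
 * Worked example (illustrative): for an IPv4 key (key_len == 4), the
 * initial load takes all four bytes of the address; the successive
 * XOR shifts fold the high bits down, and the final mask selects one
 * of the 16 buckets (PNEIGH_HASHMASK == 0xF).  For longer keys, e.g.
 * IPv6 with key_len == 16, only the last four bytes feed the hash.
 */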

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	netdev_hold(dev, &n->dev_tracker, GFP_KERNEL);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		netdev_put(dev, &n->dev_tracker);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			netdev_put(n->dev, &n->dev_tracker);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		netdev_put(n->dev, &n->dev_tracker);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be unlinked from the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	netdev_put(dev, &neigh->dev_tracker);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with the neigh entry write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with the neigh entry write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from the random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;

		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release the lock here, even if the hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
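
/*
 * Timing sketch (illustrative, assuming the common default of
 * base_reachable_time = 30 seconds): gc_work requeues itself every
 * 15 seconds (BASE_REACHABLE_TIME >> 1), and every 300 seconds each
 * parms' reachable_time is re-randomized into [15s, 45s) via
 * neigh_rand_reach_time().
 */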

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;

	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a delicate spot. report_unreachable is a very
	   complicated routine; in particular, it can hit this same
	   neighbour entry!

	   So we try to be careful here and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);

	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		       const bool immediate_ok)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			if (!immediate_ok) {
				next = now + 1;
			} else {
				immediate_probe = true;
				next = now + max(NEIGH_VAR(neigh->parms,
							   RETRANS_TIME),
						 HZ / 100);
			}
			neigh_add_timer(neigh, next);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if none was supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it differs.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_USE	means that the entry is user triggered.
	NEIGH_UPDATE_F_MANAGED	means that the entry will be auto-refreshed.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				as a router.

   The caller MUST hold a reference count on the entry.
 */
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool gc_update = false, managed_update = false;
	int update_isrouter = 0;
	struct net_device *dev;
	int err, notify = 0;
	u8 old;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
	if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) {
		new = old & ~NUD_PERMANENT;
		neigh->nud_state = new;
		err = 0;
		goto out;
	}

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare the new lladdr with the cached one. */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they differ, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update the confirmed timestamp for the neighbour entry after we
	 * receive an ARP packet, even if it doesn't change the IP-to-MAC
	 * binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update the timestamp only once we know we will make a change to
	 * the neighbour entry. Otherwise we risk moving the locktime window
	 * with noop updates and ignoring relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;

			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);
	if (((new ^ old) & NUD_PERMANENT) || gc_update)
		neigh_update_gc_list(neigh);
	if (managed_update)
		neigh_update_managed_list(neigh);
	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);
	trace_neigh_update_done(neigh, err);
	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
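
/*
 * Usage sketch (illustrative; "n" is an assumed held neighbour and
 * "mac" a freshly learned link-layer address, not code from this
 * file): confirm the entry and override a differing cached address:
 *
 *	neigh_update(n, mac, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE,
 *		     0);
 */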

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);

	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_managed_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table,
					       managed_work.work);
	struct neighbour *neigh;

	write_lock_bh(&tbl->lock);
	list_for_each_entry(neigh, &tbl->managed_list, managed_list)
		neigh_event_send_probe(neigh, NULL, false);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
			   NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS));
	write_unlock_bh(&tbl->lock);
}

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			neigh_parms_qlen_dec(dev, tbl->family);
			__skb_unlink(skb, &tbl->proxy_queue);

			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

static unsigned long neigh_proxy_delay(struct neigh_parms *p)
{
	/* If proxy_delay is zero, do not call get_random_u32_below()
	 * as it is undefined behavior.
	 */
	unsigned long proxy_delay = NEIGH_VAR(p, PROXY_DELAY);

	return proxy_delay ?
	       jiffies + get_random_u32_below(proxy_delay) : jiffies;
}
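
/*
 * Worked example (illustrative, assuming the ARP default proxy_delay
 * of (8 * HZ) / 10): each queued proxy request is scheduled at a
 * uniformly random point within the next 0.8 seconds; with
 * proxy_delay == 0, the request is scheduled for "now" without
 * touching the RNG at all.
 */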

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = neigh_proxy_delay(p);

	if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	p->qlen++;
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		p->qlen = 0;
		netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			netdev_put(dev, &p->dev_tracker);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	netdev_put(parms->dev, &parms->dev_tracker);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	INIT_LIST_HEAD(&tbl->managed_list);

	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
	tbl->parms.qlen = 0;

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);

	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0);

	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->managed_work);
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	}

	return tbl;
}

const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FLAGS_EXT]		= NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK),
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
1901 
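/*
 * Editor's sketch: a minimal RTM_NEWNEIGH request as validated against
 * nda_policy above.  For an IPv4 entry it carries a struct ndmsg header
 * plus two attributes (the NDA_* types are real, the values illustrative):
 *
 *	ndm_family = AF_INET, ndm_ifindex = 2, ndm_state = NUD_PERMANENT
 *	NDA_DST    -> 4-byte IPv4 address (must cover tbl->key_len)
 *	NDA_LLADDR -> 6-byte MAC address (must cover dev->addr_len)
 */
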
1902 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
1903 			struct netlink_ext_ack *extack)
1904 {
1905 	struct net *net = sock_net(skb->sk);
1906 	struct ndmsg *ndm;
1907 	struct nlattr *dst_attr;
1908 	struct neigh_table *tbl;
1909 	struct neighbour *neigh;
1910 	struct net_device *dev = NULL;
1911 	int err = -EINVAL;
1912 
1913 	ASSERT_RTNL();
1914 	if (nlmsg_len(nlh) < sizeof(*ndm))
1915 		goto out;
1916 
1917 	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1918 	if (!dst_attr) {
1919 		NL_SET_ERR_MSG(extack, "Network address not specified");
1920 		goto out;
1921 	}
1922 
1923 	ndm = nlmsg_data(nlh);
1924 	if (ndm->ndm_ifindex) {
1925 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1926 		if (dev == NULL) {
1927 			err = -ENODEV;
1928 			goto out;
1929 		}
1930 	}
1931 
1932 	tbl = neigh_find_table(ndm->ndm_family);
1933 	if (tbl == NULL)
1934 		return -EAFNOSUPPORT;
1935 
1936 	if (nla_len(dst_attr) < (int)tbl->key_len) {
1937 		NL_SET_ERR_MSG(extack, "Invalid network address");
1938 		goto out;
1939 	}
1940 
1941 	if (ndm->ndm_flags & NTF_PROXY) {
1942 		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1943 		goto out;
1944 	}
1945 
1946 	if (dev == NULL)
1947 		goto out;
1948 
1949 	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1950 	if (neigh == NULL) {
1951 		err = -ENOENT;
1952 		goto out;
1953 	}
1954 
1955 	err = __neigh_update(neigh, NULL, NUD_FAILED,
1956 			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
1957 			     NETLINK_CB(skb).portid, extack);
1958 	write_lock_bh(&tbl->lock);
1959 	neigh_release(neigh);
1960 	neigh_remove_one(neigh, tbl);
1961 	write_unlock_bh(&tbl->lock);
1962 
1963 out:
1964 	return err;
1965 }
1966 
1967 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
1968 		     struct netlink_ext_ack *extack)
1969 {
1970 	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
1971 		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1972 	struct net *net = sock_net(skb->sk);
1973 	struct ndmsg *ndm;
1974 	struct nlattr *tb[NDA_MAX+1];
1975 	struct neigh_table *tbl;
1976 	struct net_device *dev = NULL;
1977 	struct neighbour *neigh;
1978 	void *dst, *lladdr;
1979 	u8 protocol = 0;
1980 	u32 ndm_flags;
1981 	int err;
1982 
1983 	ASSERT_RTNL();
1984 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
1985 				     nda_policy, extack);
1986 	if (err < 0)
1987 		goto out;
1988 
1989 	err = -EINVAL;
1990 	if (!tb[NDA_DST]) {
1991 		NL_SET_ERR_MSG(extack, "Network address not specified");
1992 		goto out;
1993 	}
1994 
1995 	ndm = nlmsg_data(nlh);
1996 	ndm_flags = ndm->ndm_flags;
1997 	if (tb[NDA_FLAGS_EXT]) {
1998 		u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]);
1999 
2000 		BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
2001 			     (sizeof(ndm->ndm_flags) * BITS_PER_BYTE +
2002 			      hweight32(NTF_EXT_MASK)));
2003 		ndm_flags |= (ext << NTF_EXT_SHIFT);
2004 	}
2005 	if (ndm->ndm_ifindex) {
2006 		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2007 		if (dev == NULL) {
2008 			err = -ENODEV;
2009 			goto out;
2010 		}
2011 
2012 		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
2013 			NL_SET_ERR_MSG(extack, "Invalid link address");
2014 			goto out;
2015 		}
2016 	}
2017 
2018 	tbl = neigh_find_table(ndm->ndm_family);
2019 	if (tbl == NULL)
2020 		return -EAFNOSUPPORT;
2021 
2022 	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
2023 		NL_SET_ERR_MSG(extack, "Invalid network address");
2024 		goto out;
2025 	}
2026 
2027 	dst = nla_data(tb[NDA_DST]);
2028 	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
2029 
2030 	if (tb[NDA_PROTOCOL])
2031 		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
2032 	if (ndm_flags & NTF_PROXY) {
2033 		struct pneigh_entry *pn;
2034 
2035 		if (ndm_flags & NTF_MANAGED) {
2036 			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination");
2037 			goto out;
2038 		}
2039 
2040 		err = -ENOBUFS;
2041 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
2042 		if (pn) {
2043 			pn->flags = ndm_flags;
2044 			if (protocol)
2045 				pn->protocol = protocol;
2046 			err = 0;
2047 		}
2048 		goto out;
2049 	}
2050 
2051 	if (!dev) {
2052 		NL_SET_ERR_MSG(extack, "Device not specified");
2053 		goto out;
2054 	}
2055 
2056 	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
2057 		err = -EINVAL;
2058 		goto out;
2059 	}
2060 
2061 	neigh = neigh_lookup(tbl, dst, dev);
2062 	if (neigh == NULL) {
2063 		bool ndm_permanent  = ndm->ndm_state & NUD_PERMANENT;
2064 		bool exempt_from_gc = ndm_permanent ||
2065 				      ndm_flags & NTF_EXT_LEARNED;
2066 
2067 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
2068 			err = -ENOENT;
2069 			goto out;
2070 		}
2071 		if (ndm_permanent && (ndm_flags & NTF_MANAGED)) {
2072 			NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry");
2073 			err = -EINVAL;
2074 			goto out;
2075 		}
2076 
2077 		neigh = ___neigh_create(tbl, dst, dev,
2078 					ndm_flags &
2079 					(NTF_EXT_LEARNED | NTF_MANAGED),
2080 					exempt_from_gc, true);
2081 		if (IS_ERR(neigh)) {
2082 			err = PTR_ERR(neigh);
2083 			goto out;
2084 		}
2085 	} else {
2086 		if (nlh->nlmsg_flags & NLM_F_EXCL) {
2087 			err = -EEXIST;
2088 			neigh_release(neigh);
2089 			goto out;
2090 		}
2091 
2092 		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
2093 			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
2094 				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
2095 	}
2096 
2097 	if (protocol)
2098 		neigh->protocol = protocol;
2099 	if (ndm_flags & NTF_EXT_LEARNED)
2100 		flags |= NEIGH_UPDATE_F_EXT_LEARNED;
2101 	if (ndm_flags & NTF_ROUTER)
2102 		flags |= NEIGH_UPDATE_F_ISROUTER;
2103 	if (ndm_flags & NTF_MANAGED)
2104 		flags |= NEIGH_UPDATE_F_MANAGED;
2105 	if (ndm_flags & NTF_USE)
2106 		flags |= NEIGH_UPDATE_F_USE;
2107 
2108 	err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2109 			     NETLINK_CB(skb).portid, extack);
2110 	if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
2111 		neigh_event_send(neigh, NULL);
2112 		err = 0;
2113 	}
2114 	neigh_release(neigh);
2115 out:
2116 	return err;
2117 }
2118 
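/*
 * Editor's note on the NLM_F_* handling above, using the usual iproute2
 * verbs as a (hedged) illustration of the flag combinations:
 *
 *	ip neigh add     -> NLM_F_CREATE | NLM_F_EXCL    (EEXIST if present)
 *	ip neigh change  -> NLM_F_REPLACE                (ENOENT if missing)
 *	ip neigh replace -> NLM_F_CREATE | NLM_F_REPLACE
 *
 * Without NLM_F_REPLACE the OVERRIDE flags are cleared for an existing
 * entry, so a plain update cannot clobber an already-resolved address.
 */
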
2119 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
2120 {
2121 	struct nlattr *nest;
2122 
2123 	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
2124 	if (nest == NULL)
2125 		return -ENOBUFS;
2126 
2127 	if ((parms->dev &&
2128 	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
2129 	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
2130 	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
2131 			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
2132 	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
2133 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
2134 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
2135 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
2136 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
2137 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
2138 			NEIGH_VAR(parms, UCAST_PROBES)) ||
2139 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
2140 			NEIGH_VAR(parms, MCAST_PROBES)) ||
2141 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
2142 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
2143 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
2144 			  NDTPA_PAD) ||
2145 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
2146 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
2147 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
2148 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
2149 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
2150 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
2151 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
2152 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
2153 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
2154 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
2155 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
2156 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
2157 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
2158 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) ||
2159 	    nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS,
2160 			  NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD))
2161 		goto nla_put_failure;
2162 	return nla_nest_end(skb, nest);
2163 
2164 nla_put_failure:
2165 	nla_nest_cancel(skb, nest);
2166 	return -EMSGSIZE;
2167 }
2168 
2169 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2170 			      u32 pid, u32 seq, int type, int flags)
2171 {
2172 	struct nlmsghdr *nlh;
2173 	struct ndtmsg *ndtmsg;
2174 
2175 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2176 	if (nlh == NULL)
2177 		return -EMSGSIZE;
2178 
2179 	ndtmsg = nlmsg_data(nlh);
2180 
2181 	read_lock_bh(&tbl->lock);
2182 	ndtmsg->ndtm_family = tbl->family;
2183 	ndtmsg->ndtm_pad1   = 0;
2184 	ndtmsg->ndtm_pad2   = 0;
2185 
2186 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2187 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
2188 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
2189 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
2190 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
2191 		goto nla_put_failure;
2192 	{
2193 		unsigned long now = jiffies;
2194 		long flush_delta = now - tbl->last_flush;
2195 		long rand_delta = now - tbl->last_rand;
2196 		struct neigh_hash_table *nht;
2197 		struct ndt_config ndc = {
2198 			.ndtc_key_len		= tbl->key_len,
2199 			.ndtc_entry_size	= tbl->entry_size,
2200 			.ndtc_entries		= atomic_read(&tbl->entries),
2201 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
2202 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
2203 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
2204 		};
2205 
2206 		rcu_read_lock_bh();
2207 		nht = rcu_dereference_bh(tbl->nht);
2208 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
2209 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
2210 		rcu_read_unlock_bh();
2211 
2212 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
2213 			goto nla_put_failure;
2214 	}
2215 
2216 	{
2217 		int cpu;
2218 		struct ndt_stats ndst;
2219 
2220 		memset(&ndst, 0, sizeof(ndst));
2221 
2222 		for_each_possible_cpu(cpu) {
2223 			struct neigh_statistics	*st;
2224 
2225 			st = per_cpu_ptr(tbl->stats, cpu);
2226 			ndst.ndts_allocs		+= st->allocs;
2227 			ndst.ndts_destroys		+= st->destroys;
2228 			ndst.ndts_hash_grows		+= st->hash_grows;
2229 			ndst.ndts_res_failed		+= st->res_failed;
2230 			ndst.ndts_lookups		+= st->lookups;
2231 			ndst.ndts_hits			+= st->hits;
2232 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
2233 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
2234 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
2235 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
2236 			ndst.ndts_table_fulls		+= st->table_fulls;
2237 		}
2238 
2239 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
2240 				  NDTA_PAD))
2241 			goto nla_put_failure;
2242 	}
2243 
2244 	BUG_ON(tbl->parms.dev);
2245 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2246 		goto nla_put_failure;
2247 
2248 	read_unlock_bh(&tbl->lock);
2249 	nlmsg_end(skb, nlh);
2250 	return 0;
2251 
2252 nla_put_failure:
2253 	read_unlock_bh(&tbl->lock);
2254 	nlmsg_cancel(skb, nlh);
2255 	return -EMSGSIZE;
2256 }
2257 
2258 static int neightbl_fill_param_info(struct sk_buff *skb,
2259 				    struct neigh_table *tbl,
2260 				    struct neigh_parms *parms,
2261 				    u32 pid, u32 seq, int type,
2262 				    unsigned int flags)
2263 {
2264 	struct ndtmsg *ndtmsg;
2265 	struct nlmsghdr *nlh;
2266 
2267 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
2268 	if (nlh == NULL)
2269 		return -EMSGSIZE;
2270 
2271 	ndtmsg = nlmsg_data(nlh);
2272 
2273 	read_lock_bh(&tbl->lock);
2274 	ndtmsg->ndtm_family = tbl->family;
2275 	ndtmsg->ndtm_pad1   = 0;
2276 	ndtmsg->ndtm_pad2   = 0;
2277 
2278 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2279 	    neightbl_fill_parms(skb, parms) < 0)
2280 		goto errout;
2281 
2282 	read_unlock_bh(&tbl->lock);
2283 	nlmsg_end(skb, nlh);
2284 	return 0;
2285 errout:
2286 	read_unlock_bh(&tbl->lock);
2287 	nlmsg_cancel(skb, nlh);
2288 	return -EMSGSIZE;
2289 }
2290 
2291 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
2292 	[NDTA_NAME]		= { .type = NLA_STRING },
2293 	[NDTA_THRESH1]		= { .type = NLA_U32 },
2294 	[NDTA_THRESH2]		= { .type = NLA_U32 },
2295 	[NDTA_THRESH3]		= { .type = NLA_U32 },
2296 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
2297 	[NDTA_PARMS]		= { .type = NLA_NESTED },
2298 };
2299 
2300 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
2301 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
2302 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
2303 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
2304 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
2305 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
2306 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
2307 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
2308 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
2309 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
2310 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
2311 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
2312 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
2313 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
2314 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
2315 	[NDTPA_INTERVAL_PROBE_TIME_MS]	= { .type = NLA_U64, .min = 1 },
2316 };
2317 
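/*
 * Editor's sketch: these per-table parameters are normally tuned from
 * userspace via RTM_SETNEIGHTBL, e.g. with iproute2 (illustrative values):
 *
 *	ip ntable change name arp_cache dev eth0 queue 8
 *	ip ntable change name arp_cache thresh1 512 thresh2 2048
 *
 * which arrives in neightbl_set() below with NDTA_PARMS and/or the
 * NDTA_THRESH* attributes set.
 */
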
2318 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2319 			struct netlink_ext_ack *extack)
2320 {
2321 	struct net *net = sock_net(skb->sk);
2322 	struct neigh_table *tbl;
2323 	struct ndtmsg *ndtmsg;
2324 	struct nlattr *tb[NDTA_MAX+1];
2325 	bool found = false;
2326 	int err, tidx;
2327 
2328 	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2329 				     nl_neightbl_policy, extack);
2330 	if (err < 0)
2331 		goto errout;
2332 
2333 	if (tb[NDTA_NAME] == NULL) {
2334 		err = -EINVAL;
2335 		goto errout;
2336 	}
2337 
2338 	ndtmsg = nlmsg_data(nlh);
2339 
2340 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2341 		tbl = neigh_tables[tidx];
2342 		if (!tbl)
2343 			continue;
2344 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2345 			continue;
2346 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2347 			found = true;
2348 			break;
2349 		}
2350 	}
2351 
2352 	if (!found)
2353 		return -ENOENT;
2354 
2355 	/*
2356 	 * We acquire tbl->lock to be nice to the periodic timers and
2357 	 * make sure they always see a consistent set of values.
2358 	 */
2359 	write_lock_bh(&tbl->lock);
2360 
2361 	if (tb[NDTA_PARMS]) {
2362 		struct nlattr *tbp[NDTPA_MAX+1];
2363 		struct neigh_parms *p;
2364 		int i, ifindex = 0;
2365 
2366 		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
2367 						  tb[NDTA_PARMS],
2368 						  nl_ntbl_parm_policy, extack);
2369 		if (err < 0)
2370 			goto errout_tbl_lock;
2371 
2372 		if (tbp[NDTPA_IFINDEX])
2373 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2374 
2375 		p = lookup_neigh_parms(tbl, net, ifindex);
2376 		if (p == NULL) {
2377 			err = -ENOENT;
2378 			goto errout_tbl_lock;
2379 		}
2380 
2381 		for (i = 1; i <= NDTPA_MAX; i++) {
2382 			if (tbp[i] == NULL)
2383 				continue;
2384 
2385 			switch (i) {
2386 			case NDTPA_QUEUE_LEN:
2387 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2388 					      nla_get_u32(tbp[i]) *
2389 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2390 				break;
2391 			case NDTPA_QUEUE_LENBYTES:
2392 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2393 					      nla_get_u32(tbp[i]));
2394 				break;
2395 			case NDTPA_PROXY_QLEN:
2396 				NEIGH_VAR_SET(p, PROXY_QLEN,
2397 					      nla_get_u32(tbp[i]));
2398 				break;
2399 			case NDTPA_APP_PROBES:
2400 				NEIGH_VAR_SET(p, APP_PROBES,
2401 					      nla_get_u32(tbp[i]));
2402 				break;
2403 			case NDTPA_UCAST_PROBES:
2404 				NEIGH_VAR_SET(p, UCAST_PROBES,
2405 					      nla_get_u32(tbp[i]));
2406 				break;
2407 			case NDTPA_MCAST_PROBES:
2408 				NEIGH_VAR_SET(p, MCAST_PROBES,
2409 					      nla_get_u32(tbp[i]));
2410 				break;
2411 			case NDTPA_MCAST_REPROBES:
2412 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2413 					      nla_get_u32(tbp[i]));
2414 				break;
2415 			case NDTPA_BASE_REACHABLE_TIME:
2416 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2417 					      nla_get_msecs(tbp[i]));
2418 				/* Update reachable_time as well; otherwise the change
2419 				 * only takes effect the next time neigh_periodic_work
2420 				 * decides to recompute it (which can be several minutes).
2421 				 */
2422 				p->reachable_time =
2423 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2424 				break;
2425 			case NDTPA_GC_STALETIME:
2426 				NEIGH_VAR_SET(p, GC_STALETIME,
2427 					      nla_get_msecs(tbp[i]));
2428 				break;
2429 			case NDTPA_DELAY_PROBE_TIME:
2430 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2431 					      nla_get_msecs(tbp[i]));
2432 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2433 				break;
2434 			case NDTPA_INTERVAL_PROBE_TIME_MS:
2435 				NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS,
2436 					      nla_get_msecs(tbp[i]));
2437 				break;
2438 			case NDTPA_RETRANS_TIME:
2439 				NEIGH_VAR_SET(p, RETRANS_TIME,
2440 					      nla_get_msecs(tbp[i]));
2441 				break;
2442 			case NDTPA_ANYCAST_DELAY:
2443 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2444 					      nla_get_msecs(tbp[i]));
2445 				break;
2446 			case NDTPA_PROXY_DELAY:
2447 				NEIGH_VAR_SET(p, PROXY_DELAY,
2448 					      nla_get_msecs(tbp[i]));
2449 				break;
2450 			case NDTPA_LOCKTIME:
2451 				NEIGH_VAR_SET(p, LOCKTIME,
2452 					      nla_get_msecs(tbp[i]));
2453 				break;
2454 			}
2455 		}
2456 	}
2457 
2458 	err = -ENOENT;
2459 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2460 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2461 	    !net_eq(net, &init_net))
2462 		goto errout_tbl_lock;
2463 
2464 	if (tb[NDTA_THRESH1])
2465 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2466 
2467 	if (tb[NDTA_THRESH2])
2468 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2469 
2470 	if (tb[NDTA_THRESH3])
2471 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2472 
2473 	if (tb[NDTA_GC_INTERVAL])
2474 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2475 
2476 	err = 0;
2477 
2478 errout_tbl_lock:
2479 	write_unlock_bh(&tbl->lock);
2480 errout:
2481 	return err;
2482 }
2483 
2484 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
2485 				    struct netlink_ext_ack *extack)
2486 {
2487 	struct ndtmsg *ndtm;
2488 
2489 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
2490 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
2491 		return -EINVAL;
2492 	}
2493 
2494 	ndtm = nlmsg_data(nlh);
2495 	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
2496 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
2497 		return -EINVAL;
2498 	}
2499 
2500 	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
2501 		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
2502 		return -EINVAL;
2503 	}
2504 
2505 	return 0;
2506 }
2507 
2508 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2509 {
2510 	const struct nlmsghdr *nlh = cb->nlh;
2511 	struct net *net = sock_net(skb->sk);
2512 	int family, tidx, nidx = 0;
2513 	int tbl_skip = cb->args[0];
2514 	int neigh_skip = cb->args[1];
2515 	struct neigh_table *tbl;
2516 
2517 	if (cb->strict_check) {
2518 		int err = neightbl_valid_dump_info(nlh, cb->extack);
2519 
2520 		if (err < 0)
2521 			return err;
2522 	}
2523 
2524 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2525 
2526 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2527 		struct neigh_parms *p;
2528 
2529 		tbl = neigh_tables[tidx];
2530 		if (!tbl)
2531 			continue;
2532 
2533 		if (tidx < tbl_skip || (family && tbl->family != family))
2534 			continue;
2535 
2536 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2537 				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2538 				       NLM_F_MULTI) < 0)
2539 			break;
2540 
2541 		nidx = 0;
2542 		p = list_next_entry(&tbl->parms, list);
2543 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2544 			if (!net_eq(neigh_parms_net(p), net))
2545 				continue;
2546 
2547 			if (nidx < neigh_skip)
2548 				goto next;
2549 
2550 			if (neightbl_fill_param_info(skb, tbl, p,
2551 						     NETLINK_CB(cb->skb).portid,
2552 						     nlh->nlmsg_seq,
2553 						     RTM_NEWNEIGHTBL,
2554 						     NLM_F_MULTI) < 0)
2555 				goto out;
2556 		next:
2557 			nidx++;
2558 		}
2559 
2560 		neigh_skip = 0;
2561 	}
2562 out:
2563 	cb->args[0] = tidx;
2564 	cb->args[1] = nidx;
2565 
2566 	return skb->len;
2567 }
2568 
2569 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2570 			   u32 pid, u32 seq, int type, unsigned int flags)
2571 {
2572 	u32 neigh_flags, neigh_flags_ext;
2573 	unsigned long now = jiffies;
2574 	struct nda_cacheinfo ci;
2575 	struct nlmsghdr *nlh;
2576 	struct ndmsg *ndm;
2577 
2578 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2579 	if (nlh == NULL)
2580 		return -EMSGSIZE;
2581 
2582 	neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2583 	neigh_flags     = neigh->flags & NTF_OLD_MASK;
2584 
2585 	ndm = nlmsg_data(nlh);
2586 	ndm->ndm_family	 = neigh->ops->family;
2587 	ndm->ndm_pad1    = 0;
2588 	ndm->ndm_pad2    = 0;
2589 	ndm->ndm_flags	 = neigh_flags;
2590 	ndm->ndm_type	 = neigh->type;
2591 	ndm->ndm_ifindex = neigh->dev->ifindex;
2592 
2593 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2594 		goto nla_put_failure;
2595 
2596 	read_lock_bh(&neigh->lock);
2597 	ndm->ndm_state	 = neigh->nud_state;
2598 	if (neigh->nud_state & NUD_VALID) {
2599 		char haddr[MAX_ADDR_LEN];
2600 
2601 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2602 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2603 			read_unlock_bh(&neigh->lock);
2604 			goto nla_put_failure;
2605 		}
2606 	}
2607 
2608 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2609 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2610 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2611 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2612 	read_unlock_bh(&neigh->lock);
2613 
2614 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2615 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2616 		goto nla_put_failure;
2617 
2618 	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2619 		goto nla_put_failure;
2620 	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2621 		goto nla_put_failure;
2622 
2623 	nlmsg_end(skb, nlh);
2624 	return 0;
2625 
2626 nla_put_failure:
2627 	nlmsg_cancel(skb, nlh);
2628 	return -EMSGSIZE;
2629 }
2630 
2631 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2632 			    u32 pid, u32 seq, int type, unsigned int flags,
2633 			    struct neigh_table *tbl)
2634 {
2635 	u32 neigh_flags, neigh_flags_ext;
2636 	struct nlmsghdr *nlh;
2637 	struct ndmsg *ndm;
2638 
2639 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2640 	if (nlh == NULL)
2641 		return -EMSGSIZE;
2642 
2643 	neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT;
2644 	neigh_flags     = pn->flags & NTF_OLD_MASK;
2645 
2646 	ndm = nlmsg_data(nlh);
2647 	ndm->ndm_family	 = tbl->family;
2648 	ndm->ndm_pad1    = 0;
2649 	ndm->ndm_pad2    = 0;
2650 	ndm->ndm_flags	 = neigh_flags | NTF_PROXY;
2651 	ndm->ndm_type	 = RTN_UNICAST;
2652 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2653 	ndm->ndm_state	 = NUD_NONE;
2654 
2655 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2656 		goto nla_put_failure;
2657 
2658 	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
2659 		goto nla_put_failure;
2660 	if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext))
2661 		goto nla_put_failure;
2662 
2663 	nlmsg_end(skb, nlh);
2664 	return 0;
2665 
2666 nla_put_failure:
2667 	nlmsg_cancel(skb, nlh);
2668 	return -EMSGSIZE;
2669 }
2670 
2671 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2672 {
2673 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2674 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2675 }
2676 
2677 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2678 {
2679 	struct net_device *master;
2680 
2681 	if (!master_idx)
2682 		return false;
2683 
2684 	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
2685 
2686 	/* 0 already denotes that NDA_MASTER wasn't passed, so another invalid
2687 	 * ifindex value is needed to denote "no master".
2688 	 */
2689 	if (master_idx == -1)
2690 		return !!master;
2691 
2692 	if (!master || master->ifindex != master_idx)
2693 		return true;
2694 
2695 	return false;
2696 }
2697 
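/*
 * Editor's note: master_idx == -1 is a sentinel meaning "keep only
 * entries whose device has no master" (iproute2 appears to send this for
 * "ip neigh show nomaster").  Positive values match an explicit master;
 * as a sketch:
 *
 *	ip neigh show master br0 -> NDA_MASTER  = br0's ifindex
 *	ip neigh show dev eth0   -> NDA_IFINDEX = eth0's ifindex
 */
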
2698 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2699 {
2700 	if (filter_idx && (!dev || dev->ifindex != filter_idx))
2701 		return true;
2702 
2703 	return false;
2704 }
2705 
2706 struct neigh_dump_filter {
2707 	int master_idx;
2708 	int dev_idx;
2709 };
2710 
2711 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2712 			    struct netlink_callback *cb,
2713 			    struct neigh_dump_filter *filter)
2714 {
2715 	struct net *net = sock_net(skb->sk);
2716 	struct neighbour *n;
2717 	int rc, h, s_h = cb->args[1];
2718 	int idx, s_idx = idx = cb->args[2];
2719 	struct neigh_hash_table *nht;
2720 	unsigned int flags = NLM_F_MULTI;
2721 
2722 	if (filter->dev_idx || filter->master_idx)
2723 		flags |= NLM_F_DUMP_FILTERED;
2724 
2725 	rcu_read_lock_bh();
2726 	nht = rcu_dereference_bh(tbl->nht);
2727 
2728 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2729 		if (h > s_h)
2730 			s_idx = 0;
2731 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2732 		     n != NULL;
2733 		     n = rcu_dereference_bh(n->next)) {
2734 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2735 				goto next;
2736 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2737 			    neigh_master_filtered(n->dev, filter->master_idx))
2738 				goto next;
2739 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2740 					    cb->nlh->nlmsg_seq,
2741 					    RTM_NEWNEIGH,
2742 					    flags) < 0) {
2743 				rc = -1;
2744 				goto out;
2745 			}
2746 next:
2747 			idx++;
2748 		}
2749 	}
2750 	rc = skb->len;
2751 out:
2752 	rcu_read_unlock_bh();
2753 	cb->args[1] = h;
2754 	cb->args[2] = idx;
2755 	return rc;
2756 }
2757 
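/*
 * Editor's note: netlink dumps are restartable and the cursor lives in
 * cb->args[]: args[1] holds the hash bucket and args[2] the index within
 * it (args[3]/args[4] play the same role for the proxy table below).  A
 * dump that overflows the skb returns -1 and resumes from this cursor on
 * the next invocation.
 */
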
2758 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2759 			     struct netlink_callback *cb,
2760 			     struct neigh_dump_filter *filter)
2761 {
2762 	struct pneigh_entry *n;
2763 	struct net *net = sock_net(skb->sk);
2764 	int rc, h, s_h = cb->args[3];
2765 	int idx, s_idx = idx = cb->args[4];
2766 	unsigned int flags = NLM_F_MULTI;
2767 
2768 	if (filter->dev_idx || filter->master_idx)
2769 		flags |= NLM_F_DUMP_FILTERED;
2770 
2771 	read_lock_bh(&tbl->lock);
2772 
2773 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2774 		if (h > s_h)
2775 			s_idx = 0;
2776 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2777 			if (idx < s_idx || pneigh_net(n) != net)
2778 				goto next;
2779 			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
2780 			    neigh_master_filtered(n->dev, filter->master_idx))
2781 				goto next;
2782 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2783 					    cb->nlh->nlmsg_seq,
2784 					    RTM_NEWNEIGH, flags, tbl) < 0) {
2785 				read_unlock_bh(&tbl->lock);
2786 				rc = -1;
2787 				goto out;
2788 			}
2789 		next:
2790 			idx++;
2791 		}
2792 	}
2793 
2794 	read_unlock_bh(&tbl->lock);
2795 	rc = skb->len;
2796 out:
2797 	cb->args[3] = h;
2798 	cb->args[4] = idx;
2799 	return rc;
2801 }
2802 
2803 static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
2804 				bool strict_check,
2805 				struct neigh_dump_filter *filter,
2806 				struct netlink_ext_ack *extack)
2807 {
2808 	struct nlattr *tb[NDA_MAX + 1];
2809 	int err, i;
2810 
2811 	if (strict_check) {
2812 		struct ndmsg *ndm;
2813 
2814 		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2815 			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
2816 			return -EINVAL;
2817 		}
2818 
2819 		ndm = nlmsg_data(nlh);
2820 		if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
2821 		    ndm->ndm_state || ndm->ndm_type) {
2822 			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
2823 			return -EINVAL;
2824 		}
2825 
2826 		if (ndm->ndm_flags & ~NTF_PROXY) {
2827 			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
2828 			return -EINVAL;
2829 		}
2830 
2831 		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
2832 						    tb, NDA_MAX, nda_policy,
2833 						    extack);
2834 	} else {
2835 		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
2836 					     NDA_MAX, nda_policy, extack);
2837 	}
2838 	if (err < 0)
2839 		return err;
2840 
2841 	for (i = 0; i <= NDA_MAX; ++i) {
2842 		if (!tb[i])
2843 			continue;
2844 
2845 		/* all new attributes should require strict_check */
2846 		switch (i) {
2847 		case NDA_IFINDEX:
2848 			filter->dev_idx = nla_get_u32(tb[i]);
2849 			break;
2850 		case NDA_MASTER:
2851 			filter->master_idx = nla_get_u32(tb[i]);
2852 			break;
2853 		default:
2854 			if (strict_check) {
2855 				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
2856 				return -EINVAL;
2857 			}
2858 		}
2859 	}
2860 
2861 	return 0;
2862 }
2863 
2864 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2865 {
2866 	const struct nlmsghdr *nlh = cb->nlh;
2867 	struct neigh_dump_filter filter = {};
2868 	struct neigh_table *tbl;
2869 	int t, family, s_t;
2870 	int proxy = 0;
2871 	int err;
2872 
2873 	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2874 
2875 	/* Check that a full ndmsg structure is present; the family member
2876 	 * sits at the same offset in both structures.
2877 	 */
2878 	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
2879 	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
2880 		proxy = 1;
2881 
2882 	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
2883 	if (err < 0 && cb->strict_check)
2884 		return err;
2885 
2886 	s_t = cb->args[0];
2887 
2888 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2889 		tbl = neigh_tables[t];
2890 
2891 		if (!tbl)
2892 			continue;
2893 		if (t < s_t || (family && tbl->family != family))
2894 			continue;
2895 		if (t > s_t)
2896 			memset(&cb->args[1], 0, sizeof(cb->args) -
2897 						sizeof(cb->args[0]));
2898 		if (proxy)
2899 			err = pneigh_dump_table(tbl, skb, cb, &filter);
2900 		else
2901 			err = neigh_dump_table(tbl, skb, cb, &filter);
2902 		if (err < 0)
2903 			break;
2904 	}
2905 
2906 	cb->args[0] = t;
2907 	return skb->len;
2908 }
2909 
2910 static int neigh_valid_get_req(const struct nlmsghdr *nlh,
2911 			       struct neigh_table **tbl,
2912 			       void **dst, int *dev_idx, u8 *ndm_flags,
2913 			       struct netlink_ext_ack *extack)
2914 {
2915 	struct nlattr *tb[NDA_MAX + 1];
2916 	struct ndmsg *ndm;
2917 	int err, i;
2918 
2919 	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
2920 		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
2921 		return -EINVAL;
2922 	}
2923 
2924 	ndm = nlmsg_data(nlh);
2925 	if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
2926 	    ndm->ndm_type) {
2927 		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
2928 		return -EINVAL;
2929 	}
2930 
2931 	if (ndm->ndm_flags & ~NTF_PROXY) {
2932 		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
2933 		return -EINVAL;
2934 	}
2935 
2936 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
2937 					    NDA_MAX, nda_policy, extack);
2938 	if (err < 0)
2939 		return err;
2940 
2941 	*ndm_flags = ndm->ndm_flags;
2942 	*dev_idx = ndm->ndm_ifindex;
2943 	*tbl = neigh_find_table(ndm->ndm_family);
2944 	if (*tbl == NULL) {
2945 		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
2946 		return -EAFNOSUPPORT;
2947 	}
2948 
2949 	for (i = 0; i <= NDA_MAX; ++i) {
2950 		if (!tb[i])
2951 			continue;
2952 
2953 		switch (i) {
2954 		case NDA_DST:
2955 			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2956 				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
2957 				return -EINVAL;
2958 			}
2959 			*dst = nla_data(tb[i]);
2960 			break;
2961 		default:
2962 			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
2963 			return -EINVAL;
2964 		}
2965 	}
2966 
2967 	return 0;
2968 }
2969 
2970 static inline size_t neigh_nlmsg_size(void)
2971 {
2972 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2973 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2974 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2975 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2976 	       + nla_total_size(4)  /* NDA_PROBES */
2977 	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
2978 	       + nla_total_size(1); /* NDA_PROTOCOL */
2979 }
2980 
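/*
 * Editor's note: each nla_total_size(n) above is NLA_ALIGN(NLA_HDRLEN + n),
 * so e.g. nla_total_size(4) = 8 bytes for NDA_PROBES.  The MAX_ADDR_LEN
 * terms make the result an upper bound; the actual NDA_LLADDR payload is
 * only dev->addr_len bytes.
 */
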
2981 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2982 			   u32 pid, u32 seq)
2983 {
2984 	struct sk_buff *skb;
2985 	int err = 0;
2986 
2987 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
2988 	if (!skb)
2989 		return -ENOBUFS;
2990 
2991 	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
2992 	if (err) {
2993 		kfree_skb(skb);
2994 		goto errout;
2995 	}
2996 
2997 	err = rtnl_unicast(skb, net, pid);
2998 errout:
2999 	return err;
3000 }
3001 
3002 static inline size_t pneigh_nlmsg_size(void)
3003 {
3004 	return NLMSG_ALIGN(sizeof(struct ndmsg))
3005 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
3006 	       + nla_total_size(4)  /* NDA_FLAGS_EXT */
3007 	       + nla_total_size(1); /* NDA_PROTOCOL */
3008 }
3009 
3010 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
3011 			    u32 pid, u32 seq, struct neigh_table *tbl)
3012 {
3013 	struct sk_buff *skb;
3014 	int err = 0;
3015 
3016 	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
3017 	if (!skb)
3018 		return -ENOBUFS;
3019 
3020 	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3021 	if (err) {
3022 		kfree_skb(skb);
3023 		goto errout;
3024 	}
3025 
3026 	err = rtnl_unicast(skb, net, pid);
3027 errout:
3028 	return err;
3029 }
3030 
3031 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3032 		     struct netlink_ext_ack *extack)
3033 {
3034 	struct net *net = sock_net(in_skb->sk);
3035 	struct net_device *dev = NULL;
3036 	struct neigh_table *tbl = NULL;
3037 	struct neighbour *neigh;
3038 	void *dst = NULL;
3039 	u8 ndm_flags = 0;
3040 	int dev_idx = 0;
3041 	int err;
3042 
3043 	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
3044 				  extack);
3045 	if (err < 0)
3046 		return err;
3047 
3048 	if (dev_idx) {
3049 		dev = __dev_get_by_index(net, dev_idx);
3050 		if (!dev) {
3051 			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
3052 			return -ENODEV;
3053 		}
3054 	}
3055 
3056 	if (!dst) {
3057 		NL_SET_ERR_MSG(extack, "Network address not specified");
3058 		return -EINVAL;
3059 	}
3060 
3061 	if (ndm_flags & NTF_PROXY) {
3062 		struct pneigh_entry *pn;
3063 
3064 		pn = pneigh_lookup(tbl, net, dst, dev, 0);
3065 		if (!pn) {
3066 			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
3067 			return -ENOENT;
3068 		}
3069 		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
3070 					nlh->nlmsg_seq, tbl);
3071 	}
3072 
3073 	if (!dev) {
3074 		NL_SET_ERR_MSG(extack, "No device specified");
3075 		return -EINVAL;
3076 	}
3077 
3078 	neigh = neigh_lookup(tbl, dst, dev);
3079 	if (!neigh) {
3080 		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
3081 		return -ENOENT;
3082 	}
3083 
3084 	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3085 			      nlh->nlmsg_seq);
3086 
3087 	neigh_release(neigh);
3088 
3089 	return err;
3090 }
3091 
3092 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
3093 {
3094 	int chain;
3095 	struct neigh_hash_table *nht;
3096 
3097 	rcu_read_lock_bh();
3098 	nht = rcu_dereference_bh(tbl->nht);
3099 
3100 	read_lock(&tbl->lock); /* avoid resizes */
3101 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3102 		struct neighbour *n;
3103 
3104 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
3105 		     n != NULL;
3106 		     n = rcu_dereference_bh(n->next))
3107 			cb(n, cookie);
3108 	}
3109 	read_unlock(&tbl->lock);
3110 	rcu_read_unlock_bh();
3111 }
3112 EXPORT_SYMBOL(neigh_for_each);
3113 
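/*
 * Editor's sketch of a neigh_for_each() caller; the callback name and
 * cookie are hypothetical:
 *
 *	static void neigh_count_valid(struct neighbour *n, void *cookie)
 *	{
 *		if (n->nud_state & NUD_VALID)
 *			(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int valid = 0;
 *	neigh_for_each(&arp_tbl, neigh_count_valid, &valid);
 *
 * The callback runs under tbl->lock with BHs disabled, so it must not
 * sleep or call back into the neighbour table.
 */
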
3114 /* The tbl->lock must be held as a writer and BH disabled. */
3115 void __neigh_for_each_release(struct neigh_table *tbl,
3116 			      int (*cb)(struct neighbour *))
3117 {
3118 	int chain;
3119 	struct neigh_hash_table *nht;
3120 
3121 	nht = rcu_dereference_protected(tbl->nht,
3122 					lockdep_is_held(&tbl->lock));
3123 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
3124 		struct neighbour *n;
3125 		struct neighbour __rcu **np;
3126 
3127 		np = &nht->hash_buckets[chain];
3128 		while ((n = rcu_dereference_protected(*np,
3129 					lockdep_is_held(&tbl->lock))) != NULL) {
3130 			int release;
3131 
3132 			write_lock(&n->lock);
3133 			release = cb(n);
3134 			if (release) {
3135 				rcu_assign_pointer(*np,
3136 					rcu_dereference_protected(n->next,
3137 						lockdep_is_held(&tbl->lock)));
3138 				neigh_mark_dead(n);
3139 			} else
3140 				np = &n->next;
3141 			write_unlock(&n->lock);
3142 			if (release)
3143 				neigh_cleanup_and_release(n);
3144 		}
3145 	}
3146 }
3147 EXPORT_SYMBOL(__neigh_for_each_release);
3148 
3149 int neigh_xmit(int index, struct net_device *dev,
3150 	       const void *addr, struct sk_buff *skb)
3151 {
3152 	int err = -EAFNOSUPPORT;
3153 	if (likely(index < NEIGH_NR_TABLES)) {
3154 		struct neigh_table *tbl;
3155 		struct neighbour *neigh;
3156 
3157 		tbl = neigh_tables[index];
3158 		if (!tbl)
3159 			goto out;
3160 		rcu_read_lock_bh();
3161 		if (index == NEIGH_ARP_TABLE) {
3162 			u32 key = *((u32 *)addr);
3163 
3164 			neigh = __ipv4_neigh_lookup_noref(dev, key);
3165 		} else {
3166 			neigh = __neigh_lookup_noref(tbl, addr, dev);
3167 		}
3168 		if (!neigh)
3169 			neigh = __neigh_create(tbl, addr, dev, false);
3170 		err = PTR_ERR(neigh);
3171 		if (IS_ERR(neigh)) {
3172 			rcu_read_unlock_bh();
3173 			goto out_kfree_skb;
3174 		}
3175 		err = neigh->output(neigh, skb);
3176 		rcu_read_unlock_bh();
3177 	} else if (index == NEIGH_LINK_TABLE) {
3179 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
3180 				      addr, NULL, skb->len);
3181 		if (err < 0)
3182 			goto out_kfree_skb;
3183 		err = dev_queue_xmit(skb);
3184 	}
3185 out:
3186 	return err;
3187 out_kfree_skb:
3188 	kfree_skb(skb);
3189 	goto out;
3190 }
3191 EXPORT_SYMBOL(neigh_xmit);
3192 
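/*
 * Editor's note: neigh_xmit() lets address-family-agnostic code (MPLS is
 * the in-tree user) transmit through a table chosen at run time.  A
 * sketch of a typical call, with illustrative arguments:
 *
 *	err = neigh_xmit(NEIGH_ARP_TABLE, dev, &ipv4_next_hop, skb);
 *
 * NEIGH_LINK_TABLE bypasses resolution entirely and only builds the
 * link-layer header from the caller-supplied address.
 */
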
3193 #ifdef CONFIG_PROC_FS
3194 
3195 static struct neighbour *neigh_get_first(struct seq_file *seq)
3196 {
3197 	struct neigh_seq_state *state = seq->private;
3198 	struct net *net = seq_file_net(seq);
3199 	struct neigh_hash_table *nht = state->nht;
3200 	struct neighbour *n = NULL;
3201 	int bucket;
3202 
3203 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
3204 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
3205 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
3206 
3207 		while (n) {
3208 			if (!net_eq(dev_net(n->dev), net))
3209 				goto next;
3210 			if (state->neigh_sub_iter) {
3211 				loff_t fakep = 0;
3212 				void *v;
3213 
3214 				v = state->neigh_sub_iter(state, n, &fakep);
3215 				if (!v)
3216 					goto next;
3217 			}
3218 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3219 				break;
3220 			if (n->nud_state & ~NUD_NOARP)
3221 				break;
3222 next:
3223 			n = rcu_dereference_bh(n->next);
3224 		}
3225 
3226 		if (n)
3227 			break;
3228 	}
3229 	state->bucket = bucket;
3230 
3231 	return n;
3232 }
3233 
3234 static struct neighbour *neigh_get_next(struct seq_file *seq,
3235 					struct neighbour *n,
3236 					loff_t *pos)
3237 {
3238 	struct neigh_seq_state *state = seq->private;
3239 	struct net *net = seq_file_net(seq);
3240 	struct neigh_hash_table *nht = state->nht;
3241 
3242 	if (state->neigh_sub_iter) {
3243 		void *v = state->neigh_sub_iter(state, n, pos);
3244 		if (v)
3245 			return n;
3246 	}
3247 	n = rcu_dereference_bh(n->next);
3248 
3249 	while (1) {
3250 		while (n) {
3251 			if (!net_eq(dev_net(n->dev), net))
3252 				goto next;
3253 			if (state->neigh_sub_iter) {
3254 				void *v = state->neigh_sub_iter(state, n, pos);
3255 				if (v)
3256 					return n;
3257 				goto next;
3258 			}
3259 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
3260 				break;
3261 
3262 			if (n->nud_state & ~NUD_NOARP)
3263 				break;
3264 next:
3265 			n = rcu_dereference_bh(n->next);
3266 		}
3267 
3268 		if (n)
3269 			break;
3270 
3271 		if (++state->bucket >= (1 << nht->hash_shift))
3272 			break;
3273 
3274 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
3275 	}
3276 
3277 	if (n && pos)
3278 		--(*pos);
3279 	return n;
3280 }
3281 
3282 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
3283 {
3284 	struct neighbour *n = neigh_get_first(seq);
3285 
3286 	if (n) {
3287 		--(*pos);
3288 		while (*pos) {
3289 			n = neigh_get_next(seq, n, pos);
3290 			if (!n)
3291 				break;
3292 		}
3293 	}
3294 	return *pos ? NULL : n;
3295 }
3296 
3297 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
3298 {
3299 	struct neigh_seq_state *state = seq->private;
3300 	struct net *net = seq_file_net(seq);
3301 	struct neigh_table *tbl = state->tbl;
3302 	struct pneigh_entry *pn = NULL;
3303 	int bucket;
3304 
3305 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
3306 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
3307 		pn = tbl->phash_buckets[bucket];
3308 		while (pn && !net_eq(pneigh_net(pn), net))
3309 			pn = pn->next;
3310 		if (pn)
3311 			break;
3312 	}
3313 	state->bucket = bucket;
3314 
3315 	return pn;
3316 }
3317 
3318 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
3319 					    struct pneigh_entry *pn,
3320 					    loff_t *pos)
3321 {
3322 	struct neigh_seq_state *state = seq->private;
3323 	struct net *net = seq_file_net(seq);
3324 	struct neigh_table *tbl = state->tbl;
3325 
3326 	do {
3327 		pn = pn->next;
3328 	} while (pn && !net_eq(pneigh_net(pn), net));
3329 
3330 	while (!pn) {
3331 		if (++state->bucket > PNEIGH_HASHMASK)
3332 			break;
3333 		pn = tbl->phash_buckets[state->bucket];
3334 		while (pn && !net_eq(pneigh_net(pn), net))
3335 			pn = pn->next;
3336 		if (pn)
3337 			break;
3338 	}
3339 
3340 	if (pn && pos)
3341 		--(*pos);
3342 
3343 	return pn;
3344 }
3345 
3346 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
3347 {
3348 	struct pneigh_entry *pn = pneigh_get_first(seq);
3349 
3350 	if (pn) {
3351 		--(*pos);
3352 		while (*pos) {
3353 			pn = pneigh_get_next(seq, pn, pos);
3354 			if (!pn)
3355 				break;
3356 		}
3357 	}
3358 	return *pos ? NULL : pn;
3359 }
3360 
3361 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
3362 {
3363 	struct neigh_seq_state *state = seq->private;
3364 	void *rc;
3365 	loff_t idxpos = *pos;
3366 
3367 	rc = neigh_get_idx(seq, &idxpos);
3368 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3369 		rc = pneigh_get_idx(seq, &idxpos);
3370 
3371 	return rc;
3372 }
3373 
3374 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3375 	__acquires(tbl->lock)
3376 	__acquires(rcu_bh)
3377 {
3378 	struct neigh_seq_state *state = seq->private;
3379 
3380 	state->tbl = tbl;
3381 	state->bucket = 0;
3382 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
3383 
3384 	rcu_read_lock_bh();
3385 	state->nht = rcu_dereference_bh(tbl->nht);
3386 	read_lock(&tbl->lock);
3387 
3388 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
3389 }
3390 EXPORT_SYMBOL(neigh_seq_start);
3391 
3392 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3393 {
3394 	struct neigh_seq_state *state;
3395 	void *rc;
3396 
3397 	if (v == SEQ_START_TOKEN) {
3398 		rc = neigh_get_first(seq);
3399 		goto out;
3400 	}
3401 
3402 	state = seq->private;
3403 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
3404 		rc = neigh_get_next(seq, v, NULL);
3405 		if (rc)
3406 			goto out;
3407 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
3408 			rc = pneigh_get_first(seq);
3409 	} else {
3410 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
3411 		rc = pneigh_get_next(seq, v, NULL);
3412 	}
3413 out:
3414 	++(*pos);
3415 	return rc;
3416 }
3417 EXPORT_SYMBOL(neigh_seq_next);
3418 
3419 void neigh_seq_stop(struct seq_file *seq, void *v)
3420 	__releases(tbl->lock)
3421 	__releases(rcu_bh)
3422 {
3423 	struct neigh_seq_state *state = seq->private;
3424 	struct neigh_table *tbl = state->tbl;
3425 
3426 	read_unlock(&tbl->lock);
3427 	rcu_read_unlock_bh();
3428 }
3429 EXPORT_SYMBOL(neigh_seq_stop);
3430 
3431 /* statistics via seq_file */
3432 
3433 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
3434 {
3435 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3436 	int cpu;
3437 
3438 	if (*pos == 0)
3439 		return SEQ_START_TOKEN;
3440 
3441 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
3442 		if (!cpu_possible(cpu))
3443 			continue;
3444 		*pos = cpu+1;
3445 		return per_cpu_ptr(tbl->stats, cpu);
3446 	}
3447 	return NULL;
3448 }
3449 
3450 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3451 {
3452 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3453 	int cpu;
3454 
3455 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
3456 		if (!cpu_possible(cpu))
3457 			continue;
3458 		*pos = cpu+1;
3459 		return per_cpu_ptr(tbl->stats, cpu);
3460 	}
3461 	(*pos)++;
3462 	return NULL;
3463 }
3464 
3465 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
3466 {
3467 
3468 }
3469 
3470 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
3471 {
3472 	struct neigh_table *tbl = pde_data(file_inode(seq->file));
3473 	struct neigh_statistics *st = v;
3474 
3475 	if (v == SEQ_START_TOKEN) {
3476 		seq_puts(seq, "entries  allocs   destroys hash_grows lookups  hits     res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
3477 		return 0;
3478 	}
3479 
3480 	seq_printf(seq, "%08x %08lx %08lx %08lx   %08lx %08lx %08lx   "
3481 			"%08lx         %08lx         %08lx         "
3482 			"%08lx       %08lx            %08lx\n",
3483 		   atomic_read(&tbl->entries),
3484 
3485 		   st->allocs,
3486 		   st->destroys,
3487 		   st->hash_grows,
3488 
3489 		   st->lookups,
3490 		   st->hits,
3491 
3492 		   st->res_failed,
3493 
3494 		   st->rcv_probes_mcast,
3495 		   st->rcv_probes_ucast,
3496 
3497 		   st->periodic_gc_runs,
3498 		   st->forced_gc_runs,
3499 		   st->unres_discards,
3500 		   st->table_fulls
3501 		   );
3502 
3503 	return 0;
3504 }
3505 
3506 static const struct seq_operations neigh_stat_seq_ops = {
3507 	.start	= neigh_stat_seq_start,
3508 	.next	= neigh_stat_seq_next,
3509 	.stop	= neigh_stat_seq_stop,
3510 	.show	= neigh_stat_seq_show,
3511 };
3512 #endif /* CONFIG_PROC_FS */
3513 
3514 static void __neigh_notify(struct neighbour *n, int type, int flags,
3515 			   u32 pid)
3516 {
3517 	struct net *net = dev_net(n->dev);
3518 	struct sk_buff *skb;
3519 	int err = -ENOBUFS;
3520 
3521 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
3522 	if (skb == NULL)
3523 		goto errout;
3524 
3525 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
3526 	if (err < 0) {
3527 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
3528 		WARN_ON(err == -EMSGSIZE);
3529 		kfree_skb(skb);
3530 		goto errout;
3531 	}
3532 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
3533 	return;
3534 errout:
3535 	if (err < 0)
3536 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
3537 }
3538 
3539 void neigh_app_ns(struct neighbour *n)
3540 {
3541 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
3542 }
3543 EXPORT_SYMBOL(neigh_app_ns);
3544 
3545 #ifdef CONFIG_SYSCTL
3546 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
3547 
3548 static int proc_unres_qlen(struct ctl_table *ctl, int write,
3549 			   void *buffer, size_t *lenp, loff_t *ppos)
3550 {
3551 	int size, ret;
3552 	struct ctl_table tmp = *ctl;
3553 
3554 	tmp.extra1 = SYSCTL_ZERO;
3555 	tmp.extra2 = &unres_qlen_max;
3556 	tmp.data = &size;
3557 
3558 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
3559 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3560 
3561 	if (write && !ret)
3562 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
3563 	return ret;
3564 }
3565 
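/*
 * Editor's worked example: the legacy unres_qlen sysctl is expressed in
 * packets but stored in bytes.  Writing unres_qlen = 101 stores
 * 101 * SKB_TRUESIZE(ETH_FRAME_LEN) in QUEUE_LEN_BYTES (the truesize adds
 * skb and shared-info overhead to the 1514-byte frame, so the exact value
 * is arch-dependent); reads divide back, rounding down, so the two
 * sysctls can drift slightly when unres_qlen_bytes is not an exact
 * multiple of the truesize.
 */
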
3566 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
3567 				  int index)
3568 {
3569 	struct net_device *dev;
3570 	int family = neigh_parms_family(p);
3571 
3572 	rcu_read_lock();
3573 	for_each_netdev_rcu(net, dev) {
3574 		struct neigh_parms *dst_p =
3575 				neigh_get_dev_parms_rcu(dev, family);
3576 
3577 		if (dst_p && !test_bit(index, dst_p->data_state))
3578 			dst_p->data[index] = p->data[index];
3579 	}
3580 	rcu_read_unlock();
3581 }
3582 
3583 static void neigh_proc_update(struct ctl_table *ctl, int write)
3584 {
3585 	struct net_device *dev = ctl->extra1;
3586 	struct neigh_parms *p = ctl->extra2;
3587 	struct net *net = neigh_parms_net(p);
3588 	int index = (int *) ctl->data - p->data;
3589 
3590 	if (!write)
3591 		return;
3592 
3593 	set_bit(index, p->data_state);
3594 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
3595 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
3596 	if (!dev) /* a NULL dev means this is a default value */
3597 		neigh_copy_dflt_parms(net, p, index);
3598 }
3599 
3600 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
3601 					   void *buffer, size_t *lenp,
3602 					   loff_t *ppos)
3603 {
3604 	struct ctl_table tmp = *ctl;
3605 	int ret;
3606 
3607 	tmp.extra1 = SYSCTL_ZERO;
3608 	tmp.extra2 = SYSCTL_INT_MAX;
3609 
3610 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
3611 	neigh_proc_update(ctl, write);
3612 	return ret;
3613 }
3614 
3615 static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write,
3616 						   void *buffer, size_t *lenp, loff_t *ppos)
3617 {
3618 	struct ctl_table tmp = *ctl;
3619 	int ret;
3620 
3621 	int min = msecs_to_jiffies(1);
3622 
3623 	tmp.extra1 = &min;
3624 	tmp.extra2 = NULL;
3625 
3626 	ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos);
3627 	neigh_proc_update(ctl, write);
3628 	return ret;
3629 }
3630 
3631 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
3632 			size_t *lenp, loff_t *ppos)
3633 {
3634 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3635 
3636 	neigh_proc_update(ctl, write);
3637 	return ret;
3638 }
3639 EXPORT_SYMBOL(neigh_proc_dointvec);
3640 
3641 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
3642 				size_t *lenp, loff_t *ppos)
3643 {
3644 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3645 
3646 	neigh_proc_update(ctl, write);
3647 	return ret;
3648 }
3649 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3650 
3651 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3652 					      void *buffer, size_t *lenp,
3653 					      loff_t *ppos)
3654 {
3655 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3656 
3657 	neigh_proc_update(ctl, write);
3658 	return ret;
3659 }
3660 
3661 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3662 				   void *buffer, size_t *lenp, loff_t *ppos)
3663 {
3664 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3665 
3666 	neigh_proc_update(ctl, write);
3667 	return ret;
3668 }
3669 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3670 
3671 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3672 					  void *buffer, size_t *lenp,
3673 					  loff_t *ppos)
3674 {
3675 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3676 
3677 	neigh_proc_update(ctl, write);
3678 	return ret;
3679 }
3680 
3681 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3682 					  void *buffer, size_t *lenp,
3683 					  loff_t *ppos)
3684 {
3685 	struct neigh_parms *p = ctl->extra2;
3686 	int ret;
3687 
3688 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3689 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3690 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3691 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3692 	else
3693 		ret = -1;
3694 
3695 	if (write && ret == 0) {
3696 		/* Update reachable_time as well; otherwise the change
3697 		 * only takes effect the next time neigh_periodic_work
3698 		 * decides to recompute it.
3699 		 */
3700 		p->reachable_time =
3701 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3702 	}
3703 	return ret;
3704 }
3705 
3706 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3707 	(&((struct neigh_parms *) 0)->data[index])
3708 
3709 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3710 	[NEIGH_VAR_ ## attr] = { \
3711 		.procname	= name, \
3712 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3713 		.maxlen		= sizeof(int), \
3714 		.mode		= mval, \
3715 		.proc_handler	= proc, \
3716 	}
3717 
3718 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3719 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3720 
3721 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3722 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3723 
3724 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3725 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3726 
3727 #define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \
3728 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive)
3729 
3730 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3731 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3732 
3733 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3734 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3735 
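/*
 * Editor's sketch of one expansion, derived from the macros above.
 * NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit") becomes:
 *
 *	[NEIGH_VAR_MCAST_PROBES] = {
 *		.procname	= "mcast_solicit",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_MCAST_PROBES),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_zero_intmax,
 *	}
 *
 * .data starts as an offset into a NULL neigh_parms and is rebased onto
 * the real parms block in neigh_sysctl_register() below.
 */
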
3736 static struct neigh_sysctl_table {
3737 	struct ctl_table_header *sysctl_header;
3738 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3739 } neigh_sysctl_template __read_mostly = {
3740 	.neigh_vars = {
3741 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3742 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3743 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3744 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3745 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3746 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3747 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3748 		NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS,
3749 						       "interval_probe_time_ms"),
3750 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3751 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3752 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3753 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3754 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3755 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3756 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3757 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3758 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3759 		[NEIGH_VAR_GC_INTERVAL] = {
3760 			.procname	= "gc_interval",
3761 			.maxlen		= sizeof(int),
3762 			.mode		= 0644,
3763 			.proc_handler	= proc_dointvec_jiffies,
3764 		},
3765 		[NEIGH_VAR_GC_THRESH1] = {
3766 			.procname	= "gc_thresh1",
3767 			.maxlen		= sizeof(int),
3768 			.mode		= 0644,
3769 			.extra1		= SYSCTL_ZERO,
3770 			.extra2		= SYSCTL_INT_MAX,
3771 			.proc_handler	= proc_dointvec_minmax,
3772 		},
3773 		[NEIGH_VAR_GC_THRESH2] = {
3774 			.procname	= "gc_thresh2",
3775 			.maxlen		= sizeof(int),
3776 			.mode		= 0644,
3777 			.extra1		= SYSCTL_ZERO,
3778 			.extra2		= SYSCTL_INT_MAX,
3779 			.proc_handler	= proc_dointvec_minmax,
3780 		},
3781 		[NEIGH_VAR_GC_THRESH3] = {
3782 			.procname	= "gc_thresh3",
3783 			.maxlen		= sizeof(int),
3784 			.mode		= 0644,
3785 			.extra1		= SYSCTL_ZERO,
3786 			.extra2		= SYSCTL_INT_MAX,
3787 			.proc_handler	= proc_dointvec_minmax,
3788 		},
3789 		{},
3790 	},
3791 };
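/*
 * Illustrative note on how the template materializes (paths shown are
 * examples): each registration kmemdup()'s one private copy of
 * neigh_vars[].  For a device "eth0" under IPv4 the entries surface
 * as, e.g.:
 *
 *	/proc/sys/net/ipv4/neigh/eth0/mcast_solicit
 *	/proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms
 *
 * The gc_* entries exist only in the per-family "default" table
 * (/proc/sys/net/ipv4/neigh/default/gc_thresh3 and friends), because
 * neigh_sysctl_register() truncates per-device tables at
 * NEIGH_VAR_GC_INTERVAL: garbage collection is a property of the whole
 * neigh_table, not of one device.
 */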
3792 
3793 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3794 			  proc_handler *handler)
3795 {
3796 	int i;
3797 	struct neigh_sysctl_table *t;
3798 	const char *dev_name_source;
3799 	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3800 	char *p_name;
3801 
3802 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT);
3803 	if (!t)
3804 		goto err;
3805 
3806 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3807 		t->neigh_vars[i].data += (long) p;
3808 		t->neigh_vars[i].extra1 = dev;
3809 		t->neigh_vars[i].extra2 = p;
3810 	}
3811 
3812 	if (dev) {
3813 		dev_name_source = dev->name;
3814 		/* Terminate the table early */
3815 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3816 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3817 	} else {
3818 		struct neigh_table *tbl = p->tbl;
3819 		dev_name_source = "default";
3820 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3821 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3822 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3823 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3824 	}
3825 
3826 	if (handler) {
3827 		/* RetransTime */
3828 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3829 		/* ReachableTime */
3830 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3831 		/* RetransTime (in milliseconds) */
3832 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3833 		/* ReachableTime (in milliseconds) */
3834 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3835 	} else {
3836 		/* These handlers update p->reachable_time after
3837 		 * base_reachable_time(_ms) is written, so the new value takes
3838 		 * effect on the next neighbour update instead of waiting for
3839 		 * neigh_periodic_work to recompute it (which can take several
3840 		 * minutes). Any handler that replaces them should do the same.
3841 		 */
3842 		/* ReachableTime */
3843 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3844 			neigh_proc_base_reachable_time;
3845 		/* ReachableTime (in milliseconds) */
3846 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3847 			neigh_proc_base_reachable_time;
3848 	}
3849 
3850 	switch (neigh_parms_family(p)) {
3851 	case AF_INET:
3852 		p_name = "ipv4";
3853 		break;
3854 	case AF_INET6:
3855 		p_name = "ipv6";
3856 		break;
3857 	default:
3858 		BUG();
3859 	}
3860 
3861 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3862 		p_name, dev_name_source);
3863 	t->sysctl_header =
3864 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3865 	if (!t->sysctl_header)
3866 		goto free;
3867 
3868 	p->sysctl_table = t;
3869 	return 0;
3870 
3871 free:
3872 	kfree(t);
3873 err:
3874 	return -ENOBUFS;
3875 }
3876 EXPORT_SYMBOL(neigh_sysctl_register);
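/*
 * Usage sketch (call sites paraphrased; the exact shape varies by
 * protocol and kernel version).  A protocol typically registers its
 * per-family defaults once at init time, with no device and no custom
 * handler, roughly:
 *
 *	neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
 *
 * and registers per-device knobs when it allocates neigh_parms for a
 * device, optionally passing a proc handler of its own (IPv6's ndisc
 * does this to propagate writes into its per-interface state):
 *
 *	if (neigh_sysctl_register(dev, p, my_handler))
 *		goto unwind;	// -ENOBUFS: no sysctl table installed
 *
 * "my_handler" is a placeholder name here, not a kernel symbol.
 */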
3877 
3878 void neigh_sysctl_unregister(struct neigh_parms *p)
3879 {
3880 	if (p->sysctl_table) {
3881 		struct neigh_sysctl_table *t = p->sysctl_table;
3882 		p->sysctl_table = NULL;
3883 		unregister_net_sysctl_table(t->sysctl_header);
3884 		kfree(t);
3885 	}
3886 }
3887 EXPORT_SYMBOL(neigh_sysctl_unregister);
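/*
 * Illustrative note: clearing p->sysctl_table before unregistering
 * makes the teardown safe to call unconditionally -- if registration
 * never happened (or already ran), the NULL check above turns the
 * call into a no-op:
 *
 *	neigh_sysctl_unregister(p);	// removes the /proc entries
 *	neigh_sysctl_unregister(p);	// harmless: sysctl_table is NULL
 */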
3888 
3889 #endif	/* CONFIG_SYSCTL */
3890 
3891 static int __init neigh_init(void)
3892 {
3893 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3894 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3895 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
3896 
3897 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3898 		      0);
3899 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3900 
3901 	return 0;
3902 }
3903 
3904 subsys_initcall(neigh_init);
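/*
 * Illustrative note: the rtnl_register() calls above bind each
 * RTM_*NEIGH message type to a "doit" handler (single-object requests)
 * and/or a "dumpit" handler (table dumps).  From userspace, for
 * example:
 *
 *	ip neigh add 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0
 *		-> RTM_NEWNEIGH -> neigh_add()
 *	ip neigh show
 *		-> RTM_GETNEIGH + NLM_F_DUMP -> neigh_dump_info()
 *
 * subsys_initcall() runs neigh_init() early in boot, ahead of the
 * protocol initcalls that populate the neighbour tables.
 */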
3905