xref: /openbmc/linux/net/core/neighbour.c (revision f220d3eb)
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)
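
/* Editorial note (not in the original source): since `level` is a compile
 * time constant at every call site, the level check above is folded away,
 * so with NEIGH_DEBUG == 1 the many neigh_dbg(2, ...) calls in this file
 * compile out entirely; raising NEIGH_DEBUG re-enables them, and the DEBUG
 * define above keeps pr_debug() compiled in (dynamic-debug builds aside).
 */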

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to use
     the neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simple and not to make
   callbacks into the neighbour tables.
 */
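
/* Illustrative sketch (not in the original source) of the rule above: an
 * entry found during a bucket scan that needs non-trivial work is pinned
 * by its reference count before the table lock is dropped:
 *
 *	neigh_hold(n);
 *	write_unlock_bh(&tbl->lock);
 *	...do the slow work, possibly talking to the driver...
 *	neigh_release(n);
 */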

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
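
/* Worked example (a note, not in the original source): prandom_u32() % base
 * is uniform over [0, base) and base >> 1 adds base/2, so for
 * base = 30 * HZ the result is uniform over [15 * HZ, 45 * HZ).
 */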

static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
		      struct neighbour __rcu **np, struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
	    !(n->flags & flags)) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		n->dead = 1;
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, 0, 0, np, tbl);
		np = &n->next;
	}
	return false;
}
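
/* A note on the indexing above (not in the original source): tbl->hash()
 * returns a full 32-bit value, and shifting right by (32 - hash_shift)
 * keeps its top hash_shift bits, i.e. an index into the 1 << hash_shift
 * buckets.
 */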

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
				      tbl)) {
				shrunk = 1;
				continue;
			}
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
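
/* A note on the thresholds above (not in the original source): allocation
 * triggers a synchronous neigh_forced_gc() once the table holds at least
 * gc_thresh2 entries and the last flush was more than 5 seconds ago, and
 * fails outright at gc_thresh3 entries if the forced GC frees nothing.
 */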

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

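/* A note on neigh_hash_grow() above (not in the original source): the new
 * bucket array is published with rcu_assign_pointer() and the old one is
 * freed via call_rcu(), so lockless readers keep walking whichever table
 * they dereferenced under RCU.
 */

/* neigh_lookup() takes a reference on the entry it returns via
 * refcount_inc_not_zero(); a non-NULL result must be dropped by the caller
 * with neigh_release().
 */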
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
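
/* A note on pneigh_hash() (not in the original source): it reads the last
 * four bytes of the key and folds them with xor-shifts down to the low
 * four bits, giving a bucket index in 0..PNEIGH_HASHMASK (0..15).
 */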

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
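
/* A note on the cap above (not in the original source): the probe budget is
 * UCAST_PROBES + APP_PROBES plus either MCAST_REPROBES (once the entry is
 * already in NUD_PROBE) or MCAST_PROBES (while it is still resolving).
 */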

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very delicate place. report_unreachable is a very
	   complicated routine. In particular, it can hit the same neighbour
	   entry!

	   So we try to be careful here and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */
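/* A summary of the transitions below (a note, not in the original source):
 * REACHABLE entries fall back to DELAY or STALE as confirmations age;
 * DELAY either returns to REACHABLE on a fresh confirmation or moves to
 * PROBE; PROBE and INCOMPLETE retransmit until neigh_max_probes() is
 * exhausted, at which point the entry becomes FAILED and is invalidated.
 */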

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}

/* The generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it differs.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known as
				a router.

   The caller MUST hold a reference count on the entry.
 */
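
/* Example (a note, not in the original source): the administrative delete
 * path below (neigh_delete()) calls
 *	neigh_update(neigh, NULL, NUD_FAILED,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, pid);
 * to force an entry into NUD_FAILED regardless of its current state.
 */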

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead)
		goto out;

	neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
		     (flags & NEIGH_UPDATE_F_ADMIN)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}
	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamps only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk moving the locktime window with
	 * no-op updates and ignoring relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha) {
		if (new & NUD_CONNECTED)
			neigh->confirmed = jiffies;
		neigh->updated = jiffies;
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	return err;
}
EXPORT_SYMBOL(neigh_update);

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */
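/* A note on the loops below (not in the original source): the
 * read_seqbegin()/read_seqretry() pair on neigh->ha_lock re-pulls the skb
 * and rebuilds the hardware header if neigh->ha was rewritten concurrently
 * (see the write_seqlock() in neigh_update()), so a torn address is never
 * placed on the wire.
 */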

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

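/* A note on the scheduling below (not in the original source):
 * prandom_u32() % PROXY_DELAY spreads each queued proxy reply uniformly
 * over [now, now + PROXY_DELAY), and the proxy_timer is left at its
 * existing expiry if that is already earlier.
 */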
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	if (parms->dev)
		dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
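
/* A sizing note on the above (not in the original source): the main hash
 * starts at shift 3, i.e. 1 << 3 = 8 buckets, and grows in __neigh_create();
 * the proxy hash is fixed at PNEIGH_HASHMASK + 1 = 16 buckets.
 */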

int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len)
		goto out;

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = neigh_update(neigh, NULL, NUD_FAILED,
			   NEIGH_UPDATE_F_OVERRIDE |
			   NEIGH_UPDATE_F_ADMIN,
			   NETLINK_CB(skb).portid);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len)
		goto out;
	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			err = 0;
		}
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		neigh = __neigh_lookup_errno(tbl, dst, dev);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
	}

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				   NETLINK_CB(skb).portid);
	neigh_release(neigh);

out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
1819 	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
1820 			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
1821 	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
1822 	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
1823 	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
1824 			NEIGH_VAR(parms, UCAST_PROBES)) ||
1825 	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
1826 			NEIGH_VAR(parms, MCAST_PROBES)) ||
1827 	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
1828 			NEIGH_VAR(parms, MCAST_REPROBES)) ||
1829 	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
1830 			  NDTPA_PAD) ||
1831 	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1832 			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
1833 	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
1834 			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
1835 	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1836 			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
1837 	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
1838 			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
1839 	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
1840 			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
1841 	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
1842 			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
1843 	    nla_put_msecs(skb, NDTPA_LOCKTIME,
1844 			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
1845 		goto nla_put_failure;
1846 	return nla_nest_end(skb, nest);
1847 
1848 nla_put_failure:
1849 	nla_nest_cancel(skb, nest);
1850 	return -EMSGSIZE;
1851 }
1852 
1853 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1854 			      u32 pid, u32 seq, int type, int flags)
1855 {
1856 	struct nlmsghdr *nlh;
1857 	struct ndtmsg *ndtmsg;
1858 
1859 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1860 	if (nlh == NULL)
1861 		return -EMSGSIZE;
1862 
1863 	ndtmsg = nlmsg_data(nlh);
1864 
1865 	read_lock_bh(&tbl->lock);
1866 	ndtmsg->ndtm_family = tbl->family;
1867 	ndtmsg->ndtm_pad1   = 0;
1868 	ndtmsg->ndtm_pad2   = 0;
1869 
1870 	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1871 	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
1872 	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1873 	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1874 	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1875 		goto nla_put_failure;
1876 	{
1877 		unsigned long now = jiffies;
1878 		unsigned int flush_delta = now - tbl->last_flush;
1879 		unsigned int rand_delta = now - tbl->last_rand;
1880 		struct neigh_hash_table *nht;
1881 		struct ndt_config ndc = {
1882 			.ndtc_key_len		= tbl->key_len,
1883 			.ndtc_entry_size	= tbl->entry_size,
1884 			.ndtc_entries		= atomic_read(&tbl->entries),
1885 			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
1886 			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
1887 			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
1888 		};
1889 
1890 		rcu_read_lock_bh();
1891 		nht = rcu_dereference_bh(tbl->nht);
1892 		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1893 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1894 		rcu_read_unlock_bh();
1895 
1896 		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1897 			goto nla_put_failure;
1898 	}
1899 
1900 	{
1901 		int cpu;
1902 		struct ndt_stats ndst;
1903 
1904 		memset(&ndst, 0, sizeof(ndst));
1905 
1906 		for_each_possible_cpu(cpu) {
1907 			struct neigh_statistics	*st;
1908 
1909 			st = per_cpu_ptr(tbl->stats, cpu);
1910 			ndst.ndts_allocs		+= st->allocs;
1911 			ndst.ndts_destroys		+= st->destroys;
1912 			ndst.ndts_hash_grows		+= st->hash_grows;
1913 			ndst.ndts_res_failed		+= st->res_failed;
1914 			ndst.ndts_lookups		+= st->lookups;
1915 			ndst.ndts_hits			+= st->hits;
1916 			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
1917 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
1918 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
1919 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
1920 			ndst.ndts_table_fulls		+= st->table_fulls;
1921 		}
1922 
1923 		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
1924 				  NDTA_PAD))
1925 			goto nla_put_failure;
1926 	}
1927 
1928 	BUG_ON(tbl->parms.dev);
1929 	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1930 		goto nla_put_failure;
1931 
1932 	read_unlock_bh(&tbl->lock);
1933 	nlmsg_end(skb, nlh);
1934 	return 0;
1935 
1936 nla_put_failure:
1937 	read_unlock_bh(&tbl->lock);
1938 	nlmsg_cancel(skb, nlh);
1939 	return -EMSGSIZE;
1940 }
1941 
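/*
 * Like neightbl_fill_info(), but emits only the table name plus a single
 * (typically per-device) parms set; used for the per-parms entries of a
 * table dump.
 */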
1942 static int neightbl_fill_param_info(struct sk_buff *skb,
1943 				    struct neigh_table *tbl,
1944 				    struct neigh_parms *parms,
1945 				    u32 pid, u32 seq, int type,
1946 				    unsigned int flags)
1947 {
1948 	struct ndtmsg *ndtmsg;
1949 	struct nlmsghdr *nlh;
1950 
1951 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1952 	if (nlh == NULL)
1953 		return -EMSGSIZE;
1954 
1955 	ndtmsg = nlmsg_data(nlh);
1956 
1957 	read_lock_bh(&tbl->lock);
1958 	ndtmsg->ndtm_family = tbl->family;
1959 	ndtmsg->ndtm_pad1   = 0;
1960 	ndtmsg->ndtm_pad2   = 0;
1961 
1962 	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1963 	    neightbl_fill_parms(skb, parms) < 0)
1964 		goto errout;
1965 
1966 	read_unlock_bh(&tbl->lock);
1967 	nlmsg_end(skb, nlh);
1968 	return 0;
1969 errout:
1970 	read_unlock_bh(&tbl->lock);
1971 	nlmsg_cancel(skb, nlh);
1972 	return -EMSGSIZE;
1973 }
1974 
1975 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1976 	[NDTA_NAME]		= { .type = NLA_STRING },
1977 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1978 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1979 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1980 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1981 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1982 };
1983 
1984 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1985 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1986 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1987 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1988 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1989 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1990 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1991 	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
1992 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1993 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1994 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1995 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1996 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1997 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1998 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1999 };
2000 
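/*
 * RTM_SETNEIGHTBL handler: look up the table by NDTA_NAME (optionally
 * narrowed by family), apply any NDTA_PARMS values to the parms set
 * selected by NDTPA_IFINDEX (0 means the table default) and, in the
 * initial netns only, update the gc interval/thresholds.  From userspace
 * this is typically driven by iproute2, e.g. (illustrative invocation):
 *
 *	ip ntable change name arp_cache dev eth0 base_reachable 30000
 */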
2001 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
2002 			struct netlink_ext_ack *extack)
2003 {
2004 	struct net *net = sock_net(skb->sk);
2005 	struct neigh_table *tbl;
2006 	struct ndtmsg *ndtmsg;
2007 	struct nlattr *tb[NDTA_MAX+1];
2008 	bool found = false;
2009 	int err, tidx;
2010 
2011 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
2012 			  nl_neightbl_policy, extack);
2013 	if (err < 0)
2014 		goto errout;
2015 
2016 	if (tb[NDTA_NAME] == NULL) {
2017 		err = -EINVAL;
2018 		goto errout;
2019 	}
2020 
2021 	ndtmsg = nlmsg_data(nlh);
2022 
2023 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2024 		tbl = neigh_tables[tidx];
2025 		if (!tbl)
2026 			continue;
2027 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2028 			continue;
2029 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2030 			found = true;
2031 			break;
2032 		}
2033 	}
2034 
2035 	if (!found)
2036 		return -ENOENT;
2037 
2038 	/*
2039 	 * We acquire tbl->lock to be nice to the periodic timers and
2040 	 * make sure they always see a consistent set of values.
2041 	 */
2042 	write_lock_bh(&tbl->lock);
2043 
2044 	if (tb[NDTA_PARMS]) {
2045 		struct nlattr *tbp[NDTPA_MAX+1];
2046 		struct neigh_parms *p;
2047 		int i, ifindex = 0;
2048 
2049 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2050 				       nl_ntbl_parm_policy, extack);
2051 		if (err < 0)
2052 			goto errout_tbl_lock;
2053 
2054 		if (tbp[NDTPA_IFINDEX])
2055 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2056 
2057 		p = lookup_neigh_parms(tbl, net, ifindex);
2058 		if (p == NULL) {
2059 			err = -ENOENT;
2060 			goto errout_tbl_lock;
2061 		}
2062 
2063 		for (i = 1; i <= NDTPA_MAX; i++) {
2064 			if (tbp[i] == NULL)
2065 				continue;
2066 
2067 			switch (i) {
2068 			case NDTPA_QUEUE_LEN:
2069 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2070 					      nla_get_u32(tbp[i]) *
2071 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2072 				break;
2073 			case NDTPA_QUEUE_LENBYTES:
2074 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2075 					      nla_get_u32(tbp[i]));
2076 				break;
2077 			case NDTPA_PROXY_QLEN:
2078 				NEIGH_VAR_SET(p, PROXY_QLEN,
2079 					      nla_get_u32(tbp[i]));
2080 				break;
2081 			case NDTPA_APP_PROBES:
2082 				NEIGH_VAR_SET(p, APP_PROBES,
2083 					      nla_get_u32(tbp[i]));
2084 				break;
2085 			case NDTPA_UCAST_PROBES:
2086 				NEIGH_VAR_SET(p, UCAST_PROBES,
2087 					      nla_get_u32(tbp[i]));
2088 				break;
2089 			case NDTPA_MCAST_PROBES:
2090 				NEIGH_VAR_SET(p, MCAST_PROBES,
2091 					      nla_get_u32(tbp[i]));
2092 				break;
2093 			case NDTPA_MCAST_REPROBES:
2094 				NEIGH_VAR_SET(p, MCAST_REPROBES,
2095 					      nla_get_u32(tbp[i]));
2096 				break;
2097 			case NDTPA_BASE_REACHABLE_TIME:
2098 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2099 					      nla_get_msecs(tbp[i]));
2100 				/* Update reachable_time as well; otherwise the change
2101 				 * only takes effect the next time neigh_periodic_work
2102 				 * decides to recompute it (can be multiple minutes).
2103 				 */
2104 				p->reachable_time =
2105 					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
2106 				break;
2107 			case NDTPA_GC_STALETIME:
2108 				NEIGH_VAR_SET(p, GC_STALETIME,
2109 					      nla_get_msecs(tbp[i]));
2110 				break;
2111 			case NDTPA_DELAY_PROBE_TIME:
2112 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2113 					      nla_get_msecs(tbp[i]));
2114 				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2115 				break;
2116 			case NDTPA_RETRANS_TIME:
2117 				NEIGH_VAR_SET(p, RETRANS_TIME,
2118 					      nla_get_msecs(tbp[i]));
2119 				break;
2120 			case NDTPA_ANYCAST_DELAY:
2121 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2122 					      nla_get_msecs(tbp[i]));
2123 				break;
2124 			case NDTPA_PROXY_DELAY:
2125 				NEIGH_VAR_SET(p, PROXY_DELAY,
2126 					      nla_get_msecs(tbp[i]));
2127 				break;
2128 			case NDTPA_LOCKTIME:
2129 				NEIGH_VAR_SET(p, LOCKTIME,
2130 					      nla_get_msecs(tbp[i]));
2131 				break;
2132 			}
2133 		}
2134 	}
2135 
2136 	err = -ENOENT;
2137 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2138 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2139 	    !net_eq(net, &init_net))
2140 		goto errout_tbl_lock;
2141 
2142 	if (tb[NDTA_THRESH1])
2143 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2144 
2145 	if (tb[NDTA_THRESH2])
2146 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2147 
2148 	if (tb[NDTA_THRESH3])
2149 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2150 
2151 	if (tb[NDTA_GC_INTERVAL])
2152 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2153 
2154 	err = 0;
2155 
2156 errout_tbl_lock:
2157 	write_unlock_bh(&tbl->lock);
2158 errout:
2159 	return err;
2160 }
2161 
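/*
 * RTM_GETNEIGHTBL dump: one message per table, followed by one message
 * per device-specific parms set.  cb->args[0]/args[1] record the table
 * and parms index reached, so an interrupted dump resumes where it left
 * off on the next callback.
 */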
2162 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2163 {
2164 	struct net *net = sock_net(skb->sk);
2165 	int family, tidx, nidx = 0;
2166 	int tbl_skip = cb->args[0];
2167 	int neigh_skip = cb->args[1];
2168 	struct neigh_table *tbl;
2169 
2170 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2171 
2172 	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
2173 		struct neigh_parms *p;
2174 
2175 		tbl = neigh_tables[tidx];
2176 		if (!tbl)
2177 			continue;
2178 
2179 		if (tidx < tbl_skip || (family && tbl->family != family))
2180 			continue;
2181 
2182 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2183 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2184 				       NLM_F_MULTI) < 0)
2185 			break;
2186 
2187 		nidx = 0;
2188 		p = list_next_entry(&tbl->parms, list);
2189 		list_for_each_entry_from(p, &tbl->parms_list, list) {
2190 			if (!net_eq(neigh_parms_net(p), net))
2191 				continue;
2192 
2193 			if (nidx < neigh_skip)
2194 				goto next;
2195 
2196 			if (neightbl_fill_param_info(skb, tbl, p,
2197 						     NETLINK_CB(cb->skb).portid,
2198 						     cb->nlh->nlmsg_seq,
2199 						     RTM_NEWNEIGHTBL,
2200 						     NLM_F_MULTI) < 0)
2201 				goto out;
2202 		next:
2203 			nidx++;
2204 		}
2205 
2206 		neigh_skip = 0;
2207 	}
2208 out:
2209 	cb->args[0] = tidx;
2210 	cb->args[1] = nidx;
2211 
2212 	return skb->len;
2213 }
2214 
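/*
 * Build one RTM_NEWNEIGH/RTM_DELNEIGH message for a neighbour entry:
 * ndmsg header, the NDA_DST key, NDA_LLADDR (only when the entry is
 * NUD_VALID), NDA_PROBES and an NDA_CACHEINFO timestamp block.  The
 * volatile fields are sampled under neigh->lock.
 */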
2215 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2216 			   u32 pid, u32 seq, int type, unsigned int flags)
2217 {
2218 	unsigned long now = jiffies;
2219 	struct nda_cacheinfo ci;
2220 	struct nlmsghdr *nlh;
2221 	struct ndmsg *ndm;
2222 
2223 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2224 	if (nlh == NULL)
2225 		return -EMSGSIZE;
2226 
2227 	ndm = nlmsg_data(nlh);
2228 	ndm->ndm_family	 = neigh->ops->family;
2229 	ndm->ndm_pad1    = 0;
2230 	ndm->ndm_pad2    = 0;
2231 	ndm->ndm_flags	 = neigh->flags;
2232 	ndm->ndm_type	 = neigh->type;
2233 	ndm->ndm_ifindex = neigh->dev->ifindex;
2234 
2235 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2236 		goto nla_put_failure;
2237 
2238 	read_lock_bh(&neigh->lock);
2239 	ndm->ndm_state	 = neigh->nud_state;
2240 	if (neigh->nud_state & NUD_VALID) {
2241 		char haddr[MAX_ADDR_LEN];
2242 
2243 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2244 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2245 			read_unlock_bh(&neigh->lock);
2246 			goto nla_put_failure;
2247 		}
2248 	}
2249 
2250 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2251 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2252 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2253 	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
2254 	read_unlock_bh(&neigh->lock);
2255 
2256 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2257 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2258 		goto nla_put_failure;
2259 
2260 	nlmsg_end(skb, nlh);
2261 	return 0;
2262 
2263 nla_put_failure:
2264 	nlmsg_cancel(skb, nlh);
2265 	return -EMSGSIZE;
2266 }
2267 
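/*
 * Counterpart of neigh_fill_info() for proxy entries, which carry no
 * link-layer state: the state is reported as NUD_NONE and NTF_PROXY is
 * set in the flags.
 */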
2268 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2269 			    u32 pid, u32 seq, int type, unsigned int flags,
2270 			    struct neigh_table *tbl)
2271 {
2272 	struct nlmsghdr *nlh;
2273 	struct ndmsg *ndm;
2274 
2275 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2276 	if (nlh == NULL)
2277 		return -EMSGSIZE;
2278 
2279 	ndm = nlmsg_data(nlh);
2280 	ndm->ndm_family	 = tbl->family;
2281 	ndm->ndm_pad1    = 0;
2282 	ndm->ndm_pad2    = 0;
2283 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2284 	ndm->ndm_type	 = RTN_UNICAST;
2285 	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2286 	ndm->ndm_state	 = NUD_NONE;
2287 
2288 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2289 		goto nla_put_failure;
2290 
2291 	nlmsg_end(skb, nlh);
2292 	return 0;
2293 
2294 nla_put_failure:
2295 	nlmsg_cancel(skb, nlh);
2296 	return -EMSGSIZE;
2297 }
2298 
2299 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2300 {
2301 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2302 	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2303 }
2304 
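/* Dump filters: a nonzero NDA_MASTER/NDA_IFINDEX request attribute
 * restricts the dump to neighbours behind the given master device or on
 * the given interface; entries for which these return true are skipped.
 */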
2305 static bool neigh_master_filtered(struct net_device *dev, int master_idx)
2306 {
2307 	struct net_device *master;
2308 
2309 	if (!master_idx)
2310 		return false;
2311 
2312 	master = netdev_master_upper_dev_get(dev);
2313 	if (!master || master->ifindex != master_idx)
2314 		return true;
2315 
2316 	return false;
2317 }
2318 
2319 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
2320 {
2321 	if (filter_idx && dev->ifindex != filter_idx)
2322 		return true;
2323 
2324 	return false;
2325 }
2326 
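/*
 * Walk one table's hash buckets under RCU and emit an RTM_NEWNEIGH
 * message per matching entry.  cb->args[1]/args[2] hold the bucket and
 * in-bucket index, so a dump that fills the skb can be resumed later.
 */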
2327 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2328 			    struct netlink_callback *cb)
2329 {
2330 	struct net *net = sock_net(skb->sk);
2331 	const struct nlmsghdr *nlh = cb->nlh;
2332 	struct nlattr *tb[NDA_MAX + 1];
2333 	struct neighbour *n;
2334 	int rc, h, s_h = cb->args[1];
2335 	int idx, s_idx = idx = cb->args[2];
2336 	struct neigh_hash_table *nht;
2337 	int filter_master_idx = 0, filter_idx = 0;
2338 	unsigned int flags = NLM_F_MULTI;
2339 	int err;
2340 
2341 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
2342 	if (!err) {
2343 		if (tb[NDA_IFINDEX]) {
2344 			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
2345 				return -EINVAL;
2346 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
2347 		}
2348 		if (tb[NDA_MASTER]) {
2349 			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
2350 				return -EINVAL;
2351 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
2352 		}
2353 		if (filter_idx || filter_master_idx)
2354 			flags |= NLM_F_DUMP_FILTERED;
2355 	}
2356 
2357 	rcu_read_lock_bh();
2358 	nht = rcu_dereference_bh(tbl->nht);
2359 
2360 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2361 		if (h > s_h)
2362 			s_idx = 0;
2363 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2364 		     n != NULL;
2365 		     n = rcu_dereference_bh(n->next)) {
2366 			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
2367 				goto next;
2368 			if (neigh_ifindex_filtered(n->dev, filter_idx) ||
2369 			    neigh_master_filtered(n->dev, filter_master_idx))
2370 				goto next;
2371 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2372 					    cb->nlh->nlmsg_seq,
2373 					    RTM_NEWNEIGH,
2374 					    flags) < 0) {
2375 				rc = -1;
2376 				goto out;
2377 			}
2378 next:
2379 			idx++;
2380 		}
2381 	}
2382 	rc = skb->len;
2383 out:
2384 	rcu_read_unlock_bh();
2385 	cb->args[1] = h;
2386 	cb->args[2] = idx;
2387 	return rc;
2388 }
2389 
2390 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2391 			     struct netlink_callback *cb)
2392 {
2393 	struct pneigh_entry *n;
2394 	struct net *net = sock_net(skb->sk);
2395 	int rc, h, s_h = cb->args[3];
2396 	int idx, s_idx = idx = cb->args[4];
2397 
2398 	read_lock_bh(&tbl->lock);
2399 
2400 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2401 		if (h > s_h)
2402 			s_idx = 0;
2403 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2404 			if (idx < s_idx || pneigh_net(n) != net)
2405 				goto next;
2406 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2407 					    cb->nlh->nlmsg_seq,
2408 					    RTM_NEWNEIGH,
2409 					    NLM_F_MULTI, tbl) < 0) {
2410 				read_unlock_bh(&tbl->lock);
2411 				rc = -1;
2412 				goto out;
2413 			}
2414 		next:
2415 			idx++;
2416 		}
2417 	}
2418 
2419 	read_unlock_bh(&tbl->lock);
2420 	rc = skb->len;
2421 out:
2422 	cb->args[3] = h;
2423 	cb->args[4] = idx;
2424 	return rc;
2426 }
2427 
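/*
 * Entry point for RTM_GETNEIGH dumps.  If the request carries a full
 * ndmsg with ndm_flags equal to NTF_PROXY, the proxy tables are dumped
 * instead of the regular neighbour tables.
 */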
2428 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2429 {
2430 	struct neigh_table *tbl;
2431 	int t, family, s_t;
2432 	int proxy = 0;
2433 	int err;
2434 
2435 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2436 
2437 	/* Check for the presence of a full ndmsg structure; the family
2438 	 * member sits at the same offset in both ndmsg and rtgenmsg.
2439 	 */
2440 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2441 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2442 		proxy = 1;
2443 
2444 	s_t = cb->args[0];
2445 
2446 	for (t = 0; t < NEIGH_NR_TABLES; t++) {
2447 		tbl = neigh_tables[t];
2448 
2449 		if (!tbl)
2450 			continue;
2451 		if (t < s_t || (family && tbl->family != family))
2452 			continue;
2453 		if (t > s_t)
2454 			memset(&cb->args[1], 0, sizeof(cb->args) -
2455 						sizeof(cb->args[0]));
2456 		if (proxy)
2457 			err = pneigh_dump_table(tbl, skb, cb);
2458 		else
2459 			err = neigh_dump_table(tbl, skb, cb);
2460 		if (err < 0)
2461 			break;
2462 	}
2463 
2464 	cb->args[0] = t;
2465 	return skb->len;
2466 }
2467 
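/*
 * Invoke @cb for every entry in @tbl.  A minimal usage sketch (count_cb
 * and the counter are illustrative only, not part of this file):
 *
 *	static void count_cb(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_cb, &count);
 */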
2468 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2469 {
2470 	int chain;
2471 	struct neigh_hash_table *nht;
2472 
2473 	rcu_read_lock_bh();
2474 	nht = rcu_dereference_bh(tbl->nht);
2475 
2476 	read_lock(&tbl->lock); /* avoid resizes */
2477 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2478 		struct neighbour *n;
2479 
2480 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2481 		     n != NULL;
2482 		     n = rcu_dereference_bh(n->next))
2483 			cb(n, cookie);
2484 	}
2485 	read_unlock(&tbl->lock);
2486 	rcu_read_unlock_bh();
2487 }
2488 EXPORT_SYMBOL(neigh_for_each);
2489 
2490 /* The tbl->lock must be held as a writer and BH disabled. */
2491 void __neigh_for_each_release(struct neigh_table *tbl,
2492 			      int (*cb)(struct neighbour *))
2493 {
2494 	int chain;
2495 	struct neigh_hash_table *nht;
2496 
2497 	nht = rcu_dereference_protected(tbl->nht,
2498 					lockdep_is_held(&tbl->lock));
2499 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2500 		struct neighbour *n;
2501 		struct neighbour __rcu **np;
2502 
2503 		np = &nht->hash_buckets[chain];
2504 		while ((n = rcu_dereference_protected(*np,
2505 					lockdep_is_held(&tbl->lock))) != NULL) {
2506 			int release;
2507 
2508 			write_lock(&n->lock);
2509 			release = cb(n);
2510 			if (release) {
2511 				rcu_assign_pointer(*np,
2512 					rcu_dereference_protected(n->next,
2513 						lockdep_is_held(&tbl->lock)));
2514 				n->dead = 1;
2515 			} else
2516 				np = &n->next;
2517 			write_unlock(&n->lock);
2518 			if (release)
2519 				neigh_cleanup_and_release(n);
2520 		}
2521 	}
2522 }
2523 EXPORT_SYMBOL(__neigh_for_each_release);
2524 
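/*
 * Resolve @addr in the table selected by @index and transmit @skb
 * through the resulting neighbour; NEIGH_LINK_TABLE bypasses resolution
 * and only builds the hard header.  If the neighbour cannot be created
 * or the header cannot be built, the skb is freed and a negative errno
 * is returned.
 */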
2525 int neigh_xmit(int index, struct net_device *dev,
2526 	       const void *addr, struct sk_buff *skb)
2527 {
2528 	int err = -EAFNOSUPPORT;
2529 	if (likely(index < NEIGH_NR_TABLES)) {
2530 		struct neigh_table *tbl;
2531 		struct neighbour *neigh;
2532 
2533 		tbl = neigh_tables[index];
2534 		if (!tbl)
2535 			goto out;
2536 		rcu_read_lock_bh();
2537 		neigh = __neigh_lookup_noref(tbl, addr, dev);
2538 		if (!neigh)
2539 			neigh = __neigh_create(tbl, addr, dev, false);
2540 		err = PTR_ERR(neigh);
2541 		if (IS_ERR(neigh)) {
2542 			rcu_read_unlock_bh();
2543 			goto out_kfree_skb;
2544 		}
2545 		err = neigh->output(neigh, skb);
2546 		rcu_read_unlock_bh();
2547 	} else if (index == NEIGH_LINK_TABLE) {
2549 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
2550 				      addr, NULL, skb->len);
2551 		if (err < 0)
2552 			goto out_kfree_skb;
2553 		err = dev_queue_xmit(skb);
2554 	}
2555 out:
2556 	return err;
2557 out_kfree_skb:
2558 	kfree_skb(skb);
2559 	goto out;
2560 }
2561 EXPORT_SYMBOL(neigh_xmit);
2562 
2563 #ifdef CONFIG_PROC_FS
2564 
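/*
 * /proc seq_file iteration helpers: walk the main hash table first and
 * then, unless NEIGH_SEQ_NEIGH_ONLY is set, the proxy hash.  The whole
 * walk runs under the rcu_read_lock_bh() taken in neigh_seq_start().
 */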
2565 static struct neighbour *neigh_get_first(struct seq_file *seq)
2566 {
2567 	struct neigh_seq_state *state = seq->private;
2568 	struct net *net = seq_file_net(seq);
2569 	struct neigh_hash_table *nht = state->nht;
2570 	struct neighbour *n = NULL;
2571 	int bucket = state->bucket;
2572 
2573 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2574 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2575 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2576 
2577 		while (n) {
2578 			if (!net_eq(dev_net(n->dev), net))
2579 				goto next;
2580 			if (state->neigh_sub_iter) {
2581 				loff_t fakep = 0;
2582 				void *v;
2583 
2584 				v = state->neigh_sub_iter(state, n, &fakep);
2585 				if (!v)
2586 					goto next;
2587 			}
2588 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2589 				break;
2590 			if (n->nud_state & ~NUD_NOARP)
2591 				break;
2592 next:
2593 			n = rcu_dereference_bh(n->next);
2594 		}
2595 
2596 		if (n)
2597 			break;
2598 	}
2599 	state->bucket = bucket;
2600 
2601 	return n;
2602 }
2603 
2604 static struct neighbour *neigh_get_next(struct seq_file *seq,
2605 					struct neighbour *n,
2606 					loff_t *pos)
2607 {
2608 	struct neigh_seq_state *state = seq->private;
2609 	struct net *net = seq_file_net(seq);
2610 	struct neigh_hash_table *nht = state->nht;
2611 
2612 	if (state->neigh_sub_iter) {
2613 		void *v = state->neigh_sub_iter(state, n, pos);
2614 		if (v)
2615 			return n;
2616 	}
2617 	n = rcu_dereference_bh(n->next);
2618 
2619 	while (1) {
2620 		while (n) {
2621 			if (!net_eq(dev_net(n->dev), net))
2622 				goto next;
2623 			if (state->neigh_sub_iter) {
2624 				void *v = state->neigh_sub_iter(state, n, pos);
2625 				if (v)
2626 					return n;
2627 				goto next;
2628 			}
2629 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2630 				break;
2631 
2632 			if (n->nud_state & ~NUD_NOARP)
2633 				break;
2634 next:
2635 			n = rcu_dereference_bh(n->next);
2636 		}
2637 
2638 		if (n)
2639 			break;
2640 
2641 		if (++state->bucket >= (1 << nht->hash_shift))
2642 			break;
2643 
2644 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2645 	}
2646 
2647 	if (n && pos)
2648 		--(*pos);
2649 	return n;
2650 }
2651 
2652 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2653 {
2654 	struct neighbour *n = neigh_get_first(seq);
2655 
2656 	if (n) {
2657 		--(*pos);
2658 		while (*pos) {
2659 			n = neigh_get_next(seq, n, pos);
2660 			if (!n)
2661 				break;
2662 		}
2663 	}
2664 	return *pos ? NULL : n;
2665 }
2666 
2667 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2668 {
2669 	struct neigh_seq_state *state = seq->private;
2670 	struct net *net = seq_file_net(seq);
2671 	struct neigh_table *tbl = state->tbl;
2672 	struct pneigh_entry *pn = NULL;
2673 	int bucket = state->bucket;
2674 
2675 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2676 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2677 		pn = tbl->phash_buckets[bucket];
2678 		while (pn && !net_eq(pneigh_net(pn), net))
2679 			pn = pn->next;
2680 		if (pn)
2681 			break;
2682 	}
2683 	state->bucket = bucket;
2684 
2685 	return pn;
2686 }
2687 
2688 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2689 					    struct pneigh_entry *pn,
2690 					    loff_t *pos)
2691 {
2692 	struct neigh_seq_state *state = seq->private;
2693 	struct net *net = seq_file_net(seq);
2694 	struct neigh_table *tbl = state->tbl;
2695 
2696 	do {
2697 		pn = pn->next;
2698 	} while (pn && !net_eq(pneigh_net(pn), net));
2699 
2700 	while (!pn) {
2701 		if (++state->bucket > PNEIGH_HASHMASK)
2702 			break;
2703 		pn = tbl->phash_buckets[state->bucket];
2704 		while (pn && !net_eq(pneigh_net(pn), net))
2705 			pn = pn->next;
2706 		if (pn)
2707 			break;
2708 	}
2709 
2710 	if (pn && pos)
2711 		--(*pos);
2712 
2713 	return pn;
2714 }
2715 
2716 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2717 {
2718 	struct pneigh_entry *pn = pneigh_get_first(seq);
2719 
2720 	if (pn) {
2721 		--(*pos);
2722 		while (*pos) {
2723 			pn = pneigh_get_next(seq, pn, pos);
2724 			if (!pn)
2725 				break;
2726 		}
2727 	}
2728 	return *pos ? NULL : pn;
2729 }
2730 
2731 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2732 {
2733 	struct neigh_seq_state *state = seq->private;
2734 	void *rc;
2735 	loff_t idxpos = *pos;
2736 
2737 	rc = neigh_get_idx(seq, &idxpos);
2738 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2739 		rc = pneigh_get_idx(seq, &idxpos);
2740 
2741 	return rc;
2742 }
2743 
2744 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2745 	__acquires(rcu_bh)
2746 {
2747 	struct neigh_seq_state *state = seq->private;
2748 
2749 	state->tbl = tbl;
2750 	state->bucket = 0;
2751 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2752 
2753 	rcu_read_lock_bh();
2754 	state->nht = rcu_dereference_bh(tbl->nht);
2755 
2756 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2757 }
2758 EXPORT_SYMBOL(neigh_seq_start);
2759 
2760 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2761 {
2762 	struct neigh_seq_state *state;
2763 	void *rc;
2764 
2765 	if (v == SEQ_START_TOKEN) {
2766 		rc = neigh_get_first(seq);
2767 		goto out;
2768 	}
2769 
2770 	state = seq->private;
2771 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2772 		rc = neigh_get_next(seq, v, NULL);
2773 		if (rc)
2774 			goto out;
2775 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2776 			rc = pneigh_get_first(seq);
2777 	} else {
2778 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2779 		rc = pneigh_get_next(seq, v, NULL);
2780 	}
2781 out:
2782 	++(*pos);
2783 	return rc;
2784 }
2785 EXPORT_SYMBOL(neigh_seq_next);
2786 
2787 void neigh_seq_stop(struct seq_file *seq, void *v)
2788 	__releases(rcu_bh)
2789 {
2790 	rcu_read_unlock_bh();
2791 }
2792 EXPORT_SYMBOL(neigh_seq_stop);
2793 
2794 /* statistics via seq_file */
2795 
2796 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2797 {
2798 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2799 	int cpu;
2800 
2801 	if (*pos == 0)
2802 		return SEQ_START_TOKEN;
2803 
2804 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2805 		if (!cpu_possible(cpu))
2806 			continue;
2807 		*pos = cpu+1;
2808 		return per_cpu_ptr(tbl->stats, cpu);
2809 	}
2810 	return NULL;
2811 }
2812 
2813 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2814 {
2815 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2816 	int cpu;
2817 
2818 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2819 		if (!cpu_possible(cpu))
2820 			continue;
2821 		*pos = cpu+1;
2822 		return per_cpu_ptr(tbl->stats, cpu);
2823 	}
2824 	return NULL;
2825 }
2826 
2827 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2828 {
2829 
2830 }
2831 
2832 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2833 {
2834 	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
2835 	struct neigh_statistics *st = v;
2836 
2837 	if (v == SEQ_START_TOKEN) {
2838 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
2839 		return 0;
2840 	}
2841 
2842 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2843 			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
2844 		   atomic_read(&tbl->entries),
2845 
2846 		   st->allocs,
2847 		   st->destroys,
2848 		   st->hash_grows,
2849 
2850 		   st->lookups,
2851 		   st->hits,
2852 
2853 		   st->res_failed,
2854 
2855 		   st->rcv_probes_mcast,
2856 		   st->rcv_probes_ucast,
2857 
2858 		   st->periodic_gc_runs,
2859 		   st->forced_gc_runs,
2860 		   st->unres_discards,
2861 		   st->table_fulls
2862 		   );
2863 
2864 	return 0;
2865 }
2866 
2867 static const struct seq_operations neigh_stat_seq_ops = {
2868 	.start	= neigh_stat_seq_start,
2869 	.next	= neigh_stat_seq_next,
2870 	.stop	= neigh_stat_seq_stop,
2871 	.show	= neigh_stat_seq_show,
2872 };
2873 #endif /* CONFIG_PROC_FS */
2874 
2875 static inline size_t neigh_nlmsg_size(void)
2876 {
2877 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2878 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2879 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2880 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2881 	       + nla_total_size(4); /* NDA_PROBES */
2882 }
2883 
2884 static void __neigh_notify(struct neighbour *n, int type, int flags,
2885 			   u32 pid)
2886 {
2887 	struct net *net = dev_net(n->dev);
2888 	struct sk_buff *skb;
2889 	int err = -ENOBUFS;
2890 
2891 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2892 	if (skb == NULL)
2893 		goto errout;
2894 
2895 	err = neigh_fill_info(skb, n, pid, 0, type, flags);
2896 	if (err < 0) {
2897 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2898 		WARN_ON(err == -EMSGSIZE);
2899 		kfree_skb(skb);
2900 		goto errout;
2901 	}
2902 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2903 	return;
2904 errout:
2905 	if (err < 0)
2906 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2907 }
2908 
2909 void neigh_app_ns(struct neighbour *n)
2910 {
2911 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
2912 }
2913 EXPORT_SYMBOL(neigh_app_ns);
2914 
2915 #ifdef CONFIG_SYSCTL
2916 static int zero;
2917 static int int_max = INT_MAX;
2918 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2919 
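/*
 * The legacy unres_qlen sysctl is expressed in packets while the value
 * is stored in bytes (QUEUE_LEN_BYTES), so reads and writes scale by
 * SKB_TRUESIZE(ETH_FRAME_LEN), the assumed true size of one queued
 * maximum-sized Ethernet frame.
 */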
2920 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2921 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2922 {
2923 	int size, ret;
2924 	struct ctl_table tmp = *ctl;
2925 
2926 	tmp.extra1 = &zero;
2927 	tmp.extra2 = &unres_qlen_max;
2928 	tmp.data = &size;
2929 
2930 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2931 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2932 
2933 	if (write && !ret)
2934 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2935 	return ret;
2936 }
2937 
2938 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2939 						   int family)
2940 {
2941 	switch (family) {
2942 	case AF_INET:
2943 		return __in_dev_arp_parms_get_rcu(dev);
2944 	case AF_INET6:
2945 		return __in6_dev_nd_parms_get_rcu(dev);
2946 	}
2947 	return NULL;
2948 }
2949 
2950 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2951 				  int index)
2952 {
2953 	struct net_device *dev;
2954 	int family = neigh_parms_family(p);
2955 
2956 	rcu_read_lock();
2957 	for_each_netdev_rcu(net, dev) {
2958 		struct neigh_parms *dst_p =
2959 				neigh_get_dev_parms_rcu(dev, family);
2960 
2961 		if (dst_p && !test_bit(index, dst_p->data_state))
2962 			dst_p->data[index] = p->data[index];
2963 	}
2964 	rcu_read_unlock();
2965 }
2966 
2967 static void neigh_proc_update(struct ctl_table *ctl, int write)
2968 {
2969 	struct net_device *dev = ctl->extra1;
2970 	struct neigh_parms *p = ctl->extra2;
2971 	struct net *net = neigh_parms_net(p);
2972 	int index = (int *) ctl->data - p->data;
2973 
2974 	if (!write)
2975 		return;
2976 
2977 	set_bit(index, p->data_state);
2978 	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
2979 		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
2980 	if (!dev) /* NULL dev means this is a default value */
2981 		neigh_copy_dflt_parms(net, p, index);
2982 }
2983 
2984 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2985 					   void __user *buffer,
2986 					   size_t *lenp, loff_t *ppos)
2987 {
2988 	struct ctl_table tmp = *ctl;
2989 	int ret;
2990 
2991 	tmp.extra1 = &zero;
2992 	tmp.extra2 = &int_max;
2993 
2994 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2995 	neigh_proc_update(ctl, write);
2996 	return ret;
2997 }
2998 
2999 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
3000 			void __user *buffer, size_t *lenp, loff_t *ppos)
3001 {
3002 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
3003 
3004 	neigh_proc_update(ctl, write);
3005 	return ret;
3006 }
3007 EXPORT_SYMBOL(neigh_proc_dointvec);
3008 
3009 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
3010 				void __user *buffer,
3011 				size_t *lenp, loff_t *ppos)
3012 {
3013 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3014 
3015 	neigh_proc_update(ctl, write);
3016 	return ret;
3017 }
3018 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
3019 
3020 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
3021 					      void __user *buffer,
3022 					      size_t *lenp, loff_t *ppos)
3023 {
3024 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
3025 
3026 	neigh_proc_update(ctl, write);
3027 	return ret;
3028 }
3029 
3030 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
3031 				   void __user *buffer,
3032 				   size_t *lenp, loff_t *ppos)
3033 {
3034 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3035 
3036 	neigh_proc_update(ctl, write);
3037 	return ret;
3038 }
3039 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
3040 
3041 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
3042 					  void __user *buffer,
3043 					  size_t *lenp, loff_t *ppos)
3044 {
3045 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
3046 
3047 	neigh_proc_update(ctl, write);
3048 	return ret;
3049 }
3050 
3051 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
3052 					  void __user *buffer,
3053 					  size_t *lenp, loff_t *ppos)
3054 {
3055 	struct neigh_parms *p = ctl->extra2;
3056 	int ret;
3057 
3058 	if (strcmp(ctl->procname, "base_reachable_time") == 0)
3059 		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
3060 	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
3061 		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
3062 	else
3063 		ret = -1;
3064 
3065 	if (write && ret == 0) {
3066 		/* update reachable_time as well, otherwise, the change will
3067 		 * only be effective after the next time neigh_periodic_work
3068 		 * decides to recompute it
3069 		 */
3070 		p->reachable_time =
3071 			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
3072 	}
3073 	return ret;
3074 }
3075 
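/*
 * Offset-of trick: the template below stores the offset of data[index]
 * within struct neigh_parms as a fake pointer; neigh_sysctl_register()
 * turns it into a real address by adding the parms pointer to it.
 */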
3076 #define NEIGH_PARMS_DATA_OFFSET(index)	\
3077 	(&((struct neigh_parms *) 0)->data[index])
3078 
3079 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
3080 	[NEIGH_VAR_ ## attr] = { \
3081 		.procname	= name, \
3082 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
3083 		.maxlen		= sizeof(int), \
3084 		.mode		= mval, \
3085 		.proc_handler	= proc, \
3086 	}
3087 
3088 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
3089 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
3090 
3091 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
3092 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
3093 
3094 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
3095 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
3096 
3097 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
3098 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3099 
3100 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
3101 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
3102 
3103 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
3104 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
3105 
3106 static struct neigh_sysctl_table {
3107 	struct ctl_table_header *sysctl_header;
3108 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
3109 } neigh_sysctl_template __read_mostly = {
3110 	.neigh_vars = {
3111 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
3112 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
3113 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
3114 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
3115 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
3116 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
3117 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
3118 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
3119 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
3120 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
3121 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
3122 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
3123 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
3124 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3125 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3126 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3127 		[NEIGH_VAR_GC_INTERVAL] = {
3128 			.procname	= "gc_interval",
3129 			.maxlen		= sizeof(int),
3130 			.mode		= 0644,
3131 			.proc_handler	= proc_dointvec_jiffies,
3132 		},
3133 		[NEIGH_VAR_GC_THRESH1] = {
3134 			.procname	= "gc_thresh1",
3135 			.maxlen		= sizeof(int),
3136 			.mode		= 0644,
3137 			.extra1		= &zero,
3138 			.extra2		= &int_max,
3139 			.proc_handler	= proc_dointvec_minmax,
3140 		},
3141 		[NEIGH_VAR_GC_THRESH2] = {
3142 			.procname	= "gc_thresh2",
3143 			.maxlen		= sizeof(int),
3144 			.mode		= 0644,
3145 			.extra1		= &zero,
3146 			.extra2		= &int_max,
3147 			.proc_handler	= proc_dointvec_minmax,
3148 		},
3149 		[NEIGH_VAR_GC_THRESH3] = {
3150 			.procname	= "gc_thresh3",
3151 			.maxlen		= sizeof(int),
3152 			.mode		= 0644,
3153 			.extra1		= &zero,
3154 			.extra2		= &int_max,
3155 			.proc_handler	= proc_dointvec_minmax,
3156 		},
3157 		{},
3158 	},
3159 };
3160 
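/*
 * Register the per-device (or per-family default) neighbour sysctls,
 * e.g. /proc/sys/net/ipv4/neigh/eth0/retrans_time ("eth0" here is just
 * an example device name).  @handler, when given, overrides the four
 * *time knobs; otherwise neigh_proc_base_reachable_time() is installed
 * for base_reachable_time(_ms).
 */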
3161 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3162 			  proc_handler *handler)
3163 {
3164 	int i;
3165 	struct neigh_sysctl_table *t;
3166 	const char *dev_name_source;
3167 	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];
3168 	char *p_name;
3169 
3170 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3171 	if (!t)
3172 		goto err;
3173 
3174 	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
3175 		t->neigh_vars[i].data += (long) p;
3176 		t->neigh_vars[i].extra1 = dev;
3177 		t->neigh_vars[i].extra2 = p;
3178 	}
3179 
3180 	if (dev) {
3181 		dev_name_source = dev->name;
3182 		/* Terminate the table early */
3183 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3184 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3185 	} else {
3186 		struct neigh_table *tbl = p->tbl;
3187 		dev_name_source = "default";
3188 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3189 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3190 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3191 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3192 	}
3193 
3194 	if (handler) {
3195 		/* RetransTime */
3196 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3197 		/* ReachableTime */
3198 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3199 		/* RetransTime (in milliseconds) */
3200 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3201 		/* ReachableTime (in milliseconds) */
3202 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3203 	} else {
3204 		/* These handlers update p->reachable_time after
3205 		 * base_reachable_time(_ms) is set, so the new interval takes
3206 		 * effect on the next neighbour update instead of waiting for
3207 		 * neigh_periodic_work to recompute it (can be multiple minutes).
3208 		 * Any handler that replaces them should do this as well.
3209 		 */
3210 		/* ReachableTime */
3211 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
3212 			neigh_proc_base_reachable_time;
3213 		/* ReachableTime (in milliseconds) */
3214 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
3215 			neigh_proc_base_reachable_time;
3216 	}
3217 
3218 	/* Don't export sysctls to unprivileged users */
3219 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3220 		t->neigh_vars[0].procname = NULL;
3221 
3222 	switch (neigh_parms_family(p)) {
3223 	case AF_INET:
3224 	      p_name = "ipv4";
3225 	      break;
3226 	case AF_INET6:
3227 	      p_name = "ipv6";
3228 	      break;
3229 	default:
3230 	      BUG();
3231 	}
3232 
3233 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3234 		p_name, dev_name_source);
3235 	t->sysctl_header =
3236 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3237 	if (!t->sysctl_header)
3238 		goto free;
3239 
3240 	p->sysctl_table = t;
3241 	return 0;
3242 
3243 free:
3244 	kfree(t);
3245 err:
3246 	return -ENOBUFS;
3247 }
3248 EXPORT_SYMBOL(neigh_sysctl_register);
3249 
3250 void neigh_sysctl_unregister(struct neigh_parms *p)
3251 {
3252 	if (p->sysctl_table) {
3253 		struct neigh_sysctl_table *t = p->sysctl_table;
3254 		p->sysctl_table = NULL;
3255 		unregister_net_sysctl_table(t->sysctl_header);
3256 		kfree(t);
3257 	}
3258 }
3259 EXPORT_SYMBOL(neigh_sysctl_unregister);
3260 
3261 #endif	/* CONFIG_SYSCTL */
3262 
3263 static int __init neigh_init(void)
3264 {
3265 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
3266 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
3267 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
3268 
3269 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3270 		      0);
3271 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);
3272 
3273 	return 0;
3274 }
3275 
3276 subsys_initcall(neigh_init);
3277