/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to
     use the neighbour cache.
   - If an entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
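
/* For example, a hypothetical walker that needs to do real work on an
 * entry would follow the rules above like this (a sketch, not code from
 * this file):
 *
 *	read_lock_bh(&tbl->lock);
 *	n = <find entry in its bucket>;
 *	neigh_hold(n);
 *	read_unlock_bh(&tbl->lock);
 *	<talk to the driver / send packets>;
 *	neigh_release(n);
 */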

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * It is a uniform random distribution over the interval
 * (1/2)*base ... (3/2)*base.  It corresponds to the default IPv6
 * settings and is not overridable, because it is a really
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
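
/* Worked example (not code from this file): with base = 30 * HZ the
 * returned delay is uniform over [15 * HZ, 45 * HZ), i.e. RFC 4861's
 * MIN_RANDOM_FACTOR..MAX_RANDOM_FACTOR range of 0.5..1.5 applied to
 * BaseReachableTime.
 */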

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* A neighbour record may be discarded if:
			 * - nobody refers to it, and
			 * - it is not permanent.
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone else is still using it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   the entry to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
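
/* A sketch of the admission control above, assuming the usual ARP
 * defaults gc_thresh1/2/3 = 128/512/1024: below gc_thresh2 every
 * allocation succeeds with no GC; between gc_thresh2 and gc_thresh3 a
 * synchronous forced GC runs at most once per 5 seconds and the
 * allocation still succeeds; at gc_thresh3 and above GC always runs and
 * the allocation fails unless it managed to shrink the table.
 */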

static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
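	/* Force the low bit on: the protocol hash functions multiply by
	 * hash_rnd, and an odd (hence non-zero) multiplier keeps that
	 * mixing invertible mod 2^32.
	 */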
	*x |= 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
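
/* Sizing example: the initial table is allocated with shift == 3 (see
 * neigh_table_init_no_netlink() below), i.e. 8 buckets or 64 bytes of
 * pointers on a 64-bit build, comfortably on the kzalloc() path; with
 * 4 KiB pages only shift >= 10 spills over to __get_free_pages().
 */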

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
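
/* A note on the "hash >>= (32 - hash_shift)" idiom used here and in the
 * lookup paths: tbl->hash() returns a full 32-bit value whose high bits
 * are the best mixed, so the bucket index is taken from the top.  E.g.
 * with hash_shift == 3, a hash of 0xdeadbeef selects bucket
 * 0xdeadbeef >> 29 == 6.
 */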

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
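
/* Typical caller pattern (a sketch; arp_tbl is just an example table):
 *
 *	n = neigh_lookup(&arp_tbl, &ip, dev);
 *	if (n) {
 *		... read n->nud_state, n->ha, ...
 *		neigh_release(n);
 *	}
 *
 * The atomic_inc_not_zero() above guarantees the entry cannot be freed
 * while the caller holds it.
 */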

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);
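
/* Most callers use the neigh_create() inline from <net/neighbour.h>,
 * which is expected to be a thin wrapper passing want_ref == true, so
 * the new entry comes back with an extra reference already held.
 */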

static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, hold_net(net));
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);

int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable the fast path.

   Called with the neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable the fast path.

   Called with the neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
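
/* Together these two helpers implement the NUD fast/slow path switch:
 * neigh_connect() installs ops->connected_output, which builds headers
 * straight from the cached lladdr, while neigh_suspect() falls back to
 * ops->output (normally neigh_resolve_output() below), which re-checks
 * reachability for every packet.
 */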

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			      NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static inline int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE) ?
		NEIGH_VAR(p, UCAST_PROBES) :
		NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
		NEIGH_VAR(p, MCAST_PROBES);
}
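
/* With the common defaults (ucast_probes 3, app_probes 0,
 * mcast_probes 3 — values are per-interface tunables) this allows three
 * unicast probes while in NUD_PROBE and six attempts in total while
 * NUD_INCOMPLETE.
 */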

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very thin place. error_report() is a very complicated
	   routine. In particular, it can hit this same neighbour entry!

	   So we try to be careful here and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_copy(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}
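
/* Summary of the timer-driven NUD transitions above:
 *
 *   REACHABLE -> (confirmation too old, used recently) -> DELAY
 *   REACHABLE -> (confirmation too old, idle)          -> STALE
 *   DELAY     -> (confirmed while delaying)            -> REACHABLE
 *   DELAY     -> (delay_probe_time elapsed)            -> PROBE
 *   PROBE/INCOMPLETE -> (probe budget exhausted)       -> FAILED
 */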

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}

/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it differs.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   The caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
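
/* Usage sketch: the administrative netlink path below ends up here as
 *
 *	neigh_update(neigh, lladdr, ndm->ndm_state,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 *
 * while protocol input paths (e.g. ARP) typically pass NUD_STALE or
 * NUD_REACHABLE with the override flags chosen by policy.
 */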

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_PROBE;
	atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	__be16 prot = dst->ops->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* This function can be used in contexts where only the old dev_queue_xmit
 * worked, e.g. if you want to override the normal output path (eql, shaper)
 * but resolution has not been made yet.
 */

int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev_rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int rc = 0;

	if (!dst)
		goto discard;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh, dst);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	neigh_dbg(1, "%s: dst=%p neigh=%p\n", __func__, dst, neigh);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
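
/* The read_seqbegin()/read_seqretry() loop above (and again in
 * neigh_connected_output() below) replays dev_hard_header() whenever
 * neigh_update() rewrote neigh->ha under write_seqlock(&neigh->ha_lock)
 * mid-copy, so a frame can never go out with a torn hardware address.
 */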

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, hold_net(net));
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			release_net(net);
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	neigh_dbg(1, "%s: not found\n", __func__);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		pr_err("Registering multiple tables for family %d\n",
		       tbl->family);
		dump_stack();
	}
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* This is not clean... Fix it so the IPv6 module can be unloaded safely. */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		if (ndm->ndm_flags & NTF_USE) {
			neigh_event_send(neigh, NULL);
			err = 0;
		} else
			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;
out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximate value for the deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME)) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME)) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME)) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME)) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY)) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY)) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME)))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
1960 
1961 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1962 	[NDTA_NAME]		= { .type = NLA_STRING },
1963 	[NDTA_THRESH1]		= { .type = NLA_U32 },
1964 	[NDTA_THRESH2]		= { .type = NLA_U32 },
1965 	[NDTA_THRESH3]		= { .type = NLA_U32 },
1966 	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
1967 	[NDTA_PARMS]		= { .type = NLA_NESTED },
1968 };
1969 
1970 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1971 	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
1972 	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
1973 	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
1974 	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
1975 	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
1976 	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
1977 	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
1978 	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
1979 	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
1980 	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
1981 	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
1982 	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
1983 	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
1984 };
1985 
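/* RTM_SETNEIGHTBL handler.  The target table is selected by NDTA_NAME
 * (optionally narrowed by family); a nested NDTA_PARMS attribute updates
 * one parameter set, while the GC thresholds/interval are table-global
 * and may only be modified from the initial netns.
 */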
1986 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1987 {
1988 	struct net *net = sock_net(skb->sk);
1989 	struct neigh_table *tbl;
1990 	struct ndtmsg *ndtmsg;
1991 	struct nlattr *tb[NDTA_MAX+1];
1992 	int err;
1993 
1994 	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1995 			  nl_neightbl_policy);
1996 	if (err < 0)
1997 		goto errout;
1998 
1999 	if (tb[NDTA_NAME] == NULL) {
2000 		err = -EINVAL;
2001 		goto errout;
2002 	}
2003 
2004 	ndtmsg = nlmsg_data(nlh);
2005 	read_lock(&neigh_tbl_lock);
2006 	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
2007 		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2008 			continue;
2009 
2010 		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
2011 			break;
2012 	}
2013 
2014 	if (tbl == NULL) {
2015 		err = -ENOENT;
2016 		goto errout_locked;
2017 	}
2018 
2019 	/*
2020 	 * We acquire tbl->lock to be nice to the periodic timers and
2021 	 * make sure they always see a consistent set of values.
2022 	 */
2023 	write_lock_bh(&tbl->lock);
2024 
2025 	if (tb[NDTA_PARMS]) {
2026 		struct nlattr *tbp[NDTPA_MAX+1];
2027 		struct neigh_parms *p;
2028 		int i, ifindex = 0;
2029 
2030 		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2031 				       nl_ntbl_parm_policy);
2032 		if (err < 0)
2033 			goto errout_tbl_lock;
2034 
2035 		if (tbp[NDTPA_IFINDEX])
2036 			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2037 
2038 		p = lookup_neigh_parms(tbl, net, ifindex);
2039 		if (p == NULL) {
2040 			err = -ENOENT;
2041 			goto errout_tbl_lock;
2042 		}
2043 
2044 		for (i = 1; i <= NDTPA_MAX; i++) {
2045 			if (tbp[i] == NULL)
2046 				continue;
2047 
2048 			switch (i) {
2049 			case NDTPA_QUEUE_LEN:
2050 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2051 					      nla_get_u32(tbp[i]) *
2052 					      SKB_TRUESIZE(ETH_FRAME_LEN));
2053 				break;
2054 			case NDTPA_QUEUE_LENBYTES:
2055 				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
2056 					      nla_get_u32(tbp[i]));
2057 				break;
2058 			case NDTPA_PROXY_QLEN:
2059 				NEIGH_VAR_SET(p, PROXY_QLEN,
2060 					      nla_get_u32(tbp[i]));
2061 				break;
2062 			case NDTPA_APP_PROBES:
2063 				NEIGH_VAR_SET(p, APP_PROBES,
2064 					      nla_get_u32(tbp[i]));
2065 				break;
2066 			case NDTPA_UCAST_PROBES:
2067 				NEIGH_VAR_SET(p, UCAST_PROBES,
2068 					      nla_get_u32(tbp[i]));
2069 				break;
2070 			case NDTPA_MCAST_PROBES:
2071 				NEIGH_VAR_SET(p, MCAST_PROBES,
2072 					      nla_get_u32(tbp[i]));
2073 				break;
2074 			case NDTPA_BASE_REACHABLE_TIME:
2075 				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
2076 					      nla_get_msecs(tbp[i]));
2077 				break;
2078 			case NDTPA_GC_STALETIME:
2079 				NEIGH_VAR_SET(p, GC_STALETIME,
2080 					      nla_get_msecs(tbp[i]));
2081 				break;
2082 			case NDTPA_DELAY_PROBE_TIME:
2083 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
2084 					      nla_get_msecs(tbp[i]));
2085 				break;
2086 			case NDTPA_RETRANS_TIME:
2087 				NEIGH_VAR_SET(p, RETRANS_TIME,
2088 					      nla_get_msecs(tbp[i]));
2089 				break;
2090 			case NDTPA_ANYCAST_DELAY:
2091 				NEIGH_VAR_SET(p, ANYCAST_DELAY,
2092 					      nla_get_msecs(tbp[i]));
2093 				break;
2094 			case NDTPA_PROXY_DELAY:
2095 				NEIGH_VAR_SET(p, PROXY_DELAY,
2096 					      nla_get_msecs(tbp[i]));
2097 				break;
2098 			case NDTPA_LOCKTIME:
2099 				NEIGH_VAR_SET(p, LOCKTIME,
2100 					      nla_get_msecs(tbp[i]));
2101 				break;
2102 			}
2103 		}
2104 	}
2105 
2106 	err = -ENOENT;
2107 	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
2108 	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
2109 	    !net_eq(net, &init_net))
2110 		goto errout_tbl_lock;
2111 
2112 	if (tb[NDTA_THRESH1])
2113 		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2114 
2115 	if (tb[NDTA_THRESH2])
2116 		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2117 
2118 	if (tb[NDTA_THRESH3])
2119 		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2120 
2121 	if (tb[NDTA_GC_INTERVAL])
2122 		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2123 
2124 	err = 0;
2125 
2126 errout_tbl_lock:
2127 	write_unlock_bh(&tbl->lock);
2128 errout_locked:
2129 	read_unlock(&neigh_tbl_lock);
2130 errout:
2131 	return err;
2132 }
2133 
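/* RTM_GETNEIGHTBL dump: one message per table, then one per non-default
 * parameter set in the caller's netns.  cb->args[0] and cb->args[1]
 * remember how far the previous invocation got.
 */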
2134 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2135 {
2136 	struct net *net = sock_net(skb->sk);
2137 	int family, tidx, nidx = 0;
2138 	int tbl_skip = cb->args[0];
2139 	int neigh_skip = cb->args[1];
2140 	struct neigh_table *tbl;
2141 
2142 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2143 
2144 	read_lock(&neigh_tbl_lock);
2145 	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2146 		struct neigh_parms *p;
2147 
2148 		if (tidx < tbl_skip || (family && tbl->family != family))
2149 			continue;
2150 
2151 		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2152 				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2153 				       NLM_F_MULTI) <= 0)
2154 			break;
2155 
2156 		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2157 			if (!net_eq(neigh_parms_net(p), net))
2158 				continue;
2159 
2160 			if (nidx < neigh_skip)
2161 				goto next;
2162 
2163 			if (neightbl_fill_param_info(skb, tbl, p,
2164 						     NETLINK_CB(cb->skb).portid,
2165 						     cb->nlh->nlmsg_seq,
2166 						     RTM_NEWNEIGHTBL,
2167 						     NLM_F_MULTI) <= 0)
2168 				goto out;
2169 		next:
2170 			nidx++;
2171 		}
2172 
2173 		neigh_skip = 0;
2174 	}
2175 out:
2176 	read_unlock(&neigh_tbl_lock);
2177 	cb->args[0] = tidx;
2178 	cb->args[1] = nidx;
2179 
2180 	return skb->len;
2181 }
2182 
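/* Encode one neighbour as an RTM_NEWNEIGH message: ndmsg header, the
 * NDA_DST key, the hardware address when the entry is NUD_VALID, plus
 * NDA_PROBES and NDA_CACHEINFO timestamps relative to "now".
 */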
2183 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2184 			   u32 pid, u32 seq, int type, unsigned int flags)
2185 {
2186 	unsigned long now = jiffies;
2187 	struct nda_cacheinfo ci;
2188 	struct nlmsghdr *nlh;
2189 	struct ndmsg *ndm;
2190 
2191 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2192 	if (nlh == NULL)
2193 		return -EMSGSIZE;
2194 
2195 	ndm = nlmsg_data(nlh);
2196 	ndm->ndm_family	 = neigh->ops->family;
2197 	ndm->ndm_pad1    = 0;
2198 	ndm->ndm_pad2    = 0;
2199 	ndm->ndm_flags	 = neigh->flags;
2200 	ndm->ndm_type	 = neigh->type;
2201 	ndm->ndm_ifindex = neigh->dev->ifindex;
2202 
2203 	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2204 		goto nla_put_failure;
2205 
2206 	read_lock_bh(&neigh->lock);
2207 	ndm->ndm_state	 = neigh->nud_state;
2208 	if (neigh->nud_state & NUD_VALID) {
2209 		char haddr[MAX_ADDR_LEN];
2210 
2211 		neigh_ha_snapshot(haddr, neigh, neigh->dev);
2212 		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2213 			read_unlock_bh(&neigh->lock);
2214 			goto nla_put_failure;
2215 		}
2216 	}
2217 
2218 	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
2219 	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2220 	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
2221 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
2222 	read_unlock_bh(&neigh->lock);
2223 
2224 	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2225 	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2226 		goto nla_put_failure;
2227 
2228 	return nlmsg_end(skb, nlh);
2229 
2230 nla_put_failure:
2231 	nlmsg_cancel(skb, nlh);
2232 	return -EMSGSIZE;
2233 }
2234 
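/* Encode one proxy entry.  Proxy entries have no NUD state machine or
 * link-layer address, so only the key and the NTF_PROXY flag matter.
 */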
2235 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2236 			    u32 pid, u32 seq, int type, unsigned int flags,
2237 			    struct neigh_table *tbl)
2238 {
2239 	struct nlmsghdr *nlh;
2240 	struct ndmsg *ndm;
2241 
2242 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2243 	if (nlh == NULL)
2244 		return -EMSGSIZE;
2245 
2246 	ndm = nlmsg_data(nlh);
2247 	ndm->ndm_family	 = tbl->family;
2248 	ndm->ndm_pad1    = 0;
2249 	ndm->ndm_pad2    = 0;
2250 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
2251 	ndm->ndm_type	 = RTN_UNICAST;	/* not NDA_DST: ndm_type holds RTN_* values (numerically equal here) */
2252 	ndm->ndm_ifindex = pn->dev->ifindex;
2253 	ndm->ndm_state	 = NUD_NONE;
2254 
2255 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2256 		goto nla_put_failure;
2257 
2258 	return nlmsg_end(skb, nlh);
2259 
2260 nla_put_failure:
2261 	nlmsg_cancel(skb, nlh);
2262 	return -EMSGSIZE;
2263 }
2264 
2265 static void neigh_update_notify(struct neighbour *neigh)
2266 {
2267 	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2268 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
2269 }
2270 
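/* Dump the hash chains of one table under RCU, skipping entries from
 * foreign namespaces.  cb->args[1]/[2] carry the bucket and index where
 * a filled-up skb forced the previous pass to stop.
 */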
2271 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2272 			    struct netlink_callback *cb)
2273 {
2274 	struct net *net = sock_net(skb->sk);
2275 	struct neighbour *n;
2276 	int rc, h, s_h = cb->args[1];
2277 	int idx, s_idx = idx = cb->args[2];
2278 	struct neigh_hash_table *nht;
2279 
2280 	rcu_read_lock_bh();
2281 	nht = rcu_dereference_bh(tbl->nht);
2282 
2283 	for (h = s_h; h < (1 << nht->hash_shift); h++) {
2284 		if (h > s_h)
2285 			s_idx = 0;
2286 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2287 		     n != NULL;
2288 		     n = rcu_dereference_bh(n->next)) {
2289 			if (!net_eq(dev_net(n->dev), net))
2290 				continue;
2291 			if (idx < s_idx)
2292 				goto next;
2293 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2294 					    cb->nlh->nlmsg_seq,
2295 					    RTM_NEWNEIGH,
2296 					    NLM_F_MULTI) <= 0) {
2297 				rc = -1;
2298 				goto out;
2299 			}
2300 next:
2301 			idx++;
2302 		}
2303 	}
2304 	rc = skb->len;
2305 out:
2306 	rcu_read_unlock_bh();
2307 	cb->args[1] = h;
2308 	cb->args[2] = idx;
2309 	return rc;
2310 }
2311 
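/* Proxy counterpart of neigh_dump_table().  The proxy hash is protected
 * by tbl->lock instead of RCU and resumes via cb->args[3]/[4].
 */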
2312 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2313 			     struct netlink_callback *cb)
2314 {
2315 	struct pneigh_entry *n;
2316 	struct net *net = sock_net(skb->sk);
2317 	int rc, h, s_h = cb->args[3];
2318 	int idx, s_idx = idx = cb->args[4];
2319 
2320 	read_lock_bh(&tbl->lock);
2321 
2322 	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2323 		if (h > s_h)
2324 			s_idx = 0;
2325 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2326 			if (dev_net(n->dev) != net)
2327 				continue;
2328 			if (idx < s_idx)
2329 				goto next;
2330 			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2331 					    cb->nlh->nlmsg_seq,
2332 					    RTM_NEWNEIGH,
2333 					    NLM_F_MULTI, tbl) <= 0) {
2334 				read_unlock_bh(&tbl->lock);
2335 				rc = -1;
2336 				goto out;
2337 			}
2338 		next:
2339 			idx++;
2340 		}
2341 	}
2342 
2343 	read_unlock_bh(&tbl->lock);
2344 	rc = skb->len;
2345 out:
2346 	cb->args[3] = h;
2347 	cb->args[4] = idx;
2348 	return rc;
2349 
2350 }
2351 
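/* RTM_GETNEIGH dump entry point: a request carrying a full ndmsg with
 * NTF_PROXY set selects the proxy tables, anything else dumps the
 * ordinary neighbour entries.
 */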
2352 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2353 {
2354 	struct neigh_table *tbl;
2355 	int t, family, s_t;
2356 	int proxy = 0;
2357 	int err;
2358 
2359 	read_lock(&neigh_tbl_lock);
2360 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2361 
2362 	/* Check for the presence of a full ndmsg structure; the family
2363 	 * member sits at the same offset in rtgenmsg and ndmsg.
2364 	 */
2365 	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2366 	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2367 		proxy = 1;
2368 
2369 	s_t = cb->args[0];
2370 
2371 	for (tbl = neigh_tables, t = 0; tbl;
2372 	     tbl = tbl->next, t++) {
2373 		if (t < s_t || (family && tbl->family != family))
2374 			continue;
2375 		if (t > s_t)
2376 			memset(&cb->args[1], 0, sizeof(cb->args) -
2377 						sizeof(cb->args[0]));
2378 		if (proxy)
2379 			err = pneigh_dump_table(tbl, skb, cb);
2380 		else
2381 			err = neigh_dump_table(tbl, skb, cb);
2382 		if (err < 0)
2383 			break;
2384 	}
2385 	read_unlock(&neigh_tbl_lock);
2386 
2387 	cb->args[0] = t;
2388 	return skb->len;
2389 }
2390 
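/* Run cb() on every neighbour in the table.  The chains are traversed
 * under rcu_read_lock_bh() with tbl->lock read-held (to fence off
 * resizes), so cb() must neither sleep nor modify the table.
 */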
2391 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2392 {
2393 	int chain;
2394 	struct neigh_hash_table *nht;
2395 
2396 	rcu_read_lock_bh();
2397 	nht = rcu_dereference_bh(tbl->nht);
2398 
2399 	read_lock(&tbl->lock); /* avoid resizes */
2400 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2401 		struct neighbour *n;
2402 
2403 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2404 		     n != NULL;
2405 		     n = rcu_dereference_bh(n->next))
2406 			cb(n, cookie);
2407 	}
2408 	read_unlock(&tbl->lock);
2409 	rcu_read_unlock_bh();
2410 }
2411 EXPORT_SYMBOL(neigh_for_each);
2412 
2413 /* The tbl->lock must be held as a writer and BH disabled. */
2414 void __neigh_for_each_release(struct neigh_table *tbl,
2415 			      int (*cb)(struct neighbour *))
2416 {
2417 	int chain;
2418 	struct neigh_hash_table *nht;
2419 
2420 	nht = rcu_dereference_protected(tbl->nht,
2421 					lockdep_is_held(&tbl->lock));
2422 	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2423 		struct neighbour *n;
2424 		struct neighbour __rcu **np;
2425 
2426 		np = &nht->hash_buckets[chain];
2427 		while ((n = rcu_dereference_protected(*np,
2428 					lockdep_is_held(&tbl->lock))) != NULL) {
2429 			int release;
2430 
2431 			write_lock(&n->lock);
2432 			release = cb(n);
2433 			if (release) {
2434 				rcu_assign_pointer(*np,
2435 					rcu_dereference_protected(n->next,
2436 						lockdep_is_held(&tbl->lock)));
2437 				n->dead = 1;
2438 			} else
2439 				np = &n->next;
2440 			write_unlock(&n->lock);
2441 			if (release)
2442 				neigh_cleanup_and_release(n);
2443 		}
2444 	}
2445 }
2446 EXPORT_SYMBOL(__neigh_for_each_release);
2447 
2448 #ifdef CONFIG_PROC_FS
2449 
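/* seq_file iterators.  neigh_get_first()/neigh_get_next() walk the main
 * hash (honouring NEIGH_SEQ_SKIP_NOARP and an optional protocol
 * sub-iterator), pneigh_get_*() walk the proxy hash, and
 * neigh_get_idx_any() chains the two for a combined listing.
 */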
2450 static struct neighbour *neigh_get_first(struct seq_file *seq)
2451 {
2452 	struct neigh_seq_state *state = seq->private;
2453 	struct net *net = seq_file_net(seq);
2454 	struct neigh_hash_table *nht = state->nht;
2455 	struct neighbour *n = NULL;
2456 	int bucket = state->bucket;
2457 
2458 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2459 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2460 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2461 
2462 		while (n) {
2463 			if (!net_eq(dev_net(n->dev), net))
2464 				goto next;
2465 			if (state->neigh_sub_iter) {
2466 				loff_t fakep = 0;
2467 				void *v;
2468 
2469 				v = state->neigh_sub_iter(state, n, &fakep);
2470 				if (!v)
2471 					goto next;
2472 			}
2473 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2474 				break;
2475 			if (n->nud_state & ~NUD_NOARP)
2476 				break;
2477 next:
2478 			n = rcu_dereference_bh(n->next);
2479 		}
2480 
2481 		if (n)
2482 			break;
2483 	}
2484 	state->bucket = bucket;
2485 
2486 	return n;
2487 }
2488 
2489 static struct neighbour *neigh_get_next(struct seq_file *seq,
2490 					struct neighbour *n,
2491 					loff_t *pos)
2492 {
2493 	struct neigh_seq_state *state = seq->private;
2494 	struct net *net = seq_file_net(seq);
2495 	struct neigh_hash_table *nht = state->nht;
2496 
2497 	if (state->neigh_sub_iter) {
2498 		void *v = state->neigh_sub_iter(state, n, pos);
2499 		if (v)
2500 			return n;
2501 	}
2502 	n = rcu_dereference_bh(n->next);
2503 
2504 	while (1) {
2505 		while (n) {
2506 			if (!net_eq(dev_net(n->dev), net))
2507 				goto next;
2508 			if (state->neigh_sub_iter) {
2509 				void *v = state->neigh_sub_iter(state, n, pos);
2510 				if (v)
2511 					return n;
2512 				goto next;
2513 			}
2514 			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2515 				break;
2516 
2517 			if (n->nud_state & ~NUD_NOARP)
2518 				break;
2519 next:
2520 			n = rcu_dereference_bh(n->next);
2521 		}
2522 
2523 		if (n)
2524 			break;
2525 
2526 		if (++state->bucket >= (1 << nht->hash_shift))
2527 			break;
2528 
2529 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2530 	}
2531 
2532 	if (n && pos)
2533 		--(*pos);
2534 	return n;
2535 }
2536 
2537 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2538 {
2539 	struct neighbour *n = neigh_get_first(seq);
2540 
2541 	if (n) {
2542 		--(*pos);
2543 		while (*pos) {
2544 			n = neigh_get_next(seq, n, pos);
2545 			if (!n)
2546 				break;
2547 		}
2548 	}
2549 	return *pos ? NULL : n;
2550 }
2551 
2552 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2553 {
2554 	struct neigh_seq_state *state = seq->private;
2555 	struct net *net = seq_file_net(seq);
2556 	struct neigh_table *tbl = state->tbl;
2557 	struct pneigh_entry *pn = NULL;
2558 	int bucket = state->bucket;
2559 
2560 	state->flags |= NEIGH_SEQ_IS_PNEIGH;
2561 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2562 		pn = tbl->phash_buckets[bucket];
2563 		while (pn && !net_eq(pneigh_net(pn), net))
2564 			pn = pn->next;
2565 		if (pn)
2566 			break;
2567 	}
2568 	state->bucket = bucket;
2569 
2570 	return pn;
2571 }
2572 
2573 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2574 					    struct pneigh_entry *pn,
2575 					    loff_t *pos)
2576 {
2577 	struct neigh_seq_state *state = seq->private;
2578 	struct net *net = seq_file_net(seq);
2579 	struct neigh_table *tbl = state->tbl;
2580 
2581 	do {
2582 		pn = pn->next;
2583 	} while (pn && !net_eq(pneigh_net(pn), net));
2584 
2585 	while (!pn) {
2586 		if (++state->bucket > PNEIGH_HASHMASK)
2587 			break;
2588 		pn = tbl->phash_buckets[state->bucket];
2589 		while (pn && !net_eq(pneigh_net(pn), net))
2590 			pn = pn->next;
2591 		if (pn)
2592 			break;
2593 	}
2594 
2595 	if (pn && pos)
2596 		--(*pos);
2597 
2598 	return pn;
2599 }
2600 
2601 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2602 {
2603 	struct pneigh_entry *pn = pneigh_get_first(seq);
2604 
2605 	if (pn) {
2606 		--(*pos);
2607 		while (*pos) {
2608 			pn = pneigh_get_next(seq, pn, pos);
2609 			if (!pn)
2610 				break;
2611 		}
2612 	}
2613 	return *pos ? NULL : pn;
2614 }
2615 
2616 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2617 {
2618 	struct neigh_seq_state *state = seq->private;
2619 	void *rc;
2620 	loff_t idxpos = *pos;
2621 
2622 	rc = neigh_get_idx(seq, &idxpos);
2623 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2624 		rc = pneigh_get_idx(seq, &idxpos);
2625 
2626 	return rc;
2627 }
2628 
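/* Shared ->start() for the protocol neighbour seq_files.  Takes
 * rcu_read_lock_bh(), dropped again in neigh_seq_stop(); a zero *pos
 * produces SEQ_START_TOKEN so callers can emit a header line first.
 */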
2629 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2630 	__acquires(rcu_bh)
2631 {
2632 	struct neigh_seq_state *state = seq->private;
2633 
2634 	state->tbl = tbl;
2635 	state->bucket = 0;
2636 	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2637 
2638 	rcu_read_lock_bh();
2639 	state->nht = rcu_dereference_bh(tbl->nht);
2640 
2641 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2642 }
2643 EXPORT_SYMBOL(neigh_seq_start);
2644 
2645 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2646 {
2647 	struct neigh_seq_state *state;
2648 	void *rc;
2649 
2650 	if (v == SEQ_START_TOKEN) {
2651 		rc = neigh_get_first(seq);
2652 		goto out;
2653 	}
2654 
2655 	state = seq->private;
2656 	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2657 		rc = neigh_get_next(seq, v, NULL);
2658 		if (rc)
2659 			goto out;
2660 		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2661 			rc = pneigh_get_first(seq);
2662 	} else {
2663 		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2664 		rc = pneigh_get_next(seq, v, NULL);
2665 	}
2666 out:
2667 	++(*pos);
2668 	return rc;
2669 }
2670 EXPORT_SYMBOL(neigh_seq_next);
2671 
2672 void neigh_seq_stop(struct seq_file *seq, void *v)
2673 	__releases(rcu_bh)
2674 {
2675 	rcu_read_unlock_bh();
2676 }
2677 EXPORT_SYMBOL(neigh_seq_stop);
2678 
2679 /* statistics via seq_file */
2680 
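/* For the stat iterators, *pos encodes "CPU index + 1"; position 0 is
 * reserved for the SEQ_START_TOKEN header line.
 */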
2681 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2682 {
2683 	struct neigh_table *tbl = seq->private;
2684 	int cpu;
2685 
2686 	if (*pos == 0)
2687 		return SEQ_START_TOKEN;
2688 
2689 	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2690 		if (!cpu_possible(cpu))
2691 			continue;
2692 		*pos = cpu+1;
2693 		return per_cpu_ptr(tbl->stats, cpu);
2694 	}
2695 	return NULL;
2696 }
2697 
2698 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2699 {
2700 	struct neigh_table *tbl = seq->private;
2701 	int cpu;
2702 
2703 	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2704 		if (!cpu_possible(cpu))
2705 			continue;
2706 		*pos = cpu+1;
2707 		return per_cpu_ptr(tbl->stats, cpu);
2708 	}
2709 	return NULL;
2710 }
2711 
2712 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2713 {
2714 
2715 }
2716 
2717 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2718 {
2719 	struct neigh_table *tbl = seq->private;
2720 	struct neigh_statistics *st = v;
2721 
2722 	if (v == SEQ_START_TOKEN) {
2723 		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
2724 		return 0;
2725 	}
2726 
2727 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
2728 			"%08lx %08lx  %08lx %08lx %08lx\n",
2729 		   atomic_read(&tbl->entries),
2730 
2731 		   st->allocs,
2732 		   st->destroys,
2733 		   st->hash_grows,
2734 
2735 		   st->lookups,
2736 		   st->hits,
2737 
2738 		   st->res_failed,
2739 
2740 		   st->rcv_probes_mcast,
2741 		   st->rcv_probes_ucast,
2742 
2743 		   st->periodic_gc_runs,
2744 		   st->forced_gc_runs,
2745 		   st->unres_discards
2746 		   );
2747 
2748 	return 0;
2749 }
2750 
2751 static const struct seq_operations neigh_stat_seq_ops = {
2752 	.start	= neigh_stat_seq_start,
2753 	.next	= neigh_stat_seq_next,
2754 	.stop	= neigh_stat_seq_stop,
2755 	.show	= neigh_stat_seq_show,
2756 };
2757 
2758 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2759 {
2760 	int ret = seq_open(file, &neigh_stat_seq_ops);
2761 
2762 	if (!ret) {
2763 		struct seq_file *sf = file->private_data;
2764 		sf->private = PDE_DATA(inode);
2765 	}
2766 	return ret;
2767 }
2768 
2769 static const struct file_operations neigh_stat_seq_fops = {
2770 	.owner	 = THIS_MODULE,
2771 	.open 	 = neigh_stat_seq_open,
2772 	.read	 = seq_read,
2773 	.llseek	 = seq_lseek,
2774 	.release = seq_release,
2775 };
2776 
2777 #endif /* CONFIG_PROC_FS */
2778 
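/* Upper bound on the size of a neighbour notification, used to size the
 * skb allocated in __neigh_notify() below.
 */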
2779 static inline size_t neigh_nlmsg_size(void)
2780 {
2781 	return NLMSG_ALIGN(sizeof(struct ndmsg))
2782 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2783 	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2784 	       + nla_total_size(sizeof(struct nda_cacheinfo))
2785 	       + nla_total_size(4); /* NDA_PROBES */
2786 }
2787 
2788 static void __neigh_notify(struct neighbour *n, int type, int flags)
2789 {
2790 	struct net *net = dev_net(n->dev);
2791 	struct sk_buff *skb;
2792 	int err = -ENOBUFS;
2793 
2794 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2795 	if (skb == NULL)
2796 		goto errout;
2797 
2798 	err = neigh_fill_info(skb, n, 0, 0, type, flags);
2799 	if (err < 0) {
2800 		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2801 		WARN_ON(err == -EMSGSIZE);
2802 		kfree_skb(skb);
2803 		goto errout;
2804 	}
2805 	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2806 	return;
2807 errout:
2808 	if (err < 0)
2809 		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2810 }
2811 
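/* Nudge a userspace resolver: broadcast an RTM_GETNEIGH "request" to
 * RTNLGRP_NEIGH listeners (used by protocols, e.g. ARP, when app
 * solicitation is configured).
 */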
2812 void neigh_app_ns(struct neighbour *n)
2813 {
2814 	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2815 }
2816 EXPORT_SYMBOL(neigh_app_ns);
2817 
2818 #ifdef CONFIG_SYSCTL
2819 static int zero;
2820 static int int_max = INT_MAX;
2821 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2822 
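/* unres_qlen is presented to userspace in packets but stored in bytes
 * (QUEUE_LEN_BYTES).  Convert via SKB_TRUESIZE(ETH_FRAME_LEN) on both
 * read and write, clamping to unres_qlen_max so the multiplication
 * cannot overflow.
 */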
2823 static int proc_unres_qlen(struct ctl_table *ctl, int write,
2824 			   void __user *buffer, size_t *lenp, loff_t *ppos)
2825 {
2826 	int size, ret;
2827 	struct ctl_table tmp = *ctl;
2828 
2829 	tmp.extra1 = &zero;
2830 	tmp.extra2 = &unres_qlen_max;
2831 	tmp.data = &size;
2832 
2833 	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2834 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2835 
2836 	if (write && !ret)
2837 		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2838 	return ret;
2839 }
2840 
2841 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
2842 						   int family)
2843 {
2844 	switch (family) {
2845 	case AF_INET:
2846 		return __in_dev_arp_parms_get_rcu(dev);
2847 	case AF_INET6:
2848 		return __in6_dev_nd_parms_get_rcu(dev);
2849 	}
2850 	return NULL;
2851 }
2852 
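/* Propagate a changed "default" value to every device in the namespace
 * whose own copy of that field was never explicitly written (as tracked
 * by the data_state bitmap).
 */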
2853 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
2854 				  int index)
2855 {
2856 	struct net_device *dev;
2857 	int family = neigh_parms_family(p);
2858 
2859 	rcu_read_lock();
2860 	for_each_netdev_rcu(net, dev) {
2861 		struct neigh_parms *dst_p =
2862 				neigh_get_dev_parms_rcu(dev, family);
2863 
2864 		if (dst_p && !test_bit(index, dst_p->data_state))
2865 			dst_p->data[index] = p->data[index];
2866 	}
2867 	rcu_read_unlock();
2868 }
2869 
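/* Post-write hook shared by the sysctl handlers below: mark the field
 * as administratively set and, for writes against the default parms
 * (dev == NULL), fan the new value out to the per-device copies.
 */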
2870 static void neigh_proc_update(struct ctl_table *ctl, int write)
2871 {
2872 	struct net_device *dev = ctl->extra1;
2873 	struct neigh_parms *p = ctl->extra2;
2874 	struct net *net = neigh_parms_net(p);
2875 	int index = (int *) ctl->data - p->data;
2876 
2877 	if (!write)
2878 		return;
2879 
2880 	set_bit(index, p->data_state);
2881 	if (!dev) /* NULL dev means this is default value */
2882 	if (!dev) /* a NULL dev means these are the default values */
2883 }
2884 
2885 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
2886 					   void __user *buffer,
2887 					   size_t *lenp, loff_t *ppos)
2888 {
2889 	struct ctl_table tmp = *ctl;
2890 	int ret;
2891 
2892 	tmp.extra1 = &zero;
2893 	tmp.extra2 = &int_max;
2894 
2895 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2896 	neigh_proc_update(ctl, write);
2897 	return ret;
2898 }
2899 
2900 int neigh_proc_dointvec(struct ctl_table *ctl, int write,
2901 			void __user *buffer, size_t *lenp, loff_t *ppos)
2902 {
2903 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2904 
2905 	neigh_proc_update(ctl, write);
2906 	return ret;
2907 }
2908 EXPORT_SYMBOL(neigh_proc_dointvec);
2909 
2910 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
2911 				void __user *buffer,
2912 				size_t *lenp, loff_t *ppos)
2913 {
2914 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
2915 
2916 	neigh_proc_update(ctl, write);
2917 	return ret;
2918 }
2919 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
2920 
2921 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
2922 					      void __user *buffer,
2923 					      size_t *lenp, loff_t *ppos)
2924 {
2925 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
2926 
2927 	neigh_proc_update(ctl, write);
2928 	return ret;
2929 }
2930 
2931 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
2932 				   void __user *buffer,
2933 				   size_t *lenp, loff_t *ppos)
2934 {
2935 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
2936 
2937 	neigh_proc_update(ctl, write);
2938 	return ret;
2939 }
2940 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
2941 
2942 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
2943 					  void __user *buffer,
2944 					  size_t *lenp, loff_t *ppos)
2945 {
2946 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
2947 
2948 	neigh_proc_update(ctl, write);
2949 	return ret;
2950 }
2951 
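/* The template below stores only the offset of each field within
 * struct neigh_parms in .data; neigh_sysctl_register() adds the address
 * of the live parms instance to turn it into a usable pointer.
 */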
2952 #define NEIGH_PARMS_DATA_OFFSET(index)	\
2953 	(&((struct neigh_parms *) 0)->data[index])
2954 
2955 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
2956 	[NEIGH_VAR_ ## attr] = { \
2957 		.procname	= name, \
2958 		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
2959 		.maxlen		= sizeof(int), \
2960 		.mode		= mval, \
2961 		.proc_handler	= proc, \
2962 	}
2963 
2964 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
2965 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)
2966 
2967 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
2968 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)
2969 
2970 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
2971 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)
2972 
2973 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
2974 	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
2975 
2976 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
2977 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)
2978 
2979 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
2980 	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
2981 
2982 static struct neigh_sysctl_table {
2983 	struct ctl_table_header *sysctl_header;
2984 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2985 } neigh_sysctl_template __read_mostly = {
2986 	.neigh_vars = {
2987 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
2988 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
2989 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
2990 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
2991 		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
2992 		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
2993 		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
2994 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
2995 		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
2996 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
2997 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
2998 		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
2999 		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
3000 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
3001 		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
3002 		[NEIGH_VAR_GC_INTERVAL] = {
3003 			.procname	= "gc_interval",
3004 			.maxlen		= sizeof(int),
3005 			.mode		= 0644,
3006 			.proc_handler	= proc_dointvec_jiffies,
3007 		},
3008 		[NEIGH_VAR_GC_THRESH1] = {
3009 			.procname	= "gc_thresh1",
3010 			.maxlen		= sizeof(int),
3011 			.mode		= 0644,
3012 			.extra1 	= &zero,
3013 			.extra2		= &int_max,
3014 			.proc_handler	= proc_dointvec_minmax,
3015 		},
3016 		[NEIGH_VAR_GC_THRESH2] = {
3017 			.procname	= "gc_thresh2",
3018 			.maxlen		= sizeof(int),
3019 			.mode		= 0644,
3020 			.extra1 	= &zero,
3021 			.extra2		= &int_max,
3022 			.proc_handler	= proc_dointvec_minmax,
3023 		},
3024 		[NEIGH_VAR_GC_THRESH3] = {
3025 			.procname	= "gc_thresh3",
3026 			.maxlen		= sizeof(int),
3027 			.mode		= 0644,
3028 			.extra1 	= &zero,
3029 			.extra2		= &int_max,
3030 			.proc_handler	= proc_dointvec_minmax,
3031 		},
3032 		{},
3033 	},
3034 };
3035 
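/* Create the net/<proto>/neigh/<dev|default>/ sysctl directory for one
 * parameter set.  Protocols may supply a handler to intercept writes to
 * the retransmit/reachable timers (e.g. to keep cached values in sync).
 */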
3036 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3037 			  proc_handler *handler)
3038 {
3039 	int i;
3040 	struct neigh_sysctl_table *t;
3041 	const char *dev_name_source;
3042 	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3043 	char *p_name;
3044 
3045 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
3046 	if (!t)
3047 		goto err;
3048 
3049 	for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
3050 		t->neigh_vars[i].data += (long) p;
3051 		t->neigh_vars[i].extra1 = dev;
3052 		t->neigh_vars[i].extra2 = p;
3053 	}
3054 
3055 	if (dev) {
3056 		dev_name_source = dev->name;
3057 		/* Terminate the table early */
3058 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3059 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3060 	} else {
3061 		dev_name_source = "default";
3062 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
3063 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
3064 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
3065 		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
3066 	}
3067 
3068 	if (handler) {
3069 		/* RetransTime */
3070 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
3071 		/* ReachableTime */
3072 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
3073 		/* RetransTime (in milliseconds) */
3074 		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
3075 		/* ReachableTime (in milliseconds) */
3076 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
3077 	}
3078 
3079 	/* Don't export sysctls to unprivileged users */
3080 	if (neigh_parms_net(p)->user_ns != &init_user_ns)
3081 		t->neigh_vars[0].procname = NULL;
3082 
3083 	switch (neigh_parms_family(p)) {
3084 	case AF_INET:
3085 	      p_name = "ipv4";
3086 	      break;
3087 	case AF_INET6:
3088 	      p_name = "ipv6";
3089 	      break;
3090 	default:
3091 	      BUG();
3092 	}
3093 
3094 	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3095 		p_name, dev_name_source);
3096 	t->sysctl_header =
3097 		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3098 	if (!t->sysctl_header)
3099 		goto free;
3100 
3101 	p->sysctl_table = t;
3102 	return 0;
3103 
3104 free:
3105 	kfree(t);
3106 err:
3107 	return -ENOBUFS;
3108 }
3109 EXPORT_SYMBOL(neigh_sysctl_register);
3110 
3111 void neigh_sysctl_unregister(struct neigh_parms *p)
3112 {
3113 	if (p->sysctl_table) {
3114 		struct neigh_sysctl_table *t = p->sysctl_table;
3115 		p->sysctl_table = NULL;
3116 		unregister_net_sysctl_table(t->sysctl_header);
3117 		kfree(t);
3118 	}
3119 }
3120 EXPORT_SYMBOL(neigh_sysctl_unregister);
3121 
3122 #endif	/* CONFIG_SYSCTL */
3123 
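/* Hook the RTM_*NEIGH and RTM_*NEIGHTBL message types into rtnetlink at
 * boot; neigh_add() and neigh_delete() are defined earlier in this file.
 */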
3124 static int __init neigh_init(void)
3125 {
3126 	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3127 	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3128 	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3129 
3130 	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3131 		      NULL);
3132 	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3133 
3134 	return 0;
3135 }
3136 
3137 subsys_initcall(neigh_init);
3138 
3139