1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Generic address resolution entity 4 * 5 * Authors: 6 * Pedro Roque <roque@di.fc.ul.pt> 7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 8 * 9 * Fixes: 10 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add. 11 * Harald Welte Add neighbour cache statistics like rtstat 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16 #include <linux/slab.h> 17 #include <linux/kmemleak.h> 18 #include <linux/types.h> 19 #include <linux/kernel.h> 20 #include <linux/module.h> 21 #include <linux/socket.h> 22 #include <linux/netdevice.h> 23 #include <linux/proc_fs.h> 24 #ifdef CONFIG_SYSCTL 25 #include <linux/sysctl.h> 26 #endif 27 #include <linux/times.h> 28 #include <net/net_namespace.h> 29 #include <net/neighbour.h> 30 #include <net/arp.h> 31 #include <net/dst.h> 32 #include <net/sock.h> 33 #include <net/netevent.h> 34 #include <net/netlink.h> 35 #include <linux/rtnetlink.h> 36 #include <linux/random.h> 37 #include <linux/string.h> 38 #include <linux/log2.h> 39 #include <linux/inetdevice.h> 40 #include <net/addrconf.h> 41 42 #include <trace/events/neigh.h> 43 44 #define NEIGH_DEBUG 1 45 #define neigh_dbg(level, fmt, ...) \ 46 do { \ 47 if (level <= NEIGH_DEBUG) \ 48 pr_debug(fmt, ##__VA_ARGS__); \ 49 } while (0) 50 51 #define PNEIGH_HASHMASK 0xF 52 53 static void neigh_timer_handler(struct timer_list *t); 54 static void __neigh_notify(struct neighbour *n, int type, int flags, 55 u32 pid); 56 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); 57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, 58 struct net_device *dev); 59 60 #ifdef CONFIG_PROC_FS 61 static const struct seq_operations neigh_stat_seq_ops; 62 #endif 63 64 /* 65 Neighbour hash table buckets are protected with rwlock tbl->lock. 66 67 - All the scans/updates to hash buckets MUST be made under this lock. 68 - NOTHING clever should be made under this lock: no callbacks 69 to protocol backends, no attempts to send something to network. 70 It will result in deadlocks, if backend/driver wants to use neighbour 71 cache. 72 - If the entry requires some non-trivial actions, increase 73 its reference count and release table lock. 74 75 Neighbour entries are protected: 76 - with reference count. 77 - with rwlock neigh->lock 78 79 Reference count prevents destruction. 80 81 neigh->lock mainly serializes ll address data and its validity state. 82 However, the same lock is used to protect another entry fields: 83 - timer 84 - resolution queue 85 86 Again, nothing clever shall be made under neigh->lock, 87 the most complicated procedure, which we allow is dev->hard_header. 88 It is supposed, that dev->hard_header is simplistic and does 89 not make callbacks to neighbour tables. 90 */ 91 92 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb) 93 { 94 kfree_skb(skb); 95 return -ENETDOWN; 96 } 97 98 static void neigh_cleanup_and_release(struct neighbour *neigh) 99 { 100 trace_neigh_cleanup_and_release(neigh, 0); 101 __neigh_notify(neigh, RTM_DELNEIGH, 0, 0); 102 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 103 neigh_release(neigh); 104 } 105 106 /* 107 * It is random distribution in the interval (1/2)*base...(3/2)*base. 108 * It corresponds to default IPv6 settings and is not overridable, 109 * because it is really reasonable choice. 110 */ 111 112 unsigned long neigh_rand_reach_time(unsigned long base) 113 { 114 return base ? prandom_u32_max(base) + (base >> 1) : 0; 115 } 116 EXPORT_SYMBOL(neigh_rand_reach_time); 117 118 static void neigh_mark_dead(struct neighbour *n) 119 { 120 n->dead = 1; 121 if (!list_empty(&n->gc_list)) { 122 list_del_init(&n->gc_list); 123 atomic_dec(&n->tbl->gc_entries); 124 } 125 if (!list_empty(&n->managed_list)) 126 list_del_init(&n->managed_list); 127 } 128 129 static void neigh_update_gc_list(struct neighbour *n) 130 { 131 bool on_gc_list, exempt_from_gc; 132 133 write_lock_bh(&n->tbl->lock); 134 write_lock(&n->lock); 135 if (n->dead) 136 goto out; 137 138 /* remove from the gc list if new state is permanent or if neighbor 139 * is externally learned; otherwise entry should be on the gc list 140 */ 141 exempt_from_gc = n->nud_state & NUD_PERMANENT || 142 n->flags & NTF_EXT_LEARNED; 143 on_gc_list = !list_empty(&n->gc_list); 144 145 if (exempt_from_gc && on_gc_list) { 146 list_del_init(&n->gc_list); 147 atomic_dec(&n->tbl->gc_entries); 148 } else if (!exempt_from_gc && !on_gc_list) { 149 /* add entries to the tail; cleaning removes from the front */ 150 list_add_tail(&n->gc_list, &n->tbl->gc_list); 151 atomic_inc(&n->tbl->gc_entries); 152 } 153 out: 154 write_unlock(&n->lock); 155 write_unlock_bh(&n->tbl->lock); 156 } 157 158 static void neigh_update_managed_list(struct neighbour *n) 159 { 160 bool on_managed_list, add_to_managed; 161 162 write_lock_bh(&n->tbl->lock); 163 write_lock(&n->lock); 164 if (n->dead) 165 goto out; 166 167 add_to_managed = n->flags & NTF_MANAGED; 168 on_managed_list = !list_empty(&n->managed_list); 169 170 if (!add_to_managed && on_managed_list) 171 list_del_init(&n->managed_list); 172 else if (add_to_managed && !on_managed_list) 173 list_add_tail(&n->managed_list, &n->tbl->managed_list); 174 out: 175 write_unlock(&n->lock); 176 write_unlock_bh(&n->tbl->lock); 177 } 178 179 static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify, 180 bool *gc_update, bool *managed_update) 181 { 182 u32 ndm_flags, old_flags = neigh->flags; 183 184 if (!(flags & NEIGH_UPDATE_F_ADMIN)) 185 return; 186 187 ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0; 188 ndm_flags |= (flags & NEIGH_UPDATE_F_MANAGED) ? NTF_MANAGED : 0; 189 190 if ((old_flags ^ ndm_flags) & NTF_EXT_LEARNED) { 191 if (ndm_flags & NTF_EXT_LEARNED) 192 neigh->flags |= NTF_EXT_LEARNED; 193 else 194 neigh->flags &= ~NTF_EXT_LEARNED; 195 *notify = 1; 196 *gc_update = true; 197 } 198 if ((old_flags ^ ndm_flags) & NTF_MANAGED) { 199 if (ndm_flags & NTF_MANAGED) 200 neigh->flags |= NTF_MANAGED; 201 else 202 neigh->flags &= ~NTF_MANAGED; 203 *notify = 1; 204 *managed_update = true; 205 } 206 } 207 208 static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np, 209 struct neigh_table *tbl) 210 { 211 bool retval = false; 212 213 write_lock(&n->lock); 214 if (refcount_read(&n->refcnt) == 1) { 215 struct neighbour *neigh; 216 217 neigh = rcu_dereference_protected(n->next, 218 lockdep_is_held(&tbl->lock)); 219 rcu_assign_pointer(*np, neigh); 220 neigh_mark_dead(n); 221 retval = true; 222 } 223 write_unlock(&n->lock); 224 if (retval) 225 neigh_cleanup_and_release(n); 226 return retval; 227 } 228 229 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl) 230 { 231 struct neigh_hash_table *nht; 232 void *pkey = ndel->primary_key; 233 u32 hash_val; 234 struct neighbour *n; 235 struct neighbour __rcu **np; 236 237 nht = rcu_dereference_protected(tbl->nht, 238 lockdep_is_held(&tbl->lock)); 239 hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd); 240 hash_val = hash_val >> (32 - nht->hash_shift); 241 242 np = &nht->hash_buckets[hash_val]; 243 while ((n = rcu_dereference_protected(*np, 244 lockdep_is_held(&tbl->lock)))) { 245 if (n == ndel) 246 return neigh_del(n, np, tbl); 247 np = &n->next; 248 } 249 return false; 250 } 251 252 static int neigh_forced_gc(struct neigh_table *tbl) 253 { 254 int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2; 255 unsigned long tref = jiffies - 5 * HZ; 256 struct neighbour *n, *tmp; 257 int shrunk = 0; 258 259 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs); 260 261 write_lock_bh(&tbl->lock); 262 263 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) { 264 if (refcount_read(&n->refcnt) == 1) { 265 bool remove = false; 266 267 write_lock(&n->lock); 268 if ((n->nud_state == NUD_FAILED) || 269 (n->nud_state == NUD_NOARP) || 270 (tbl->is_multicast && 271 tbl->is_multicast(n->primary_key)) || 272 time_after(tref, n->updated)) 273 remove = true; 274 write_unlock(&n->lock); 275 276 if (remove && neigh_remove_one(n, tbl)) 277 shrunk++; 278 if (shrunk >= max_clean) 279 break; 280 } 281 } 282 283 tbl->last_flush = jiffies; 284 285 write_unlock_bh(&tbl->lock); 286 287 return shrunk; 288 } 289 290 static void neigh_add_timer(struct neighbour *n, unsigned long when) 291 { 292 neigh_hold(n); 293 if (unlikely(mod_timer(&n->timer, when))) { 294 printk("NEIGH: BUG, double timer add, state is %x\n", 295 n->nud_state); 296 dump_stack(); 297 } 298 } 299 300 static int neigh_del_timer(struct neighbour *n) 301 { 302 if ((n->nud_state & NUD_IN_TIMER) && 303 del_timer(&n->timer)) { 304 neigh_release(n); 305 return 1; 306 } 307 return 0; 308 } 309 310 static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net) 311 { 312 struct sk_buff_head tmp; 313 unsigned long flags; 314 struct sk_buff *skb; 315 316 skb_queue_head_init(&tmp); 317 spin_lock_irqsave(&list->lock, flags); 318 skb = skb_peek(list); 319 while (skb != NULL) { 320 struct sk_buff *skb_next = skb_peek_next(skb, list); 321 struct net_device *dev = skb->dev; 322 323 if (net == NULL || net_eq(dev_net(dev), net)) { 324 struct in_device *in_dev; 325 326 rcu_read_lock(); 327 in_dev = __in_dev_get_rcu(dev); 328 if (in_dev) 329 in_dev->arp_parms->qlen--; 330 rcu_read_unlock(); 331 __skb_unlink(skb, list); 332 __skb_queue_tail(&tmp, skb); 333 } 334 skb = skb_next; 335 } 336 spin_unlock_irqrestore(&list->lock, flags); 337 338 while ((skb = __skb_dequeue(&tmp))) { 339 dev_put(skb->dev); 340 kfree_skb(skb); 341 } 342 } 343 344 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev, 345 bool skip_perm) 346 { 347 int i; 348 struct neigh_hash_table *nht; 349 350 nht = rcu_dereference_protected(tbl->nht, 351 lockdep_is_held(&tbl->lock)); 352 353 for (i = 0; i < (1 << nht->hash_shift); i++) { 354 struct neighbour *n; 355 struct neighbour __rcu **np = &nht->hash_buckets[i]; 356 357 while ((n = rcu_dereference_protected(*np, 358 lockdep_is_held(&tbl->lock))) != NULL) { 359 if (dev && n->dev != dev) { 360 np = &n->next; 361 continue; 362 } 363 if (skip_perm && n->nud_state & NUD_PERMANENT) { 364 np = &n->next; 365 continue; 366 } 367 rcu_assign_pointer(*np, 368 rcu_dereference_protected(n->next, 369 lockdep_is_held(&tbl->lock))); 370 write_lock(&n->lock); 371 neigh_del_timer(n); 372 neigh_mark_dead(n); 373 if (refcount_read(&n->refcnt) != 1) { 374 /* The most unpleasant situation. 375 We must destroy neighbour entry, 376 but someone still uses it. 377 378 The destroy will be delayed until 379 the last user releases us, but 380 we must kill timers etc. and move 381 it to safe state. 382 */ 383 __skb_queue_purge(&n->arp_queue); 384 n->arp_queue_len_bytes = 0; 385 n->output = neigh_blackhole; 386 if (n->nud_state & NUD_VALID) 387 n->nud_state = NUD_NOARP; 388 else 389 n->nud_state = NUD_NONE; 390 neigh_dbg(2, "neigh %p is stray\n", n); 391 } 392 write_unlock(&n->lock); 393 neigh_cleanup_and_release(n); 394 } 395 } 396 } 397 398 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) 399 { 400 write_lock_bh(&tbl->lock); 401 neigh_flush_dev(tbl, dev, false); 402 write_unlock_bh(&tbl->lock); 403 } 404 EXPORT_SYMBOL(neigh_changeaddr); 405 406 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, 407 bool skip_perm) 408 { 409 write_lock_bh(&tbl->lock); 410 neigh_flush_dev(tbl, dev, skip_perm); 411 pneigh_ifdown_and_unlock(tbl, dev); 412 pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev)); 413 if (skb_queue_empty_lockless(&tbl->proxy_queue)) 414 del_timer_sync(&tbl->proxy_timer); 415 return 0; 416 } 417 418 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev) 419 { 420 __neigh_ifdown(tbl, dev, true); 421 return 0; 422 } 423 EXPORT_SYMBOL(neigh_carrier_down); 424 425 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) 426 { 427 __neigh_ifdown(tbl, dev, false); 428 return 0; 429 } 430 EXPORT_SYMBOL(neigh_ifdown); 431 432 static struct neighbour *neigh_alloc(struct neigh_table *tbl, 433 struct net_device *dev, 434 u32 flags, bool exempt_from_gc) 435 { 436 struct neighbour *n = NULL; 437 unsigned long now = jiffies; 438 int entries; 439 440 if (exempt_from_gc) 441 goto do_alloc; 442 443 entries = atomic_inc_return(&tbl->gc_entries) - 1; 444 if (entries >= tbl->gc_thresh3 || 445 (entries >= tbl->gc_thresh2 && 446 time_after(now, tbl->last_flush + 5 * HZ))) { 447 if (!neigh_forced_gc(tbl) && 448 entries >= tbl->gc_thresh3) { 449 net_info_ratelimited("%s: neighbor table overflow!\n", 450 tbl->id); 451 NEIGH_CACHE_STAT_INC(tbl, table_fulls); 452 goto out_entries; 453 } 454 } 455 456 do_alloc: 457 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC); 458 if (!n) 459 goto out_entries; 460 461 __skb_queue_head_init(&n->arp_queue); 462 rwlock_init(&n->lock); 463 seqlock_init(&n->ha_lock); 464 n->updated = n->used = now; 465 n->nud_state = NUD_NONE; 466 n->output = neigh_blackhole; 467 n->flags = flags; 468 seqlock_init(&n->hh.hh_lock); 469 n->parms = neigh_parms_clone(&tbl->parms); 470 timer_setup(&n->timer, neigh_timer_handler, 0); 471 472 NEIGH_CACHE_STAT_INC(tbl, allocs); 473 n->tbl = tbl; 474 refcount_set(&n->refcnt, 1); 475 n->dead = 1; 476 INIT_LIST_HEAD(&n->gc_list); 477 INIT_LIST_HEAD(&n->managed_list); 478 479 atomic_inc(&tbl->entries); 480 out: 481 return n; 482 483 out_entries: 484 if (!exempt_from_gc) 485 atomic_dec(&tbl->gc_entries); 486 goto out; 487 } 488 489 static void neigh_get_hash_rnd(u32 *x) 490 { 491 *x = get_random_u32() | 1; 492 } 493 494 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) 495 { 496 size_t size = (1 << shift) * sizeof(struct neighbour *); 497 struct neigh_hash_table *ret; 498 struct neighbour __rcu **buckets; 499 int i; 500 501 ret = kmalloc(sizeof(*ret), GFP_ATOMIC); 502 if (!ret) 503 return NULL; 504 if (size <= PAGE_SIZE) { 505 buckets = kzalloc(size, GFP_ATOMIC); 506 } else { 507 buckets = (struct neighbour __rcu **) 508 __get_free_pages(GFP_ATOMIC | __GFP_ZERO, 509 get_order(size)); 510 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC); 511 } 512 if (!buckets) { 513 kfree(ret); 514 return NULL; 515 } 516 ret->hash_buckets = buckets; 517 ret->hash_shift = shift; 518 for (i = 0; i < NEIGH_NUM_HASH_RND; i++) 519 neigh_get_hash_rnd(&ret->hash_rnd[i]); 520 return ret; 521 } 522 523 static void neigh_hash_free_rcu(struct rcu_head *head) 524 { 525 struct neigh_hash_table *nht = container_of(head, 526 struct neigh_hash_table, 527 rcu); 528 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); 529 struct neighbour __rcu **buckets = nht->hash_buckets; 530 531 if (size <= PAGE_SIZE) { 532 kfree(buckets); 533 } else { 534 kmemleak_free(buckets); 535 free_pages((unsigned long)buckets, get_order(size)); 536 } 537 kfree(nht); 538 } 539 540 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl, 541 unsigned long new_shift) 542 { 543 unsigned int i, hash; 544 struct neigh_hash_table *new_nht, *old_nht; 545 546 NEIGH_CACHE_STAT_INC(tbl, hash_grows); 547 548 old_nht = rcu_dereference_protected(tbl->nht, 549 lockdep_is_held(&tbl->lock)); 550 new_nht = neigh_hash_alloc(new_shift); 551 if (!new_nht) 552 return old_nht; 553 554 for (i = 0; i < (1 << old_nht->hash_shift); i++) { 555 struct neighbour *n, *next; 556 557 for (n = rcu_dereference_protected(old_nht->hash_buckets[i], 558 lockdep_is_held(&tbl->lock)); 559 n != NULL; 560 n = next) { 561 hash = tbl->hash(n->primary_key, n->dev, 562 new_nht->hash_rnd); 563 564 hash >>= (32 - new_nht->hash_shift); 565 next = rcu_dereference_protected(n->next, 566 lockdep_is_held(&tbl->lock)); 567 568 rcu_assign_pointer(n->next, 569 rcu_dereference_protected( 570 new_nht->hash_buckets[hash], 571 lockdep_is_held(&tbl->lock))); 572 rcu_assign_pointer(new_nht->hash_buckets[hash], n); 573 } 574 } 575 576 rcu_assign_pointer(tbl->nht, new_nht); 577 call_rcu(&old_nht->rcu, neigh_hash_free_rcu); 578 return new_nht; 579 } 580 581 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, 582 struct net_device *dev) 583 { 584 struct neighbour *n; 585 586 NEIGH_CACHE_STAT_INC(tbl, lookups); 587 588 rcu_read_lock_bh(); 589 n = __neigh_lookup_noref(tbl, pkey, dev); 590 if (n) { 591 if (!refcount_inc_not_zero(&n->refcnt)) 592 n = NULL; 593 NEIGH_CACHE_STAT_INC(tbl, hits); 594 } 595 596 rcu_read_unlock_bh(); 597 return n; 598 } 599 EXPORT_SYMBOL(neigh_lookup); 600 601 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, 602 const void *pkey) 603 { 604 struct neighbour *n; 605 unsigned int key_len = tbl->key_len; 606 u32 hash_val; 607 struct neigh_hash_table *nht; 608 609 NEIGH_CACHE_STAT_INC(tbl, lookups); 610 611 rcu_read_lock_bh(); 612 nht = rcu_dereference_bh(tbl->nht); 613 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift); 614 615 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); 616 n != NULL; 617 n = rcu_dereference_bh(n->next)) { 618 if (!memcmp(n->primary_key, pkey, key_len) && 619 net_eq(dev_net(n->dev), net)) { 620 if (!refcount_inc_not_zero(&n->refcnt)) 621 n = NULL; 622 NEIGH_CACHE_STAT_INC(tbl, hits); 623 break; 624 } 625 } 626 627 rcu_read_unlock_bh(); 628 return n; 629 } 630 EXPORT_SYMBOL(neigh_lookup_nodev); 631 632 static struct neighbour * 633 ___neigh_create(struct neigh_table *tbl, const void *pkey, 634 struct net_device *dev, u32 flags, 635 bool exempt_from_gc, bool want_ref) 636 { 637 u32 hash_val, key_len = tbl->key_len; 638 struct neighbour *n1, *rc, *n; 639 struct neigh_hash_table *nht; 640 int error; 641 642 n = neigh_alloc(tbl, dev, flags, exempt_from_gc); 643 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc); 644 if (!n) { 645 rc = ERR_PTR(-ENOBUFS); 646 goto out; 647 } 648 649 memcpy(n->primary_key, pkey, key_len); 650 n->dev = dev; 651 netdev_hold(dev, &n->dev_tracker, GFP_ATOMIC); 652 653 /* Protocol specific setup. */ 654 if (tbl->constructor && (error = tbl->constructor(n)) < 0) { 655 rc = ERR_PTR(error); 656 goto out_neigh_release; 657 } 658 659 if (dev->netdev_ops->ndo_neigh_construct) { 660 error = dev->netdev_ops->ndo_neigh_construct(dev, n); 661 if (error < 0) { 662 rc = ERR_PTR(error); 663 goto out_neigh_release; 664 } 665 } 666 667 /* Device specific setup. */ 668 if (n->parms->neigh_setup && 669 (error = n->parms->neigh_setup(n)) < 0) { 670 rc = ERR_PTR(error); 671 goto out_neigh_release; 672 } 673 674 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1); 675 676 write_lock_bh(&tbl->lock); 677 nht = rcu_dereference_protected(tbl->nht, 678 lockdep_is_held(&tbl->lock)); 679 680 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) 681 nht = neigh_hash_grow(tbl, nht->hash_shift + 1); 682 683 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); 684 685 if (n->parms->dead) { 686 rc = ERR_PTR(-EINVAL); 687 goto out_tbl_unlock; 688 } 689 690 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val], 691 lockdep_is_held(&tbl->lock)); 692 n1 != NULL; 693 n1 = rcu_dereference_protected(n1->next, 694 lockdep_is_held(&tbl->lock))) { 695 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) { 696 if (want_ref) 697 neigh_hold(n1); 698 rc = n1; 699 goto out_tbl_unlock; 700 } 701 } 702 703 n->dead = 0; 704 if (!exempt_from_gc) 705 list_add_tail(&n->gc_list, &n->tbl->gc_list); 706 if (n->flags & NTF_MANAGED) 707 list_add_tail(&n->managed_list, &n->tbl->managed_list); 708 if (want_ref) 709 neigh_hold(n); 710 rcu_assign_pointer(n->next, 711 rcu_dereference_protected(nht->hash_buckets[hash_val], 712 lockdep_is_held(&tbl->lock))); 713 rcu_assign_pointer(nht->hash_buckets[hash_val], n); 714 write_unlock_bh(&tbl->lock); 715 neigh_dbg(2, "neigh %p is created\n", n); 716 rc = n; 717 out: 718 return rc; 719 out_tbl_unlock: 720 write_unlock_bh(&tbl->lock); 721 out_neigh_release: 722 if (!exempt_from_gc) 723 atomic_dec(&tbl->gc_entries); 724 neigh_release(n); 725 goto out; 726 } 727 728 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, 729 struct net_device *dev, bool want_ref) 730 { 731 return ___neigh_create(tbl, pkey, dev, 0, false, want_ref); 732 } 733 EXPORT_SYMBOL(__neigh_create); 734 735 static u32 pneigh_hash(const void *pkey, unsigned int key_len) 736 { 737 u32 hash_val = *(u32 *)(pkey + key_len - 4); 738 hash_val ^= (hash_val >> 16); 739 hash_val ^= hash_val >> 8; 740 hash_val ^= hash_val >> 4; 741 hash_val &= PNEIGH_HASHMASK; 742 return hash_val; 743 } 744 745 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n, 746 struct net *net, 747 const void *pkey, 748 unsigned int key_len, 749 struct net_device *dev) 750 { 751 while (n) { 752 if (!memcmp(n->key, pkey, key_len) && 753 net_eq(pneigh_net(n), net) && 754 (n->dev == dev || !n->dev)) 755 return n; 756 n = n->next; 757 } 758 return NULL; 759 } 760 761 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, 762 struct net *net, const void *pkey, struct net_device *dev) 763 { 764 unsigned int key_len = tbl->key_len; 765 u32 hash_val = pneigh_hash(pkey, key_len); 766 767 return __pneigh_lookup_1(tbl->phash_buckets[hash_val], 768 net, pkey, key_len, dev); 769 } 770 EXPORT_SYMBOL_GPL(__pneigh_lookup); 771 772 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, 773 struct net *net, const void *pkey, 774 struct net_device *dev, int creat) 775 { 776 struct pneigh_entry *n; 777 unsigned int key_len = tbl->key_len; 778 u32 hash_val = pneigh_hash(pkey, key_len); 779 780 read_lock_bh(&tbl->lock); 781 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val], 782 net, pkey, key_len, dev); 783 read_unlock_bh(&tbl->lock); 784 785 if (n || !creat) 786 goto out; 787 788 ASSERT_RTNL(); 789 790 n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL); 791 if (!n) 792 goto out; 793 794 write_pnet(&n->net, net); 795 memcpy(n->key, pkey, key_len); 796 n->dev = dev; 797 netdev_hold(dev, &n->dev_tracker, GFP_KERNEL); 798 799 if (tbl->pconstructor && tbl->pconstructor(n)) { 800 netdev_put(dev, &n->dev_tracker); 801 kfree(n); 802 n = NULL; 803 goto out; 804 } 805 806 write_lock_bh(&tbl->lock); 807 n->next = tbl->phash_buckets[hash_val]; 808 tbl->phash_buckets[hash_val] = n; 809 write_unlock_bh(&tbl->lock); 810 out: 811 return n; 812 } 813 EXPORT_SYMBOL(pneigh_lookup); 814 815 816 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, 817 struct net_device *dev) 818 { 819 struct pneigh_entry *n, **np; 820 unsigned int key_len = tbl->key_len; 821 u32 hash_val = pneigh_hash(pkey, key_len); 822 823 write_lock_bh(&tbl->lock); 824 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; 825 np = &n->next) { 826 if (!memcmp(n->key, pkey, key_len) && n->dev == dev && 827 net_eq(pneigh_net(n), net)) { 828 *np = n->next; 829 write_unlock_bh(&tbl->lock); 830 if (tbl->pdestructor) 831 tbl->pdestructor(n); 832 netdev_put(n->dev, &n->dev_tracker); 833 kfree(n); 834 return 0; 835 } 836 } 837 write_unlock_bh(&tbl->lock); 838 return -ENOENT; 839 } 840 841 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, 842 struct net_device *dev) 843 { 844 struct pneigh_entry *n, **np, *freelist = NULL; 845 u32 h; 846 847 for (h = 0; h <= PNEIGH_HASHMASK; h++) { 848 np = &tbl->phash_buckets[h]; 849 while ((n = *np) != NULL) { 850 if (!dev || n->dev == dev) { 851 *np = n->next; 852 n->next = freelist; 853 freelist = n; 854 continue; 855 } 856 np = &n->next; 857 } 858 } 859 write_unlock_bh(&tbl->lock); 860 while ((n = freelist)) { 861 freelist = n->next; 862 n->next = NULL; 863 if (tbl->pdestructor) 864 tbl->pdestructor(n); 865 netdev_put(n->dev, &n->dev_tracker); 866 kfree(n); 867 } 868 return -ENOENT; 869 } 870 871 static void neigh_parms_destroy(struct neigh_parms *parms); 872 873 static inline void neigh_parms_put(struct neigh_parms *parms) 874 { 875 if (refcount_dec_and_test(&parms->refcnt)) 876 neigh_parms_destroy(parms); 877 } 878 879 /* 880 * neighbour must already be out of the table; 881 * 882 */ 883 void neigh_destroy(struct neighbour *neigh) 884 { 885 struct net_device *dev = neigh->dev; 886 887 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); 888 889 if (!neigh->dead) { 890 pr_warn("Destroying alive neighbour %p\n", neigh); 891 dump_stack(); 892 return; 893 } 894 895 if (neigh_del_timer(neigh)) 896 pr_warn("Impossible event\n"); 897 898 write_lock_bh(&neigh->lock); 899 __skb_queue_purge(&neigh->arp_queue); 900 write_unlock_bh(&neigh->lock); 901 neigh->arp_queue_len_bytes = 0; 902 903 if (dev->netdev_ops->ndo_neigh_destroy) 904 dev->netdev_ops->ndo_neigh_destroy(dev, neigh); 905 906 netdev_put(dev, &neigh->dev_tracker); 907 neigh_parms_put(neigh->parms); 908 909 neigh_dbg(2, "neigh %p is destroyed\n", neigh); 910 911 atomic_dec(&neigh->tbl->entries); 912 kfree_rcu(neigh, rcu); 913 } 914 EXPORT_SYMBOL(neigh_destroy); 915 916 /* Neighbour state is suspicious; 917 disable fast path. 918 919 Called with write_locked neigh. 920 */ 921 static void neigh_suspect(struct neighbour *neigh) 922 { 923 neigh_dbg(2, "neigh %p is suspected\n", neigh); 924 925 neigh->output = neigh->ops->output; 926 } 927 928 /* Neighbour state is OK; 929 enable fast path. 930 931 Called with write_locked neigh. 932 */ 933 static void neigh_connect(struct neighbour *neigh) 934 { 935 neigh_dbg(2, "neigh %p is connected\n", neigh); 936 937 neigh->output = neigh->ops->connected_output; 938 } 939 940 static void neigh_periodic_work(struct work_struct *work) 941 { 942 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); 943 struct neighbour *n; 944 struct neighbour __rcu **np; 945 unsigned int i; 946 struct neigh_hash_table *nht; 947 948 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); 949 950 write_lock_bh(&tbl->lock); 951 nht = rcu_dereference_protected(tbl->nht, 952 lockdep_is_held(&tbl->lock)); 953 954 /* 955 * periodically recompute ReachableTime from random function 956 */ 957 958 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { 959 struct neigh_parms *p; 960 tbl->last_rand = jiffies; 961 list_for_each_entry(p, &tbl->parms_list, list) 962 p->reachable_time = 963 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 964 } 965 966 if (atomic_read(&tbl->entries) < tbl->gc_thresh1) 967 goto out; 968 969 for (i = 0 ; i < (1 << nht->hash_shift); i++) { 970 np = &nht->hash_buckets[i]; 971 972 while ((n = rcu_dereference_protected(*np, 973 lockdep_is_held(&tbl->lock))) != NULL) { 974 unsigned int state; 975 976 write_lock(&n->lock); 977 978 state = n->nud_state; 979 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) || 980 (n->flags & NTF_EXT_LEARNED)) { 981 write_unlock(&n->lock); 982 goto next_elt; 983 } 984 985 if (time_before(n->used, n->confirmed)) 986 n->used = n->confirmed; 987 988 if (refcount_read(&n->refcnt) == 1 && 989 (state == NUD_FAILED || 990 time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) { 991 *np = n->next; 992 neigh_mark_dead(n); 993 write_unlock(&n->lock); 994 neigh_cleanup_and_release(n); 995 continue; 996 } 997 write_unlock(&n->lock); 998 999 next_elt: 1000 np = &n->next; 1001 } 1002 /* 1003 * It's fine to release lock here, even if hash table 1004 * grows while we are preempted. 1005 */ 1006 write_unlock_bh(&tbl->lock); 1007 cond_resched(); 1008 write_lock_bh(&tbl->lock); 1009 nht = rcu_dereference_protected(tbl->nht, 1010 lockdep_is_held(&tbl->lock)); 1011 } 1012 out: 1013 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks. 1014 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2 1015 * BASE_REACHABLE_TIME. 1016 */ 1017 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work, 1018 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1); 1019 write_unlock_bh(&tbl->lock); 1020 } 1021 1022 static __inline__ int neigh_max_probes(struct neighbour *n) 1023 { 1024 struct neigh_parms *p = n->parms; 1025 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) + 1026 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) : 1027 NEIGH_VAR(p, MCAST_PROBES)); 1028 } 1029 1030 static void neigh_invalidate(struct neighbour *neigh) 1031 __releases(neigh->lock) 1032 __acquires(neigh->lock) 1033 { 1034 struct sk_buff *skb; 1035 1036 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); 1037 neigh_dbg(2, "neigh %p is failed\n", neigh); 1038 neigh->updated = jiffies; 1039 1040 /* It is very thin place. report_unreachable is very complicated 1041 routine. Particularly, it can hit the same neighbour entry! 1042 1043 So that, we try to be accurate and avoid dead loop. --ANK 1044 */ 1045 while (neigh->nud_state == NUD_FAILED && 1046 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { 1047 write_unlock(&neigh->lock); 1048 neigh->ops->error_report(neigh, skb); 1049 write_lock(&neigh->lock); 1050 } 1051 __skb_queue_purge(&neigh->arp_queue); 1052 neigh->arp_queue_len_bytes = 0; 1053 } 1054 1055 static void neigh_probe(struct neighbour *neigh) 1056 __releases(neigh->lock) 1057 { 1058 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); 1059 /* keep skb alive even if arp_queue overflows */ 1060 if (skb) 1061 skb = skb_clone(skb, GFP_ATOMIC); 1062 write_unlock(&neigh->lock); 1063 if (neigh->ops->solicit) 1064 neigh->ops->solicit(neigh, skb); 1065 atomic_inc(&neigh->probes); 1066 consume_skb(skb); 1067 } 1068 1069 /* Called when a timer expires for a neighbour entry. */ 1070 1071 static void neigh_timer_handler(struct timer_list *t) 1072 { 1073 unsigned long now, next; 1074 struct neighbour *neigh = from_timer(neigh, t, timer); 1075 unsigned int state; 1076 int notify = 0; 1077 1078 write_lock(&neigh->lock); 1079 1080 state = neigh->nud_state; 1081 now = jiffies; 1082 next = now + HZ; 1083 1084 if (!(state & NUD_IN_TIMER)) 1085 goto out; 1086 1087 if (state & NUD_REACHABLE) { 1088 if (time_before_eq(now, 1089 neigh->confirmed + neigh->parms->reachable_time)) { 1090 neigh_dbg(2, "neigh %p is still alive\n", neigh); 1091 next = neigh->confirmed + neigh->parms->reachable_time; 1092 } else if (time_before_eq(now, 1093 neigh->used + 1094 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) { 1095 neigh_dbg(2, "neigh %p is delayed\n", neigh); 1096 neigh->nud_state = NUD_DELAY; 1097 neigh->updated = jiffies; 1098 neigh_suspect(neigh); 1099 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME); 1100 } else { 1101 neigh_dbg(2, "neigh %p is suspected\n", neigh); 1102 neigh->nud_state = NUD_STALE; 1103 neigh->updated = jiffies; 1104 neigh_suspect(neigh); 1105 notify = 1; 1106 } 1107 } else if (state & NUD_DELAY) { 1108 if (time_before_eq(now, 1109 neigh->confirmed + 1110 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) { 1111 neigh_dbg(2, "neigh %p is now reachable\n", neigh); 1112 neigh->nud_state = NUD_REACHABLE; 1113 neigh->updated = jiffies; 1114 neigh_connect(neigh); 1115 notify = 1; 1116 next = neigh->confirmed + neigh->parms->reachable_time; 1117 } else { 1118 neigh_dbg(2, "neigh %p is probed\n", neigh); 1119 neigh->nud_state = NUD_PROBE; 1120 neigh->updated = jiffies; 1121 atomic_set(&neigh->probes, 0); 1122 notify = 1; 1123 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), 1124 HZ/100); 1125 } 1126 } else { 1127 /* NUD_PROBE|NUD_INCOMPLETE */ 1128 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100); 1129 } 1130 1131 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && 1132 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { 1133 neigh->nud_state = NUD_FAILED; 1134 notify = 1; 1135 neigh_invalidate(neigh); 1136 goto out; 1137 } 1138 1139 if (neigh->nud_state & NUD_IN_TIMER) { 1140 if (time_before(next, jiffies + HZ/100)) 1141 next = jiffies + HZ/100; 1142 if (!mod_timer(&neigh->timer, next)) 1143 neigh_hold(neigh); 1144 } 1145 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { 1146 neigh_probe(neigh); 1147 } else { 1148 out: 1149 write_unlock(&neigh->lock); 1150 } 1151 1152 if (notify) 1153 neigh_update_notify(neigh, 0); 1154 1155 trace_neigh_timer_handler(neigh, 0); 1156 1157 neigh_release(neigh); 1158 } 1159 1160 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, 1161 const bool immediate_ok) 1162 { 1163 int rc; 1164 bool immediate_probe = false; 1165 1166 write_lock_bh(&neigh->lock); 1167 1168 rc = 0; 1169 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) 1170 goto out_unlock_bh; 1171 if (neigh->dead) 1172 goto out_dead; 1173 1174 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { 1175 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) + 1176 NEIGH_VAR(neigh->parms, APP_PROBES)) { 1177 unsigned long next, now = jiffies; 1178 1179 atomic_set(&neigh->probes, 1180 NEIGH_VAR(neigh->parms, UCAST_PROBES)); 1181 neigh_del_timer(neigh); 1182 neigh->nud_state = NUD_INCOMPLETE; 1183 neigh->updated = now; 1184 if (!immediate_ok) { 1185 next = now + 1; 1186 } else { 1187 immediate_probe = true; 1188 next = now + max(NEIGH_VAR(neigh->parms, 1189 RETRANS_TIME), 1190 HZ / 100); 1191 } 1192 neigh_add_timer(neigh, next); 1193 } else { 1194 neigh->nud_state = NUD_FAILED; 1195 neigh->updated = jiffies; 1196 write_unlock_bh(&neigh->lock); 1197 1198 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_FAILED); 1199 return 1; 1200 } 1201 } else if (neigh->nud_state & NUD_STALE) { 1202 neigh_dbg(2, "neigh %p is delayed\n", neigh); 1203 neigh_del_timer(neigh); 1204 neigh->nud_state = NUD_DELAY; 1205 neigh->updated = jiffies; 1206 neigh_add_timer(neigh, jiffies + 1207 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME)); 1208 } 1209 1210 if (neigh->nud_state == NUD_INCOMPLETE) { 1211 if (skb) { 1212 while (neigh->arp_queue_len_bytes + skb->truesize > 1213 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) { 1214 struct sk_buff *buff; 1215 1216 buff = __skb_dequeue(&neigh->arp_queue); 1217 if (!buff) 1218 break; 1219 neigh->arp_queue_len_bytes -= buff->truesize; 1220 kfree_skb_reason(buff, SKB_DROP_REASON_NEIGH_QUEUEFULL); 1221 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); 1222 } 1223 skb_dst_force(skb); 1224 __skb_queue_tail(&neigh->arp_queue, skb); 1225 neigh->arp_queue_len_bytes += skb->truesize; 1226 } 1227 rc = 1; 1228 } 1229 out_unlock_bh: 1230 if (immediate_probe) 1231 neigh_probe(neigh); 1232 else 1233 write_unlock(&neigh->lock); 1234 local_bh_enable(); 1235 trace_neigh_event_send_done(neigh, rc); 1236 return rc; 1237 1238 out_dead: 1239 if (neigh->nud_state & NUD_STALE) 1240 goto out_unlock_bh; 1241 write_unlock_bh(&neigh->lock); 1242 kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_DEAD); 1243 trace_neigh_event_send_dead(neigh, 1); 1244 return 1; 1245 } 1246 EXPORT_SYMBOL(__neigh_event_send); 1247 1248 static void neigh_update_hhs(struct neighbour *neigh) 1249 { 1250 struct hh_cache *hh; 1251 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *) 1252 = NULL; 1253 1254 if (neigh->dev->header_ops) 1255 update = neigh->dev->header_ops->cache_update; 1256 1257 if (update) { 1258 hh = &neigh->hh; 1259 if (READ_ONCE(hh->hh_len)) { 1260 write_seqlock_bh(&hh->hh_lock); 1261 update(hh, neigh->dev, neigh->ha); 1262 write_sequnlock_bh(&hh->hh_lock); 1263 } 1264 } 1265 } 1266 1267 /* Generic update routine. 1268 -- lladdr is new lladdr or NULL, if it is not supplied. 1269 -- new is new state. 1270 -- flags 1271 NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr, 1272 if it is different. 1273 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected" 1274 lladdr instead of overriding it 1275 if it is different. 1276 NEIGH_UPDATE_F_ADMIN means that the change is administrative. 1277 NEIGH_UPDATE_F_USE means that the entry is user triggered. 1278 NEIGH_UPDATE_F_MANAGED means that the entry will be auto-refreshed. 1279 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing 1280 NTF_ROUTER flag. 1281 NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as 1282 a router. 1283 1284 Caller MUST hold reference count on the entry. 1285 */ 1286 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, 1287 u8 new, u32 flags, u32 nlmsg_pid, 1288 struct netlink_ext_ack *extack) 1289 { 1290 bool gc_update = false, managed_update = false; 1291 int update_isrouter = 0; 1292 struct net_device *dev; 1293 int err, notify = 0; 1294 u8 old; 1295 1296 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid); 1297 1298 write_lock_bh(&neigh->lock); 1299 1300 dev = neigh->dev; 1301 old = neigh->nud_state; 1302 err = -EPERM; 1303 1304 if (neigh->dead) { 1305 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); 1306 new = old; 1307 goto out; 1308 } 1309 if (!(flags & NEIGH_UPDATE_F_ADMIN) && 1310 (old & (NUD_NOARP | NUD_PERMANENT))) 1311 goto out; 1312 1313 neigh_update_flags(neigh, flags, ¬ify, &gc_update, &managed_update); 1314 if (flags & (NEIGH_UPDATE_F_USE | NEIGH_UPDATE_F_MANAGED)) { 1315 new = old & ~NUD_PERMANENT; 1316 neigh->nud_state = new; 1317 err = 0; 1318 goto out; 1319 } 1320 1321 if (!(new & NUD_VALID)) { 1322 neigh_del_timer(neigh); 1323 if (old & NUD_CONNECTED) 1324 neigh_suspect(neigh); 1325 neigh->nud_state = new; 1326 err = 0; 1327 notify = old & NUD_VALID; 1328 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) && 1329 (new & NUD_FAILED)) { 1330 neigh_invalidate(neigh); 1331 notify = 1; 1332 } 1333 goto out; 1334 } 1335 1336 /* Compare new lladdr with cached one */ 1337 if (!dev->addr_len) { 1338 /* First case: device needs no address. */ 1339 lladdr = neigh->ha; 1340 } else if (lladdr) { 1341 /* The second case: if something is already cached 1342 and a new address is proposed: 1343 - compare new & old 1344 - if they are different, check override flag 1345 */ 1346 if ((old & NUD_VALID) && 1347 !memcmp(lladdr, neigh->ha, dev->addr_len)) 1348 lladdr = neigh->ha; 1349 } else { 1350 /* No address is supplied; if we know something, 1351 use it, otherwise discard the request. 1352 */ 1353 err = -EINVAL; 1354 if (!(old & NUD_VALID)) { 1355 NL_SET_ERR_MSG(extack, "No link layer address given"); 1356 goto out; 1357 } 1358 lladdr = neigh->ha; 1359 } 1360 1361 /* Update confirmed timestamp for neighbour entry after we 1362 * received ARP packet even if it doesn't change IP to MAC binding. 1363 */ 1364 if (new & NUD_CONNECTED) 1365 neigh->confirmed = jiffies; 1366 1367 /* If entry was valid and address is not changed, 1368 do not change entry state, if new one is STALE. 1369 */ 1370 err = 0; 1371 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER; 1372 if (old & NUD_VALID) { 1373 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) { 1374 update_isrouter = 0; 1375 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) && 1376 (old & NUD_CONNECTED)) { 1377 lladdr = neigh->ha; 1378 new = NUD_STALE; 1379 } else 1380 goto out; 1381 } else { 1382 if (lladdr == neigh->ha && new == NUD_STALE && 1383 !(flags & NEIGH_UPDATE_F_ADMIN)) 1384 new = old; 1385 } 1386 } 1387 1388 /* Update timestamp only once we know we will make a change to the 1389 * neighbour entry. Otherwise we risk to move the locktime window with 1390 * noop updates and ignore relevant ARP updates. 1391 */ 1392 if (new != old || lladdr != neigh->ha) 1393 neigh->updated = jiffies; 1394 1395 if (new != old) { 1396 neigh_del_timer(neigh); 1397 if (new & NUD_PROBE) 1398 atomic_set(&neigh->probes, 0); 1399 if (new & NUD_IN_TIMER) 1400 neigh_add_timer(neigh, (jiffies + 1401 ((new & NUD_REACHABLE) ? 1402 neigh->parms->reachable_time : 1403 0))); 1404 neigh->nud_state = new; 1405 notify = 1; 1406 } 1407 1408 if (lladdr != neigh->ha) { 1409 write_seqlock(&neigh->ha_lock); 1410 memcpy(&neigh->ha, lladdr, dev->addr_len); 1411 write_sequnlock(&neigh->ha_lock); 1412 neigh_update_hhs(neigh); 1413 if (!(new & NUD_CONNECTED)) 1414 neigh->confirmed = jiffies - 1415 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1); 1416 notify = 1; 1417 } 1418 if (new == old) 1419 goto out; 1420 if (new & NUD_CONNECTED) 1421 neigh_connect(neigh); 1422 else 1423 neigh_suspect(neigh); 1424 if (!(old & NUD_VALID)) { 1425 struct sk_buff *skb; 1426 1427 /* Again: avoid dead loop if something went wrong */ 1428 1429 while (neigh->nud_state & NUD_VALID && 1430 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { 1431 struct dst_entry *dst = skb_dst(skb); 1432 struct neighbour *n2, *n1 = neigh; 1433 write_unlock_bh(&neigh->lock); 1434 1435 rcu_read_lock(); 1436 1437 /* Why not just use 'neigh' as-is? The problem is that 1438 * things such as shaper, eql, and sch_teql can end up 1439 * using alternative, different, neigh objects to output 1440 * the packet in the output path. So what we need to do 1441 * here is re-lookup the top-level neigh in the path so 1442 * we can reinject the packet there. 1443 */ 1444 n2 = NULL; 1445 if (dst && dst->obsolete != DST_OBSOLETE_DEAD) { 1446 n2 = dst_neigh_lookup_skb(dst, skb); 1447 if (n2) 1448 n1 = n2; 1449 } 1450 n1->output(n1, skb); 1451 if (n2) 1452 neigh_release(n2); 1453 rcu_read_unlock(); 1454 1455 write_lock_bh(&neigh->lock); 1456 } 1457 __skb_queue_purge(&neigh->arp_queue); 1458 neigh->arp_queue_len_bytes = 0; 1459 } 1460 out: 1461 if (update_isrouter) 1462 neigh_update_is_router(neigh, flags, ¬ify); 1463 write_unlock_bh(&neigh->lock); 1464 if (((new ^ old) & NUD_PERMANENT) || gc_update) 1465 neigh_update_gc_list(neigh); 1466 if (managed_update) 1467 neigh_update_managed_list(neigh); 1468 if (notify) 1469 neigh_update_notify(neigh, nlmsg_pid); 1470 trace_neigh_update_done(neigh, err); 1471 return err; 1472 } 1473 1474 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, 1475 u32 flags, u32 nlmsg_pid) 1476 { 1477 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL); 1478 } 1479 EXPORT_SYMBOL(neigh_update); 1480 1481 /* Update the neigh to listen temporarily for probe responses, even if it is 1482 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing. 1483 */ 1484 void __neigh_set_probe_once(struct neighbour *neigh) 1485 { 1486 if (neigh->dead) 1487 return; 1488 neigh->updated = jiffies; 1489 if (!(neigh->nud_state & NUD_FAILED)) 1490 return; 1491 neigh->nud_state = NUD_INCOMPLETE; 1492 atomic_set(&neigh->probes, neigh_max_probes(neigh)); 1493 neigh_add_timer(neigh, 1494 jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), 1495 HZ/100)); 1496 } 1497 EXPORT_SYMBOL(__neigh_set_probe_once); 1498 1499 struct neighbour *neigh_event_ns(struct neigh_table *tbl, 1500 u8 *lladdr, void *saddr, 1501 struct net_device *dev) 1502 { 1503 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev, 1504 lladdr || !dev->addr_len); 1505 if (neigh) 1506 neigh_update(neigh, lladdr, NUD_STALE, 1507 NEIGH_UPDATE_F_OVERRIDE, 0); 1508 return neigh; 1509 } 1510 EXPORT_SYMBOL(neigh_event_ns); 1511 1512 /* called with read_lock_bh(&n->lock); */ 1513 static void neigh_hh_init(struct neighbour *n) 1514 { 1515 struct net_device *dev = n->dev; 1516 __be16 prot = n->tbl->protocol; 1517 struct hh_cache *hh = &n->hh; 1518 1519 write_lock_bh(&n->lock); 1520 1521 /* Only one thread can come in here and initialize the 1522 * hh_cache entry. 1523 */ 1524 if (!hh->hh_len) 1525 dev->header_ops->cache(n, hh, prot); 1526 1527 write_unlock_bh(&n->lock); 1528 } 1529 1530 /* Slow and careful. */ 1531 1532 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) 1533 { 1534 int rc = 0; 1535 1536 if (!neigh_event_send(neigh, skb)) { 1537 int err; 1538 struct net_device *dev = neigh->dev; 1539 unsigned int seq; 1540 1541 if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len)) 1542 neigh_hh_init(neigh); 1543 1544 do { 1545 __skb_pull(skb, skb_network_offset(skb)); 1546 seq = read_seqbegin(&neigh->ha_lock); 1547 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 1548 neigh->ha, NULL, skb->len); 1549 } while (read_seqretry(&neigh->ha_lock, seq)); 1550 1551 if (err >= 0) 1552 rc = dev_queue_xmit(skb); 1553 else 1554 goto out_kfree_skb; 1555 } 1556 out: 1557 return rc; 1558 out_kfree_skb: 1559 rc = -EINVAL; 1560 kfree_skb(skb); 1561 goto out; 1562 } 1563 EXPORT_SYMBOL(neigh_resolve_output); 1564 1565 /* As fast as possible without hh cache */ 1566 1567 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb) 1568 { 1569 struct net_device *dev = neigh->dev; 1570 unsigned int seq; 1571 int err; 1572 1573 do { 1574 __skb_pull(skb, skb_network_offset(skb)); 1575 seq = read_seqbegin(&neigh->ha_lock); 1576 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 1577 neigh->ha, NULL, skb->len); 1578 } while (read_seqretry(&neigh->ha_lock, seq)); 1579 1580 if (err >= 0) 1581 err = dev_queue_xmit(skb); 1582 else { 1583 err = -EINVAL; 1584 kfree_skb(skb); 1585 } 1586 return err; 1587 } 1588 EXPORT_SYMBOL(neigh_connected_output); 1589 1590 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb) 1591 { 1592 return dev_queue_xmit(skb); 1593 } 1594 EXPORT_SYMBOL(neigh_direct_output); 1595 1596 static void neigh_managed_work(struct work_struct *work) 1597 { 1598 struct neigh_table *tbl = container_of(work, struct neigh_table, 1599 managed_work.work); 1600 struct neighbour *neigh; 1601 1602 write_lock_bh(&tbl->lock); 1603 list_for_each_entry(neigh, &tbl->managed_list, managed_list) 1604 neigh_event_send_probe(neigh, NULL, false); 1605 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 1606 NEIGH_VAR(&tbl->parms, INTERVAL_PROBE_TIME_MS)); 1607 write_unlock_bh(&tbl->lock); 1608 } 1609 1610 static void neigh_proxy_process(struct timer_list *t) 1611 { 1612 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer); 1613 long sched_next = 0; 1614 unsigned long now = jiffies; 1615 struct sk_buff *skb, *n; 1616 1617 spin_lock(&tbl->proxy_queue.lock); 1618 1619 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) { 1620 long tdif = NEIGH_CB(skb)->sched_next - now; 1621 1622 if (tdif <= 0) { 1623 struct net_device *dev = skb->dev; 1624 struct in_device *in_dev; 1625 1626 rcu_read_lock(); 1627 in_dev = __in_dev_get_rcu(dev); 1628 if (in_dev) 1629 in_dev->arp_parms->qlen--; 1630 rcu_read_unlock(); 1631 __skb_unlink(skb, &tbl->proxy_queue); 1632 1633 if (tbl->proxy_redo && netif_running(dev)) { 1634 rcu_read_lock(); 1635 tbl->proxy_redo(skb); 1636 rcu_read_unlock(); 1637 } else { 1638 kfree_skb(skb); 1639 } 1640 1641 dev_put(dev); 1642 } else if (!sched_next || tdif < sched_next) 1643 sched_next = tdif; 1644 } 1645 del_timer(&tbl->proxy_timer); 1646 if (sched_next) 1647 mod_timer(&tbl->proxy_timer, jiffies + sched_next); 1648 spin_unlock(&tbl->proxy_queue.lock); 1649 } 1650 1651 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, 1652 struct sk_buff *skb) 1653 { 1654 unsigned long sched_next = jiffies + 1655 prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY)); 1656 1657 if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) { 1658 kfree_skb(skb); 1659 return; 1660 } 1661 1662 NEIGH_CB(skb)->sched_next = sched_next; 1663 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED; 1664 1665 spin_lock(&tbl->proxy_queue.lock); 1666 if (del_timer(&tbl->proxy_timer)) { 1667 if (time_before(tbl->proxy_timer.expires, sched_next)) 1668 sched_next = tbl->proxy_timer.expires; 1669 } 1670 skb_dst_drop(skb); 1671 dev_hold(skb->dev); 1672 __skb_queue_tail(&tbl->proxy_queue, skb); 1673 p->qlen++; 1674 mod_timer(&tbl->proxy_timer, sched_next); 1675 spin_unlock(&tbl->proxy_queue.lock); 1676 } 1677 EXPORT_SYMBOL(pneigh_enqueue); 1678 1679 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, 1680 struct net *net, int ifindex) 1681 { 1682 struct neigh_parms *p; 1683 1684 list_for_each_entry(p, &tbl->parms_list, list) { 1685 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) || 1686 (!p->dev && !ifindex && net_eq(net, &init_net))) 1687 return p; 1688 } 1689 1690 return NULL; 1691 } 1692 1693 struct neigh_parms *neigh_parms_alloc(struct net_device *dev, 1694 struct neigh_table *tbl) 1695 { 1696 struct neigh_parms *p; 1697 struct net *net = dev_net(dev); 1698 const struct net_device_ops *ops = dev->netdev_ops; 1699 1700 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL); 1701 if (p) { 1702 p->tbl = tbl; 1703 refcount_set(&p->refcnt, 1); 1704 p->reachable_time = 1705 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 1706 p->qlen = 0; 1707 netdev_hold(dev, &p->dev_tracker, GFP_KERNEL); 1708 p->dev = dev; 1709 write_pnet(&p->net, net); 1710 p->sysctl_table = NULL; 1711 1712 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1713 netdev_put(dev, &p->dev_tracker); 1714 kfree(p); 1715 return NULL; 1716 } 1717 1718 write_lock_bh(&tbl->lock); 1719 list_add(&p->list, &tbl->parms.list); 1720 write_unlock_bh(&tbl->lock); 1721 1722 neigh_parms_data_state_cleanall(p); 1723 } 1724 return p; 1725 } 1726 EXPORT_SYMBOL(neigh_parms_alloc); 1727 1728 static void neigh_rcu_free_parms(struct rcu_head *head) 1729 { 1730 struct neigh_parms *parms = 1731 container_of(head, struct neigh_parms, rcu_head); 1732 1733 neigh_parms_put(parms); 1734 } 1735 1736 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) 1737 { 1738 if (!parms || parms == &tbl->parms) 1739 return; 1740 write_lock_bh(&tbl->lock); 1741 list_del(&parms->list); 1742 parms->dead = 1; 1743 write_unlock_bh(&tbl->lock); 1744 netdev_put(parms->dev, &parms->dev_tracker); 1745 call_rcu(&parms->rcu_head, neigh_rcu_free_parms); 1746 } 1747 EXPORT_SYMBOL(neigh_parms_release); 1748 1749 static void neigh_parms_destroy(struct neigh_parms *parms) 1750 { 1751 kfree(parms); 1752 } 1753 1754 static struct lock_class_key neigh_table_proxy_queue_class; 1755 1756 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly; 1757 1758 void neigh_table_init(int index, struct neigh_table *tbl) 1759 { 1760 unsigned long now = jiffies; 1761 unsigned long phsize; 1762 1763 INIT_LIST_HEAD(&tbl->parms_list); 1764 INIT_LIST_HEAD(&tbl->gc_list); 1765 INIT_LIST_HEAD(&tbl->managed_list); 1766 1767 list_add(&tbl->parms.list, &tbl->parms_list); 1768 write_pnet(&tbl->parms.net, &init_net); 1769 refcount_set(&tbl->parms.refcnt, 1); 1770 tbl->parms.reachable_time = 1771 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME)); 1772 tbl->parms.qlen = 0; 1773 1774 tbl->stats = alloc_percpu(struct neigh_statistics); 1775 if (!tbl->stats) 1776 panic("cannot create neighbour cache statistics"); 1777 1778 #ifdef CONFIG_PROC_FS 1779 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat, 1780 &neigh_stat_seq_ops, tbl)) 1781 panic("cannot create neighbour proc dir entry"); 1782 #endif 1783 1784 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3)); 1785 1786 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *); 1787 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL); 1788 1789 if (!tbl->nht || !tbl->phash_buckets) 1790 panic("cannot allocate neighbour cache hashes"); 1791 1792 if (!tbl->entry_size) 1793 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) + 1794 tbl->key_len, NEIGH_PRIV_ALIGN); 1795 else 1796 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN); 1797 1798 rwlock_init(&tbl->lock); 1799 1800 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work); 1801 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work, 1802 tbl->parms.reachable_time); 1803 INIT_DEFERRABLE_WORK(&tbl->managed_work, neigh_managed_work); 1804 queue_delayed_work(system_power_efficient_wq, &tbl->managed_work, 0); 1805 1806 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0); 1807 skb_queue_head_init_class(&tbl->proxy_queue, 1808 &neigh_table_proxy_queue_class); 1809 1810 tbl->last_flush = now; 1811 tbl->last_rand = now + tbl->parms.reachable_time * 20; 1812 1813 neigh_tables[index] = tbl; 1814 } 1815 EXPORT_SYMBOL(neigh_table_init); 1816 1817 int neigh_table_clear(int index, struct neigh_table *tbl) 1818 { 1819 neigh_tables[index] = NULL; 1820 /* It is not clean... Fix it to unload IPv6 module safely */ 1821 cancel_delayed_work_sync(&tbl->managed_work); 1822 cancel_delayed_work_sync(&tbl->gc_work); 1823 del_timer_sync(&tbl->proxy_timer); 1824 pneigh_queue_purge(&tbl->proxy_queue, NULL); 1825 neigh_ifdown(tbl, NULL); 1826 if (atomic_read(&tbl->entries)) 1827 pr_crit("neighbour leakage\n"); 1828 1829 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu, 1830 neigh_hash_free_rcu); 1831 tbl->nht = NULL; 1832 1833 kfree(tbl->phash_buckets); 1834 tbl->phash_buckets = NULL; 1835 1836 remove_proc_entry(tbl->id, init_net.proc_net_stat); 1837 1838 free_percpu(tbl->stats); 1839 tbl->stats = NULL; 1840 1841 return 0; 1842 } 1843 EXPORT_SYMBOL(neigh_table_clear); 1844 1845 static struct neigh_table *neigh_find_table(int family) 1846 { 1847 struct neigh_table *tbl = NULL; 1848 1849 switch (family) { 1850 case AF_INET: 1851 tbl = neigh_tables[NEIGH_ARP_TABLE]; 1852 break; 1853 case AF_INET6: 1854 tbl = neigh_tables[NEIGH_ND_TABLE]; 1855 break; 1856 } 1857 1858 return tbl; 1859 } 1860 1861 const struct nla_policy nda_policy[NDA_MAX+1] = { 1862 [NDA_UNSPEC] = { .strict_start_type = NDA_NH_ID }, 1863 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1864 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1865 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) }, 1866 [NDA_PROBES] = { .type = NLA_U32 }, 1867 [NDA_VLAN] = { .type = NLA_U16 }, 1868 [NDA_PORT] = { .type = NLA_U16 }, 1869 [NDA_VNI] = { .type = NLA_U32 }, 1870 [NDA_IFINDEX] = { .type = NLA_U32 }, 1871 [NDA_MASTER] = { .type = NLA_U32 }, 1872 [NDA_PROTOCOL] = { .type = NLA_U8 }, 1873 [NDA_NH_ID] = { .type = NLA_U32 }, 1874 [NDA_FLAGS_EXT] = NLA_POLICY_MASK(NLA_U32, NTF_EXT_MASK), 1875 [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED }, 1876 }; 1877 1878 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, 1879 struct netlink_ext_ack *extack) 1880 { 1881 struct net *net = sock_net(skb->sk); 1882 struct ndmsg *ndm; 1883 struct nlattr *dst_attr; 1884 struct neigh_table *tbl; 1885 struct neighbour *neigh; 1886 struct net_device *dev = NULL; 1887 int err = -EINVAL; 1888 1889 ASSERT_RTNL(); 1890 if (nlmsg_len(nlh) < sizeof(*ndm)) 1891 goto out; 1892 1893 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST); 1894 if (!dst_attr) { 1895 NL_SET_ERR_MSG(extack, "Network address not specified"); 1896 goto out; 1897 } 1898 1899 ndm = nlmsg_data(nlh); 1900 if (ndm->ndm_ifindex) { 1901 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 1902 if (dev == NULL) { 1903 err = -ENODEV; 1904 goto out; 1905 } 1906 } 1907 1908 tbl = neigh_find_table(ndm->ndm_family); 1909 if (tbl == NULL) 1910 return -EAFNOSUPPORT; 1911 1912 if (nla_len(dst_attr) < (int)tbl->key_len) { 1913 NL_SET_ERR_MSG(extack, "Invalid network address"); 1914 goto out; 1915 } 1916 1917 if (ndm->ndm_flags & NTF_PROXY) { 1918 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); 1919 goto out; 1920 } 1921 1922 if (dev == NULL) 1923 goto out; 1924 1925 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); 1926 if (neigh == NULL) { 1927 err = -ENOENT; 1928 goto out; 1929 } 1930 1931 err = __neigh_update(neigh, NULL, NUD_FAILED, 1932 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 1933 NETLINK_CB(skb).portid, extack); 1934 write_lock_bh(&tbl->lock); 1935 neigh_release(neigh); 1936 neigh_remove_one(neigh, tbl); 1937 write_unlock_bh(&tbl->lock); 1938 1939 out: 1940 return err; 1941 } 1942 1943 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, 1944 struct netlink_ext_ack *extack) 1945 { 1946 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE | 1947 NEIGH_UPDATE_F_OVERRIDE_ISROUTER; 1948 struct net *net = sock_net(skb->sk); 1949 struct ndmsg *ndm; 1950 struct nlattr *tb[NDA_MAX+1]; 1951 struct neigh_table *tbl; 1952 struct net_device *dev = NULL; 1953 struct neighbour *neigh; 1954 void *dst, *lladdr; 1955 u8 protocol = 0; 1956 u32 ndm_flags; 1957 int err; 1958 1959 ASSERT_RTNL(); 1960 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 1961 nda_policy, extack); 1962 if (err < 0) 1963 goto out; 1964 1965 err = -EINVAL; 1966 if (!tb[NDA_DST]) { 1967 NL_SET_ERR_MSG(extack, "Network address not specified"); 1968 goto out; 1969 } 1970 1971 ndm = nlmsg_data(nlh); 1972 ndm_flags = ndm->ndm_flags; 1973 if (tb[NDA_FLAGS_EXT]) { 1974 u32 ext = nla_get_u32(tb[NDA_FLAGS_EXT]); 1975 1976 BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE < 1977 (sizeof(ndm->ndm_flags) * BITS_PER_BYTE + 1978 hweight32(NTF_EXT_MASK))); 1979 ndm_flags |= (ext << NTF_EXT_SHIFT); 1980 } 1981 if (ndm->ndm_ifindex) { 1982 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 1983 if (dev == NULL) { 1984 err = -ENODEV; 1985 goto out; 1986 } 1987 1988 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) { 1989 NL_SET_ERR_MSG(extack, "Invalid link address"); 1990 goto out; 1991 } 1992 } 1993 1994 tbl = neigh_find_table(ndm->ndm_family); 1995 if (tbl == NULL) 1996 return -EAFNOSUPPORT; 1997 1998 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) { 1999 NL_SET_ERR_MSG(extack, "Invalid network address"); 2000 goto out; 2001 } 2002 2003 dst = nla_data(tb[NDA_DST]); 2004 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; 2005 2006 if (tb[NDA_PROTOCOL]) 2007 protocol = nla_get_u8(tb[NDA_PROTOCOL]); 2008 if (ndm_flags & NTF_PROXY) { 2009 struct pneigh_entry *pn; 2010 2011 if (ndm_flags & NTF_MANAGED) { 2012 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag combination"); 2013 goto out; 2014 } 2015 2016 err = -ENOBUFS; 2017 pn = pneigh_lookup(tbl, net, dst, dev, 1); 2018 if (pn) { 2019 pn->flags = ndm_flags; 2020 if (protocol) 2021 pn->protocol = protocol; 2022 err = 0; 2023 } 2024 goto out; 2025 } 2026 2027 if (!dev) { 2028 NL_SET_ERR_MSG(extack, "Device not specified"); 2029 goto out; 2030 } 2031 2032 if (tbl->allow_add && !tbl->allow_add(dev, extack)) { 2033 err = -EINVAL; 2034 goto out; 2035 } 2036 2037 neigh = neigh_lookup(tbl, dst, dev); 2038 if (neigh == NULL) { 2039 bool ndm_permanent = ndm->ndm_state & NUD_PERMANENT; 2040 bool exempt_from_gc = ndm_permanent || 2041 ndm_flags & NTF_EXT_LEARNED; 2042 2043 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 2044 err = -ENOENT; 2045 goto out; 2046 } 2047 if (ndm_permanent && (ndm_flags & NTF_MANAGED)) { 2048 NL_SET_ERR_MSG(extack, "Invalid NTF_* flag for permanent entry"); 2049 err = -EINVAL; 2050 goto out; 2051 } 2052 2053 neigh = ___neigh_create(tbl, dst, dev, 2054 ndm_flags & 2055 (NTF_EXT_LEARNED | NTF_MANAGED), 2056 exempt_from_gc, true); 2057 if (IS_ERR(neigh)) { 2058 err = PTR_ERR(neigh); 2059 goto out; 2060 } 2061 } else { 2062 if (nlh->nlmsg_flags & NLM_F_EXCL) { 2063 err = -EEXIST; 2064 neigh_release(neigh); 2065 goto out; 2066 } 2067 2068 if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) 2069 flags &= ~(NEIGH_UPDATE_F_OVERRIDE | 2070 NEIGH_UPDATE_F_OVERRIDE_ISROUTER); 2071 } 2072 2073 if (protocol) 2074 neigh->protocol = protocol; 2075 if (ndm_flags & NTF_EXT_LEARNED) 2076 flags |= NEIGH_UPDATE_F_EXT_LEARNED; 2077 if (ndm_flags & NTF_ROUTER) 2078 flags |= NEIGH_UPDATE_F_ISROUTER; 2079 if (ndm_flags & NTF_MANAGED) 2080 flags |= NEIGH_UPDATE_F_MANAGED; 2081 if (ndm_flags & NTF_USE) 2082 flags |= NEIGH_UPDATE_F_USE; 2083 2084 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, 2085 NETLINK_CB(skb).portid, extack); 2086 if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) { 2087 neigh_event_send(neigh, NULL); 2088 err = 0; 2089 } 2090 neigh_release(neigh); 2091 out: 2092 return err; 2093 } 2094 2095 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) 2096 { 2097 struct nlattr *nest; 2098 2099 nest = nla_nest_start_noflag(skb, NDTA_PARMS); 2100 if (nest == NULL) 2101 return -ENOBUFS; 2102 2103 if ((parms->dev && 2104 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) || 2105 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) || 2106 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, 2107 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) || 2108 /* approximative value for deprecated QUEUE_LEN (in packets) */ 2109 nla_put_u32(skb, NDTPA_QUEUE_LEN, 2110 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) || 2111 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) || 2112 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) || 2113 nla_put_u32(skb, NDTPA_UCAST_PROBES, 2114 NEIGH_VAR(parms, UCAST_PROBES)) || 2115 nla_put_u32(skb, NDTPA_MCAST_PROBES, 2116 NEIGH_VAR(parms, MCAST_PROBES)) || 2117 nla_put_u32(skb, NDTPA_MCAST_REPROBES, 2118 NEIGH_VAR(parms, MCAST_REPROBES)) || 2119 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time, 2120 NDTPA_PAD) || 2121 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, 2122 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) || 2123 nla_put_msecs(skb, NDTPA_GC_STALETIME, 2124 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) || 2125 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME, 2126 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) || 2127 nla_put_msecs(skb, NDTPA_RETRANS_TIME, 2128 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) || 2129 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, 2130 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) || 2131 nla_put_msecs(skb, NDTPA_PROXY_DELAY, 2132 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) || 2133 nla_put_msecs(skb, NDTPA_LOCKTIME, 2134 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD) || 2135 nla_put_msecs(skb, NDTPA_INTERVAL_PROBE_TIME_MS, 2136 NEIGH_VAR(parms, INTERVAL_PROBE_TIME_MS), NDTPA_PAD)) 2137 goto nla_put_failure; 2138 return nla_nest_end(skb, nest); 2139 2140 nla_put_failure: 2141 nla_nest_cancel(skb, nest); 2142 return -EMSGSIZE; 2143 } 2144 2145 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, 2146 u32 pid, u32 seq, int type, int flags) 2147 { 2148 struct nlmsghdr *nlh; 2149 struct ndtmsg *ndtmsg; 2150 2151 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2152 if (nlh == NULL) 2153 return -EMSGSIZE; 2154 2155 ndtmsg = nlmsg_data(nlh); 2156 2157 read_lock_bh(&tbl->lock); 2158 ndtmsg->ndtm_family = tbl->family; 2159 ndtmsg->ndtm_pad1 = 0; 2160 ndtmsg->ndtm_pad2 = 0; 2161 2162 if (nla_put_string(skb, NDTA_NAME, tbl->id) || 2163 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) || 2164 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) || 2165 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) || 2166 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3)) 2167 goto nla_put_failure; 2168 { 2169 unsigned long now = jiffies; 2170 long flush_delta = now - tbl->last_flush; 2171 long rand_delta = now - tbl->last_rand; 2172 struct neigh_hash_table *nht; 2173 struct ndt_config ndc = { 2174 .ndtc_key_len = tbl->key_len, 2175 .ndtc_entry_size = tbl->entry_size, 2176 .ndtc_entries = atomic_read(&tbl->entries), 2177 .ndtc_last_flush = jiffies_to_msecs(flush_delta), 2178 .ndtc_last_rand = jiffies_to_msecs(rand_delta), 2179 .ndtc_proxy_qlen = tbl->proxy_queue.qlen, 2180 }; 2181 2182 rcu_read_lock_bh(); 2183 nht = rcu_dereference_bh(tbl->nht); 2184 ndc.ndtc_hash_rnd = nht->hash_rnd[0]; 2185 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); 2186 rcu_read_unlock_bh(); 2187 2188 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc)) 2189 goto nla_put_failure; 2190 } 2191 2192 { 2193 int cpu; 2194 struct ndt_stats ndst; 2195 2196 memset(&ndst, 0, sizeof(ndst)); 2197 2198 for_each_possible_cpu(cpu) { 2199 struct neigh_statistics *st; 2200 2201 st = per_cpu_ptr(tbl->stats, cpu); 2202 ndst.ndts_allocs += st->allocs; 2203 ndst.ndts_destroys += st->destroys; 2204 ndst.ndts_hash_grows += st->hash_grows; 2205 ndst.ndts_res_failed += st->res_failed; 2206 ndst.ndts_lookups += st->lookups; 2207 ndst.ndts_hits += st->hits; 2208 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast; 2209 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast; 2210 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs; 2211 ndst.ndts_forced_gc_runs += st->forced_gc_runs; 2212 ndst.ndts_table_fulls += st->table_fulls; 2213 } 2214 2215 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst, 2216 NDTA_PAD)) 2217 goto nla_put_failure; 2218 } 2219 2220 BUG_ON(tbl->parms.dev); 2221 if (neightbl_fill_parms(skb, &tbl->parms) < 0) 2222 goto nla_put_failure; 2223 2224 read_unlock_bh(&tbl->lock); 2225 nlmsg_end(skb, nlh); 2226 return 0; 2227 2228 nla_put_failure: 2229 read_unlock_bh(&tbl->lock); 2230 nlmsg_cancel(skb, nlh); 2231 return -EMSGSIZE; 2232 } 2233 2234 static int neightbl_fill_param_info(struct sk_buff *skb, 2235 struct neigh_table *tbl, 2236 struct neigh_parms *parms, 2237 u32 pid, u32 seq, int type, 2238 unsigned int flags) 2239 { 2240 struct ndtmsg *ndtmsg; 2241 struct nlmsghdr *nlh; 2242 2243 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2244 if (nlh == NULL) 2245 return -EMSGSIZE; 2246 2247 ndtmsg = nlmsg_data(nlh); 2248 2249 read_lock_bh(&tbl->lock); 2250 ndtmsg->ndtm_family = tbl->family; 2251 ndtmsg->ndtm_pad1 = 0; 2252 ndtmsg->ndtm_pad2 = 0; 2253 2254 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 || 2255 neightbl_fill_parms(skb, parms) < 0) 2256 goto errout; 2257 2258 read_unlock_bh(&tbl->lock); 2259 nlmsg_end(skb, nlh); 2260 return 0; 2261 errout: 2262 read_unlock_bh(&tbl->lock); 2263 nlmsg_cancel(skb, nlh); 2264 return -EMSGSIZE; 2265 } 2266 2267 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { 2268 [NDTA_NAME] = { .type = NLA_STRING }, 2269 [NDTA_THRESH1] = { .type = NLA_U32 }, 2270 [NDTA_THRESH2] = { .type = NLA_U32 }, 2271 [NDTA_THRESH3] = { .type = NLA_U32 }, 2272 [NDTA_GC_INTERVAL] = { .type = NLA_U64 }, 2273 [NDTA_PARMS] = { .type = NLA_NESTED }, 2274 }; 2275 2276 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { 2277 [NDTPA_IFINDEX] = { .type = NLA_U32 }, 2278 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, 2279 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, 2280 [NDTPA_APP_PROBES] = { .type = NLA_U32 }, 2281 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, 2282 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 }, 2283 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 }, 2284 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 }, 2285 [NDTPA_GC_STALETIME] = { .type = NLA_U64 }, 2286 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 }, 2287 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 }, 2288 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 }, 2289 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 }, 2290 [NDTPA_LOCKTIME] = { .type = NLA_U64 }, 2291 [NDTPA_INTERVAL_PROBE_TIME_MS] = { .type = NLA_U64, .min = 1 }, 2292 }; 2293 2294 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, 2295 struct netlink_ext_ack *extack) 2296 { 2297 struct net *net = sock_net(skb->sk); 2298 struct neigh_table *tbl; 2299 struct ndtmsg *ndtmsg; 2300 struct nlattr *tb[NDTA_MAX+1]; 2301 bool found = false; 2302 int err, tidx; 2303 2304 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, 2305 nl_neightbl_policy, extack); 2306 if (err < 0) 2307 goto errout; 2308 2309 if (tb[NDTA_NAME] == NULL) { 2310 err = -EINVAL; 2311 goto errout; 2312 } 2313 2314 ndtmsg = nlmsg_data(nlh); 2315 2316 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2317 tbl = neigh_tables[tidx]; 2318 if (!tbl) 2319 continue; 2320 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) 2321 continue; 2322 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) { 2323 found = true; 2324 break; 2325 } 2326 } 2327 2328 if (!found) 2329 return -ENOENT; 2330 2331 /* 2332 * We acquire tbl->lock to be nice to the periodic timers and 2333 * make sure they always see a consistent set of values. 2334 */ 2335 write_lock_bh(&tbl->lock); 2336 2337 if (tb[NDTA_PARMS]) { 2338 struct nlattr *tbp[NDTPA_MAX+1]; 2339 struct neigh_parms *p; 2340 int i, ifindex = 0; 2341 2342 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX, 2343 tb[NDTA_PARMS], 2344 nl_ntbl_parm_policy, extack); 2345 if (err < 0) 2346 goto errout_tbl_lock; 2347 2348 if (tbp[NDTPA_IFINDEX]) 2349 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); 2350 2351 p = lookup_neigh_parms(tbl, net, ifindex); 2352 if (p == NULL) { 2353 err = -ENOENT; 2354 goto errout_tbl_lock; 2355 } 2356 2357 for (i = 1; i <= NDTPA_MAX; i++) { 2358 if (tbp[i] == NULL) 2359 continue; 2360 2361 switch (i) { 2362 case NDTPA_QUEUE_LEN: 2363 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2364 nla_get_u32(tbp[i]) * 2365 SKB_TRUESIZE(ETH_FRAME_LEN)); 2366 break; 2367 case NDTPA_QUEUE_LENBYTES: 2368 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2369 nla_get_u32(tbp[i])); 2370 break; 2371 case NDTPA_PROXY_QLEN: 2372 NEIGH_VAR_SET(p, PROXY_QLEN, 2373 nla_get_u32(tbp[i])); 2374 break; 2375 case NDTPA_APP_PROBES: 2376 NEIGH_VAR_SET(p, APP_PROBES, 2377 nla_get_u32(tbp[i])); 2378 break; 2379 case NDTPA_UCAST_PROBES: 2380 NEIGH_VAR_SET(p, UCAST_PROBES, 2381 nla_get_u32(tbp[i])); 2382 break; 2383 case NDTPA_MCAST_PROBES: 2384 NEIGH_VAR_SET(p, MCAST_PROBES, 2385 nla_get_u32(tbp[i])); 2386 break; 2387 case NDTPA_MCAST_REPROBES: 2388 NEIGH_VAR_SET(p, MCAST_REPROBES, 2389 nla_get_u32(tbp[i])); 2390 break; 2391 case NDTPA_BASE_REACHABLE_TIME: 2392 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2393 nla_get_msecs(tbp[i])); 2394 /* update reachable_time as well, otherwise, the change will 2395 * only be effective after the next time neigh_periodic_work 2396 * decides to recompute it (can be multiple minutes) 2397 */ 2398 p->reachable_time = 2399 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2400 break; 2401 case NDTPA_GC_STALETIME: 2402 NEIGH_VAR_SET(p, GC_STALETIME, 2403 nla_get_msecs(tbp[i])); 2404 break; 2405 case NDTPA_DELAY_PROBE_TIME: 2406 NEIGH_VAR_SET(p, DELAY_PROBE_TIME, 2407 nla_get_msecs(tbp[i])); 2408 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 2409 break; 2410 case NDTPA_INTERVAL_PROBE_TIME_MS: 2411 NEIGH_VAR_SET(p, INTERVAL_PROBE_TIME_MS, 2412 nla_get_msecs(tbp[i])); 2413 break; 2414 case NDTPA_RETRANS_TIME: 2415 NEIGH_VAR_SET(p, RETRANS_TIME, 2416 nla_get_msecs(tbp[i])); 2417 break; 2418 case NDTPA_ANYCAST_DELAY: 2419 NEIGH_VAR_SET(p, ANYCAST_DELAY, 2420 nla_get_msecs(tbp[i])); 2421 break; 2422 case NDTPA_PROXY_DELAY: 2423 NEIGH_VAR_SET(p, PROXY_DELAY, 2424 nla_get_msecs(tbp[i])); 2425 break; 2426 case NDTPA_LOCKTIME: 2427 NEIGH_VAR_SET(p, LOCKTIME, 2428 nla_get_msecs(tbp[i])); 2429 break; 2430 } 2431 } 2432 } 2433 2434 err = -ENOENT; 2435 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] || 2436 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) && 2437 !net_eq(net, &init_net)) 2438 goto errout_tbl_lock; 2439 2440 if (tb[NDTA_THRESH1]) 2441 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]); 2442 2443 if (tb[NDTA_THRESH2]) 2444 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]); 2445 2446 if (tb[NDTA_THRESH3]) 2447 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]); 2448 2449 if (tb[NDTA_GC_INTERVAL]) 2450 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]); 2451 2452 err = 0; 2453 2454 errout_tbl_lock: 2455 write_unlock_bh(&tbl->lock); 2456 errout: 2457 return err; 2458 } 2459 2460 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh, 2461 struct netlink_ext_ack *extack) 2462 { 2463 struct ndtmsg *ndtm; 2464 2465 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) { 2466 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request"); 2467 return -EINVAL; 2468 } 2469 2470 ndtm = nlmsg_data(nlh); 2471 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) { 2472 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request"); 2473 return -EINVAL; 2474 } 2475 2476 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) { 2477 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request"); 2478 return -EINVAL; 2479 } 2480 2481 return 0; 2482 } 2483 2484 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2485 { 2486 const struct nlmsghdr *nlh = cb->nlh; 2487 struct net *net = sock_net(skb->sk); 2488 int family, tidx, nidx = 0; 2489 int tbl_skip = cb->args[0]; 2490 int neigh_skip = cb->args[1]; 2491 struct neigh_table *tbl; 2492 2493 if (cb->strict_check) { 2494 int err = neightbl_valid_dump_info(nlh, cb->extack); 2495 2496 if (err < 0) 2497 return err; 2498 } 2499 2500 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2501 2502 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2503 struct neigh_parms *p; 2504 2505 tbl = neigh_tables[tidx]; 2506 if (!tbl) 2507 continue; 2508 2509 if (tidx < tbl_skip || (family && tbl->family != family)) 2510 continue; 2511 2512 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid, 2513 nlh->nlmsg_seq, RTM_NEWNEIGHTBL, 2514 NLM_F_MULTI) < 0) 2515 break; 2516 2517 nidx = 0; 2518 p = list_next_entry(&tbl->parms, list); 2519 list_for_each_entry_from(p, &tbl->parms_list, list) { 2520 if (!net_eq(neigh_parms_net(p), net)) 2521 continue; 2522 2523 if (nidx < neigh_skip) 2524 goto next; 2525 2526 if (neightbl_fill_param_info(skb, tbl, p, 2527 NETLINK_CB(cb->skb).portid, 2528 nlh->nlmsg_seq, 2529 RTM_NEWNEIGHTBL, 2530 NLM_F_MULTI) < 0) 2531 goto out; 2532 next: 2533 nidx++; 2534 } 2535 2536 neigh_skip = 0; 2537 } 2538 out: 2539 cb->args[0] = tidx; 2540 cb->args[1] = nidx; 2541 2542 return skb->len; 2543 } 2544 2545 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, 2546 u32 pid, u32 seq, int type, unsigned int flags) 2547 { 2548 u32 neigh_flags, neigh_flags_ext; 2549 unsigned long now = jiffies; 2550 struct nda_cacheinfo ci; 2551 struct nlmsghdr *nlh; 2552 struct ndmsg *ndm; 2553 2554 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2555 if (nlh == NULL) 2556 return -EMSGSIZE; 2557 2558 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT; 2559 neigh_flags = neigh->flags & NTF_OLD_MASK; 2560 2561 ndm = nlmsg_data(nlh); 2562 ndm->ndm_family = neigh->ops->family; 2563 ndm->ndm_pad1 = 0; 2564 ndm->ndm_pad2 = 0; 2565 ndm->ndm_flags = neigh_flags; 2566 ndm->ndm_type = neigh->type; 2567 ndm->ndm_ifindex = neigh->dev->ifindex; 2568 2569 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key)) 2570 goto nla_put_failure; 2571 2572 read_lock_bh(&neigh->lock); 2573 ndm->ndm_state = neigh->nud_state; 2574 if (neigh->nud_state & NUD_VALID) { 2575 char haddr[MAX_ADDR_LEN]; 2576 2577 neigh_ha_snapshot(haddr, neigh, neigh->dev); 2578 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) { 2579 read_unlock_bh(&neigh->lock); 2580 goto nla_put_failure; 2581 } 2582 } 2583 2584 ci.ndm_used = jiffies_to_clock_t(now - neigh->used); 2585 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed); 2586 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated); 2587 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1; 2588 read_unlock_bh(&neigh->lock); 2589 2590 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) || 2591 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 2592 goto nla_put_failure; 2593 2594 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol)) 2595 goto nla_put_failure; 2596 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) 2597 goto nla_put_failure; 2598 2599 nlmsg_end(skb, nlh); 2600 return 0; 2601 2602 nla_put_failure: 2603 nlmsg_cancel(skb, nlh); 2604 return -EMSGSIZE; 2605 } 2606 2607 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, 2608 u32 pid, u32 seq, int type, unsigned int flags, 2609 struct neigh_table *tbl) 2610 { 2611 u32 neigh_flags, neigh_flags_ext; 2612 struct nlmsghdr *nlh; 2613 struct ndmsg *ndm; 2614 2615 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2616 if (nlh == NULL) 2617 return -EMSGSIZE; 2618 2619 neigh_flags_ext = pn->flags >> NTF_EXT_SHIFT; 2620 neigh_flags = pn->flags & NTF_OLD_MASK; 2621 2622 ndm = nlmsg_data(nlh); 2623 ndm->ndm_family = tbl->family; 2624 ndm->ndm_pad1 = 0; 2625 ndm->ndm_pad2 = 0; 2626 ndm->ndm_flags = neigh_flags | NTF_PROXY; 2627 ndm->ndm_type = RTN_UNICAST; 2628 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; 2629 ndm->ndm_state = NUD_NONE; 2630 2631 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) 2632 goto nla_put_failure; 2633 2634 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol)) 2635 goto nla_put_failure; 2636 if (neigh_flags_ext && nla_put_u32(skb, NDA_FLAGS_EXT, neigh_flags_ext)) 2637 goto nla_put_failure; 2638 2639 nlmsg_end(skb, nlh); 2640 return 0; 2641 2642 nla_put_failure: 2643 nlmsg_cancel(skb, nlh); 2644 return -EMSGSIZE; 2645 } 2646 2647 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid) 2648 { 2649 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 2650 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid); 2651 } 2652 2653 static bool neigh_master_filtered(struct net_device *dev, int master_idx) 2654 { 2655 struct net_device *master; 2656 2657 if (!master_idx) 2658 return false; 2659 2660 master = dev ? netdev_master_upper_dev_get(dev) : NULL; 2661 2662 /* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another 2663 * invalid value for ifindex to denote "no master". 2664 */ 2665 if (master_idx == -1) 2666 return !!master; 2667 2668 if (!master || master->ifindex != master_idx) 2669 return true; 2670 2671 return false; 2672 } 2673 2674 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx) 2675 { 2676 if (filter_idx && (!dev || dev->ifindex != filter_idx)) 2677 return true; 2678 2679 return false; 2680 } 2681 2682 struct neigh_dump_filter { 2683 int master_idx; 2684 int dev_idx; 2685 }; 2686 2687 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2688 struct netlink_callback *cb, 2689 struct neigh_dump_filter *filter) 2690 { 2691 struct net *net = sock_net(skb->sk); 2692 struct neighbour *n; 2693 int rc, h, s_h = cb->args[1]; 2694 int idx, s_idx = idx = cb->args[2]; 2695 struct neigh_hash_table *nht; 2696 unsigned int flags = NLM_F_MULTI; 2697 2698 if (filter->dev_idx || filter->master_idx) 2699 flags |= NLM_F_DUMP_FILTERED; 2700 2701 rcu_read_lock_bh(); 2702 nht = rcu_dereference_bh(tbl->nht); 2703 2704 for (h = s_h; h < (1 << nht->hash_shift); h++) { 2705 if (h > s_h) 2706 s_idx = 0; 2707 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; 2708 n != NULL; 2709 n = rcu_dereference_bh(n->next)) { 2710 if (idx < s_idx || !net_eq(dev_net(n->dev), net)) 2711 goto next; 2712 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2713 neigh_master_filtered(n->dev, filter->master_idx)) 2714 goto next; 2715 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2716 cb->nlh->nlmsg_seq, 2717 RTM_NEWNEIGH, 2718 flags) < 0) { 2719 rc = -1; 2720 goto out; 2721 } 2722 next: 2723 idx++; 2724 } 2725 } 2726 rc = skb->len; 2727 out: 2728 rcu_read_unlock_bh(); 2729 cb->args[1] = h; 2730 cb->args[2] = idx; 2731 return rc; 2732 } 2733 2734 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2735 struct netlink_callback *cb, 2736 struct neigh_dump_filter *filter) 2737 { 2738 struct pneigh_entry *n; 2739 struct net *net = sock_net(skb->sk); 2740 int rc, h, s_h = cb->args[3]; 2741 int idx, s_idx = idx = cb->args[4]; 2742 unsigned int flags = NLM_F_MULTI; 2743 2744 if (filter->dev_idx || filter->master_idx) 2745 flags |= NLM_F_DUMP_FILTERED; 2746 2747 read_lock_bh(&tbl->lock); 2748 2749 for (h = s_h; h <= PNEIGH_HASHMASK; h++) { 2750 if (h > s_h) 2751 s_idx = 0; 2752 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2753 if (idx < s_idx || pneigh_net(n) != net) 2754 goto next; 2755 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2756 neigh_master_filtered(n->dev, filter->master_idx)) 2757 goto next; 2758 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2759 cb->nlh->nlmsg_seq, 2760 RTM_NEWNEIGH, flags, tbl) < 0) { 2761 read_unlock_bh(&tbl->lock); 2762 rc = -1; 2763 goto out; 2764 } 2765 next: 2766 idx++; 2767 } 2768 } 2769 2770 read_unlock_bh(&tbl->lock); 2771 rc = skb->len; 2772 out: 2773 cb->args[3] = h; 2774 cb->args[4] = idx; 2775 return rc; 2776 2777 } 2778 2779 static int neigh_valid_dump_req(const struct nlmsghdr *nlh, 2780 bool strict_check, 2781 struct neigh_dump_filter *filter, 2782 struct netlink_ext_ack *extack) 2783 { 2784 struct nlattr *tb[NDA_MAX + 1]; 2785 int err, i; 2786 2787 if (strict_check) { 2788 struct ndmsg *ndm; 2789 2790 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2791 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request"); 2792 return -EINVAL; 2793 } 2794 2795 ndm = nlmsg_data(nlh); 2796 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex || 2797 ndm->ndm_state || ndm->ndm_type) { 2798 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request"); 2799 return -EINVAL; 2800 } 2801 2802 if (ndm->ndm_flags & ~NTF_PROXY) { 2803 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request"); 2804 return -EINVAL; 2805 } 2806 2807 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), 2808 tb, NDA_MAX, nda_policy, 2809 extack); 2810 } else { 2811 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb, 2812 NDA_MAX, nda_policy, extack); 2813 } 2814 if (err < 0) 2815 return err; 2816 2817 for (i = 0; i <= NDA_MAX; ++i) { 2818 if (!tb[i]) 2819 continue; 2820 2821 /* all new attributes should require strict_check */ 2822 switch (i) { 2823 case NDA_IFINDEX: 2824 filter->dev_idx = nla_get_u32(tb[i]); 2825 break; 2826 case NDA_MASTER: 2827 filter->master_idx = nla_get_u32(tb[i]); 2828 break; 2829 default: 2830 if (strict_check) { 2831 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request"); 2832 return -EINVAL; 2833 } 2834 } 2835 } 2836 2837 return 0; 2838 } 2839 2840 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2841 { 2842 const struct nlmsghdr *nlh = cb->nlh; 2843 struct neigh_dump_filter filter = {}; 2844 struct neigh_table *tbl; 2845 int t, family, s_t; 2846 int proxy = 0; 2847 int err; 2848 2849 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2850 2851 /* check for full ndmsg structure presence, family member is 2852 * the same for both structures 2853 */ 2854 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) && 2855 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY) 2856 proxy = 1; 2857 2858 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack); 2859 if (err < 0 && cb->strict_check) 2860 return err; 2861 2862 s_t = cb->args[0]; 2863 2864 for (t = 0; t < NEIGH_NR_TABLES; t++) { 2865 tbl = neigh_tables[t]; 2866 2867 if (!tbl) 2868 continue; 2869 if (t < s_t || (family && tbl->family != family)) 2870 continue; 2871 if (t > s_t) 2872 memset(&cb->args[1], 0, sizeof(cb->args) - 2873 sizeof(cb->args[0])); 2874 if (proxy) 2875 err = pneigh_dump_table(tbl, skb, cb, &filter); 2876 else 2877 err = neigh_dump_table(tbl, skb, cb, &filter); 2878 if (err < 0) 2879 break; 2880 } 2881 2882 cb->args[0] = t; 2883 return skb->len; 2884 } 2885 2886 static int neigh_valid_get_req(const struct nlmsghdr *nlh, 2887 struct neigh_table **tbl, 2888 void **dst, int *dev_idx, u8 *ndm_flags, 2889 struct netlink_ext_ack *extack) 2890 { 2891 struct nlattr *tb[NDA_MAX + 1]; 2892 struct ndmsg *ndm; 2893 int err, i; 2894 2895 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2896 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request"); 2897 return -EINVAL; 2898 } 2899 2900 ndm = nlmsg_data(nlh); 2901 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 2902 ndm->ndm_type) { 2903 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request"); 2904 return -EINVAL; 2905 } 2906 2907 if (ndm->ndm_flags & ~NTF_PROXY) { 2908 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request"); 2909 return -EINVAL; 2910 } 2911 2912 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 2913 NDA_MAX, nda_policy, extack); 2914 if (err < 0) 2915 return err; 2916 2917 *ndm_flags = ndm->ndm_flags; 2918 *dev_idx = ndm->ndm_ifindex; 2919 *tbl = neigh_find_table(ndm->ndm_family); 2920 if (*tbl == NULL) { 2921 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request"); 2922 return -EAFNOSUPPORT; 2923 } 2924 2925 for (i = 0; i <= NDA_MAX; ++i) { 2926 if (!tb[i]) 2927 continue; 2928 2929 switch (i) { 2930 case NDA_DST: 2931 if (nla_len(tb[i]) != (int)(*tbl)->key_len) { 2932 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request"); 2933 return -EINVAL; 2934 } 2935 *dst = nla_data(tb[i]); 2936 break; 2937 default: 2938 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request"); 2939 return -EINVAL; 2940 } 2941 } 2942 2943 return 0; 2944 } 2945 2946 static inline size_t neigh_nlmsg_size(void) 2947 { 2948 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2949 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2950 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */ 2951 + nla_total_size(sizeof(struct nda_cacheinfo)) 2952 + nla_total_size(4) /* NDA_PROBES */ 2953 + nla_total_size(4) /* NDA_FLAGS_EXT */ 2954 + nla_total_size(1); /* NDA_PROTOCOL */ 2955 } 2956 2957 static int neigh_get_reply(struct net *net, struct neighbour *neigh, 2958 u32 pid, u32 seq) 2959 { 2960 struct sk_buff *skb; 2961 int err = 0; 2962 2963 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL); 2964 if (!skb) 2965 return -ENOBUFS; 2966 2967 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0); 2968 if (err) { 2969 kfree_skb(skb); 2970 goto errout; 2971 } 2972 2973 err = rtnl_unicast(skb, net, pid); 2974 errout: 2975 return err; 2976 } 2977 2978 static inline size_t pneigh_nlmsg_size(void) 2979 { 2980 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2981 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2982 + nla_total_size(4) /* NDA_FLAGS_EXT */ 2983 + nla_total_size(1); /* NDA_PROTOCOL */ 2984 } 2985 2986 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh, 2987 u32 pid, u32 seq, struct neigh_table *tbl) 2988 { 2989 struct sk_buff *skb; 2990 int err = 0; 2991 2992 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL); 2993 if (!skb) 2994 return -ENOBUFS; 2995 2996 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl); 2997 if (err) { 2998 kfree_skb(skb); 2999 goto errout; 3000 } 3001 3002 err = rtnl_unicast(skb, net, pid); 3003 errout: 3004 return err; 3005 } 3006 3007 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 3008 struct netlink_ext_ack *extack) 3009 { 3010 struct net *net = sock_net(in_skb->sk); 3011 struct net_device *dev = NULL; 3012 struct neigh_table *tbl = NULL; 3013 struct neighbour *neigh; 3014 void *dst = NULL; 3015 u8 ndm_flags = 0; 3016 int dev_idx = 0; 3017 int err; 3018 3019 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags, 3020 extack); 3021 if (err < 0) 3022 return err; 3023 3024 if (dev_idx) { 3025 dev = __dev_get_by_index(net, dev_idx); 3026 if (!dev) { 3027 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 3028 return -ENODEV; 3029 } 3030 } 3031 3032 if (!dst) { 3033 NL_SET_ERR_MSG(extack, "Network address not specified"); 3034 return -EINVAL; 3035 } 3036 3037 if (ndm_flags & NTF_PROXY) { 3038 struct pneigh_entry *pn; 3039 3040 pn = pneigh_lookup(tbl, net, dst, dev, 0); 3041 if (!pn) { 3042 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found"); 3043 return -ENOENT; 3044 } 3045 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid, 3046 nlh->nlmsg_seq, tbl); 3047 } 3048 3049 if (!dev) { 3050 NL_SET_ERR_MSG(extack, "No device specified"); 3051 return -EINVAL; 3052 } 3053 3054 neigh = neigh_lookup(tbl, dst, dev); 3055 if (!neigh) { 3056 NL_SET_ERR_MSG(extack, "Neighbour entry not found"); 3057 return -ENOENT; 3058 } 3059 3060 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid, 3061 nlh->nlmsg_seq); 3062 3063 neigh_release(neigh); 3064 3065 return err; 3066 } 3067 3068 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) 3069 { 3070 int chain; 3071 struct neigh_hash_table *nht; 3072 3073 rcu_read_lock_bh(); 3074 nht = rcu_dereference_bh(tbl->nht); 3075 3076 read_lock(&tbl->lock); /* avoid resizes */ 3077 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 3078 struct neighbour *n; 3079 3080 for (n = rcu_dereference_bh(nht->hash_buckets[chain]); 3081 n != NULL; 3082 n = rcu_dereference_bh(n->next)) 3083 cb(n, cookie); 3084 } 3085 read_unlock(&tbl->lock); 3086 rcu_read_unlock_bh(); 3087 } 3088 EXPORT_SYMBOL(neigh_for_each); 3089 3090 /* The tbl->lock must be held as a writer and BH disabled. */ 3091 void __neigh_for_each_release(struct neigh_table *tbl, 3092 int (*cb)(struct neighbour *)) 3093 { 3094 int chain; 3095 struct neigh_hash_table *nht; 3096 3097 nht = rcu_dereference_protected(tbl->nht, 3098 lockdep_is_held(&tbl->lock)); 3099 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 3100 struct neighbour *n; 3101 struct neighbour __rcu **np; 3102 3103 np = &nht->hash_buckets[chain]; 3104 while ((n = rcu_dereference_protected(*np, 3105 lockdep_is_held(&tbl->lock))) != NULL) { 3106 int release; 3107 3108 write_lock(&n->lock); 3109 release = cb(n); 3110 if (release) { 3111 rcu_assign_pointer(*np, 3112 rcu_dereference_protected(n->next, 3113 lockdep_is_held(&tbl->lock))); 3114 neigh_mark_dead(n); 3115 } else 3116 np = &n->next; 3117 write_unlock(&n->lock); 3118 if (release) 3119 neigh_cleanup_and_release(n); 3120 } 3121 } 3122 } 3123 EXPORT_SYMBOL(__neigh_for_each_release); 3124 3125 int neigh_xmit(int index, struct net_device *dev, 3126 const void *addr, struct sk_buff *skb) 3127 { 3128 int err = -EAFNOSUPPORT; 3129 if (likely(index < NEIGH_NR_TABLES)) { 3130 struct neigh_table *tbl; 3131 struct neighbour *neigh; 3132 3133 tbl = neigh_tables[index]; 3134 if (!tbl) 3135 goto out; 3136 rcu_read_lock_bh(); 3137 if (index == NEIGH_ARP_TABLE) { 3138 u32 key = *((u32 *)addr); 3139 3140 neigh = __ipv4_neigh_lookup_noref(dev, key); 3141 } else { 3142 neigh = __neigh_lookup_noref(tbl, addr, dev); 3143 } 3144 if (!neigh) 3145 neigh = __neigh_create(tbl, addr, dev, false); 3146 err = PTR_ERR(neigh); 3147 if (IS_ERR(neigh)) { 3148 rcu_read_unlock_bh(); 3149 goto out_kfree_skb; 3150 } 3151 err = neigh->output(neigh, skb); 3152 rcu_read_unlock_bh(); 3153 } 3154 else if (index == NEIGH_LINK_TABLE) { 3155 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 3156 addr, NULL, skb->len); 3157 if (err < 0) 3158 goto out_kfree_skb; 3159 err = dev_queue_xmit(skb); 3160 } 3161 out: 3162 return err; 3163 out_kfree_skb: 3164 kfree_skb(skb); 3165 goto out; 3166 } 3167 EXPORT_SYMBOL(neigh_xmit); 3168 3169 #ifdef CONFIG_PROC_FS 3170 3171 static struct neighbour *neigh_get_first(struct seq_file *seq) 3172 { 3173 struct neigh_seq_state *state = seq->private; 3174 struct net *net = seq_file_net(seq); 3175 struct neigh_hash_table *nht = state->nht; 3176 struct neighbour *n = NULL; 3177 int bucket; 3178 3179 state->flags &= ~NEIGH_SEQ_IS_PNEIGH; 3180 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { 3181 n = rcu_dereference_bh(nht->hash_buckets[bucket]); 3182 3183 while (n) { 3184 if (!net_eq(dev_net(n->dev), net)) 3185 goto next; 3186 if (state->neigh_sub_iter) { 3187 loff_t fakep = 0; 3188 void *v; 3189 3190 v = state->neigh_sub_iter(state, n, &fakep); 3191 if (!v) 3192 goto next; 3193 } 3194 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3195 break; 3196 if (n->nud_state & ~NUD_NOARP) 3197 break; 3198 next: 3199 n = rcu_dereference_bh(n->next); 3200 } 3201 3202 if (n) 3203 break; 3204 } 3205 state->bucket = bucket; 3206 3207 return n; 3208 } 3209 3210 static struct neighbour *neigh_get_next(struct seq_file *seq, 3211 struct neighbour *n, 3212 loff_t *pos) 3213 { 3214 struct neigh_seq_state *state = seq->private; 3215 struct net *net = seq_file_net(seq); 3216 struct neigh_hash_table *nht = state->nht; 3217 3218 if (state->neigh_sub_iter) { 3219 void *v = state->neigh_sub_iter(state, n, pos); 3220 if (v) 3221 return n; 3222 } 3223 n = rcu_dereference_bh(n->next); 3224 3225 while (1) { 3226 while (n) { 3227 if (!net_eq(dev_net(n->dev), net)) 3228 goto next; 3229 if (state->neigh_sub_iter) { 3230 void *v = state->neigh_sub_iter(state, n, pos); 3231 if (v) 3232 return n; 3233 goto next; 3234 } 3235 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3236 break; 3237 3238 if (n->nud_state & ~NUD_NOARP) 3239 break; 3240 next: 3241 n = rcu_dereference_bh(n->next); 3242 } 3243 3244 if (n) 3245 break; 3246 3247 if (++state->bucket >= (1 << nht->hash_shift)) 3248 break; 3249 3250 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); 3251 } 3252 3253 if (n && pos) 3254 --(*pos); 3255 return n; 3256 } 3257 3258 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) 3259 { 3260 struct neighbour *n = neigh_get_first(seq); 3261 3262 if (n) { 3263 --(*pos); 3264 while (*pos) { 3265 n = neigh_get_next(seq, n, pos); 3266 if (!n) 3267 break; 3268 } 3269 } 3270 return *pos ? NULL : n; 3271 } 3272 3273 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq) 3274 { 3275 struct neigh_seq_state *state = seq->private; 3276 struct net *net = seq_file_net(seq); 3277 struct neigh_table *tbl = state->tbl; 3278 struct pneigh_entry *pn = NULL; 3279 int bucket; 3280 3281 state->flags |= NEIGH_SEQ_IS_PNEIGH; 3282 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { 3283 pn = tbl->phash_buckets[bucket]; 3284 while (pn && !net_eq(pneigh_net(pn), net)) 3285 pn = pn->next; 3286 if (pn) 3287 break; 3288 } 3289 state->bucket = bucket; 3290 3291 return pn; 3292 } 3293 3294 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq, 3295 struct pneigh_entry *pn, 3296 loff_t *pos) 3297 { 3298 struct neigh_seq_state *state = seq->private; 3299 struct net *net = seq_file_net(seq); 3300 struct neigh_table *tbl = state->tbl; 3301 3302 do { 3303 pn = pn->next; 3304 } while (pn && !net_eq(pneigh_net(pn), net)); 3305 3306 while (!pn) { 3307 if (++state->bucket > PNEIGH_HASHMASK) 3308 break; 3309 pn = tbl->phash_buckets[state->bucket]; 3310 while (pn && !net_eq(pneigh_net(pn), net)) 3311 pn = pn->next; 3312 if (pn) 3313 break; 3314 } 3315 3316 if (pn && pos) 3317 --(*pos); 3318 3319 return pn; 3320 } 3321 3322 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos) 3323 { 3324 struct pneigh_entry *pn = pneigh_get_first(seq); 3325 3326 if (pn) { 3327 --(*pos); 3328 while (*pos) { 3329 pn = pneigh_get_next(seq, pn, pos); 3330 if (!pn) 3331 break; 3332 } 3333 } 3334 return *pos ? NULL : pn; 3335 } 3336 3337 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) 3338 { 3339 struct neigh_seq_state *state = seq->private; 3340 void *rc; 3341 loff_t idxpos = *pos; 3342 3343 rc = neigh_get_idx(seq, &idxpos); 3344 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3345 rc = pneigh_get_idx(seq, &idxpos); 3346 3347 return rc; 3348 } 3349 3350 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) 3351 __acquires(tbl->lock) 3352 __acquires(rcu_bh) 3353 { 3354 struct neigh_seq_state *state = seq->private; 3355 3356 state->tbl = tbl; 3357 state->bucket = 0; 3358 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); 3359 3360 rcu_read_lock_bh(); 3361 state->nht = rcu_dereference_bh(tbl->nht); 3362 read_lock(&tbl->lock); 3363 3364 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; 3365 } 3366 EXPORT_SYMBOL(neigh_seq_start); 3367 3368 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3369 { 3370 struct neigh_seq_state *state; 3371 void *rc; 3372 3373 if (v == SEQ_START_TOKEN) { 3374 rc = neigh_get_first(seq); 3375 goto out; 3376 } 3377 3378 state = seq->private; 3379 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) { 3380 rc = neigh_get_next(seq, v, NULL); 3381 if (rc) 3382 goto out; 3383 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3384 rc = pneigh_get_first(seq); 3385 } else { 3386 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY); 3387 rc = pneigh_get_next(seq, v, NULL); 3388 } 3389 out: 3390 ++(*pos); 3391 return rc; 3392 } 3393 EXPORT_SYMBOL(neigh_seq_next); 3394 3395 void neigh_seq_stop(struct seq_file *seq, void *v) 3396 __releases(tbl->lock) 3397 __releases(rcu_bh) 3398 { 3399 struct neigh_seq_state *state = seq->private; 3400 struct neigh_table *tbl = state->tbl; 3401 3402 read_unlock(&tbl->lock); 3403 rcu_read_unlock_bh(); 3404 } 3405 EXPORT_SYMBOL(neigh_seq_stop); 3406 3407 /* statistics via seq_file */ 3408 3409 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 3410 { 3411 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3412 int cpu; 3413 3414 if (*pos == 0) 3415 return SEQ_START_TOKEN; 3416 3417 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { 3418 if (!cpu_possible(cpu)) 3419 continue; 3420 *pos = cpu+1; 3421 return per_cpu_ptr(tbl->stats, cpu); 3422 } 3423 return NULL; 3424 } 3425 3426 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3427 { 3428 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3429 int cpu; 3430 3431 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 3432 if (!cpu_possible(cpu)) 3433 continue; 3434 *pos = cpu+1; 3435 return per_cpu_ptr(tbl->stats, cpu); 3436 } 3437 (*pos)++; 3438 return NULL; 3439 } 3440 3441 static void neigh_stat_seq_stop(struct seq_file *seq, void *v) 3442 { 3443 3444 } 3445 3446 static int neigh_stat_seq_show(struct seq_file *seq, void *v) 3447 { 3448 struct neigh_table *tbl = pde_data(file_inode(seq->file)); 3449 struct neigh_statistics *st = v; 3450 3451 if (v == SEQ_START_TOKEN) { 3452 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n"); 3453 return 0; 3454 } 3455 3456 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " 3457 "%08lx %08lx %08lx " 3458 "%08lx %08lx %08lx\n", 3459 atomic_read(&tbl->entries), 3460 3461 st->allocs, 3462 st->destroys, 3463 st->hash_grows, 3464 3465 st->lookups, 3466 st->hits, 3467 3468 st->res_failed, 3469 3470 st->rcv_probes_mcast, 3471 st->rcv_probes_ucast, 3472 3473 st->periodic_gc_runs, 3474 st->forced_gc_runs, 3475 st->unres_discards, 3476 st->table_fulls 3477 ); 3478 3479 return 0; 3480 } 3481 3482 static const struct seq_operations neigh_stat_seq_ops = { 3483 .start = neigh_stat_seq_start, 3484 .next = neigh_stat_seq_next, 3485 .stop = neigh_stat_seq_stop, 3486 .show = neigh_stat_seq_show, 3487 }; 3488 #endif /* CONFIG_PROC_FS */ 3489 3490 static void __neigh_notify(struct neighbour *n, int type, int flags, 3491 u32 pid) 3492 { 3493 struct net *net = dev_net(n->dev); 3494 struct sk_buff *skb; 3495 int err = -ENOBUFS; 3496 3497 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC); 3498 if (skb == NULL) 3499 goto errout; 3500 3501 err = neigh_fill_info(skb, n, pid, 0, type, flags); 3502 if (err < 0) { 3503 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */ 3504 WARN_ON(err == -EMSGSIZE); 3505 kfree_skb(skb); 3506 goto errout; 3507 } 3508 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 3509 return; 3510 errout: 3511 if (err < 0) 3512 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 3513 } 3514 3515 void neigh_app_ns(struct neighbour *n) 3516 { 3517 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0); 3518 } 3519 EXPORT_SYMBOL(neigh_app_ns); 3520 3521 #ifdef CONFIG_SYSCTL 3522 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 3523 3524 static int proc_unres_qlen(struct ctl_table *ctl, int write, 3525 void *buffer, size_t *lenp, loff_t *ppos) 3526 { 3527 int size, ret; 3528 struct ctl_table tmp = *ctl; 3529 3530 tmp.extra1 = SYSCTL_ZERO; 3531 tmp.extra2 = &unres_qlen_max; 3532 tmp.data = &size; 3533 3534 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN); 3535 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3536 3537 if (write && !ret) 3538 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); 3539 return ret; 3540 } 3541 3542 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev, 3543 int family) 3544 { 3545 switch (family) { 3546 case AF_INET: 3547 return __in_dev_arp_parms_get_rcu(dev); 3548 case AF_INET6: 3549 return __in6_dev_nd_parms_get_rcu(dev); 3550 } 3551 return NULL; 3552 } 3553 3554 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p, 3555 int index) 3556 { 3557 struct net_device *dev; 3558 int family = neigh_parms_family(p); 3559 3560 rcu_read_lock(); 3561 for_each_netdev_rcu(net, dev) { 3562 struct neigh_parms *dst_p = 3563 neigh_get_dev_parms_rcu(dev, family); 3564 3565 if (dst_p && !test_bit(index, dst_p->data_state)) 3566 dst_p->data[index] = p->data[index]; 3567 } 3568 rcu_read_unlock(); 3569 } 3570 3571 static void neigh_proc_update(struct ctl_table *ctl, int write) 3572 { 3573 struct net_device *dev = ctl->extra1; 3574 struct neigh_parms *p = ctl->extra2; 3575 struct net *net = neigh_parms_net(p); 3576 int index = (int *) ctl->data - p->data; 3577 3578 if (!write) 3579 return; 3580 3581 set_bit(index, p->data_state); 3582 if (index == NEIGH_VAR_DELAY_PROBE_TIME) 3583 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 3584 if (!dev) /* NULL dev means this is default value */ 3585 neigh_copy_dflt_parms(net, p, index); 3586 } 3587 3588 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, 3589 void *buffer, size_t *lenp, 3590 loff_t *ppos) 3591 { 3592 struct ctl_table tmp = *ctl; 3593 int ret; 3594 3595 tmp.extra1 = SYSCTL_ZERO; 3596 tmp.extra2 = SYSCTL_INT_MAX; 3597 3598 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3599 neigh_proc_update(ctl, write); 3600 return ret; 3601 } 3602 3603 static int neigh_proc_dointvec_ms_jiffies_positive(struct ctl_table *ctl, int write, 3604 void *buffer, size_t *lenp, loff_t *ppos) 3605 { 3606 struct ctl_table tmp = *ctl; 3607 int ret; 3608 3609 int min = msecs_to_jiffies(1); 3610 3611 tmp.extra1 = &min; 3612 tmp.extra2 = NULL; 3613 3614 ret = proc_dointvec_ms_jiffies_minmax(&tmp, write, buffer, lenp, ppos); 3615 neigh_proc_update(ctl, write); 3616 return ret; 3617 } 3618 3619 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer, 3620 size_t *lenp, loff_t *ppos) 3621 { 3622 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 3623 3624 neigh_proc_update(ctl, write); 3625 return ret; 3626 } 3627 EXPORT_SYMBOL(neigh_proc_dointvec); 3628 3629 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer, 3630 size_t *lenp, loff_t *ppos) 3631 { 3632 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3633 3634 neigh_proc_update(ctl, write); 3635 return ret; 3636 } 3637 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies); 3638 3639 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write, 3640 void *buffer, size_t *lenp, 3641 loff_t *ppos) 3642 { 3643 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos); 3644 3645 neigh_proc_update(ctl, write); 3646 return ret; 3647 } 3648 3649 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 3650 void *buffer, size_t *lenp, loff_t *ppos) 3651 { 3652 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3653 3654 neigh_proc_update(ctl, write); 3655 return ret; 3656 } 3657 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies); 3658 3659 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, 3660 void *buffer, size_t *lenp, 3661 loff_t *ppos) 3662 { 3663 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos); 3664 3665 neigh_proc_update(ctl, write); 3666 return ret; 3667 } 3668 3669 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 3670 void *buffer, size_t *lenp, 3671 loff_t *ppos) 3672 { 3673 struct neigh_parms *p = ctl->extra2; 3674 int ret; 3675 3676 if (strcmp(ctl->procname, "base_reachable_time") == 0) 3677 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3678 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) 3679 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3680 else 3681 ret = -1; 3682 3683 if (write && ret == 0) { 3684 /* update reachable_time as well, otherwise, the change will 3685 * only be effective after the next time neigh_periodic_work 3686 * decides to recompute it 3687 */ 3688 p->reachable_time = 3689 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 3690 } 3691 return ret; 3692 } 3693 3694 #define NEIGH_PARMS_DATA_OFFSET(index) \ 3695 (&((struct neigh_parms *) 0)->data[index]) 3696 3697 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \ 3698 [NEIGH_VAR_ ## attr] = { \ 3699 .procname = name, \ 3700 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \ 3701 .maxlen = sizeof(int), \ 3702 .mode = mval, \ 3703 .proc_handler = proc, \ 3704 } 3705 3706 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \ 3707 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax) 3708 3709 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \ 3710 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies) 3711 3712 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \ 3713 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies) 3714 3715 #define NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(attr, name) \ 3716 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies_positive) 3717 3718 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \ 3719 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies) 3720 3721 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \ 3722 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen) 3723 3724 static struct neigh_sysctl_table { 3725 struct ctl_table_header *sysctl_header; 3726 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; 3727 } neigh_sysctl_template __read_mostly = { 3728 .neigh_vars = { 3729 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"), 3730 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"), 3731 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"), 3732 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"), 3733 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"), 3734 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"), 3735 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"), 3736 NEIGH_SYSCTL_MS_JIFFIES_POSITIVE_ENTRY(INTERVAL_PROBE_TIME_MS, 3737 "interval_probe_time_ms"), 3738 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"), 3739 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"), 3740 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"), 3741 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"), 3742 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"), 3743 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"), 3744 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"), 3745 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"), 3746 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"), 3747 [NEIGH_VAR_GC_INTERVAL] = { 3748 .procname = "gc_interval", 3749 .maxlen = sizeof(int), 3750 .mode = 0644, 3751 .proc_handler = proc_dointvec_jiffies, 3752 }, 3753 [NEIGH_VAR_GC_THRESH1] = { 3754 .procname = "gc_thresh1", 3755 .maxlen = sizeof(int), 3756 .mode = 0644, 3757 .extra1 = SYSCTL_ZERO, 3758 .extra2 = SYSCTL_INT_MAX, 3759 .proc_handler = proc_dointvec_minmax, 3760 }, 3761 [NEIGH_VAR_GC_THRESH2] = { 3762 .procname = "gc_thresh2", 3763 .maxlen = sizeof(int), 3764 .mode = 0644, 3765 .extra1 = SYSCTL_ZERO, 3766 .extra2 = SYSCTL_INT_MAX, 3767 .proc_handler = proc_dointvec_minmax, 3768 }, 3769 [NEIGH_VAR_GC_THRESH3] = { 3770 .procname = "gc_thresh3", 3771 .maxlen = sizeof(int), 3772 .mode = 0644, 3773 .extra1 = SYSCTL_ZERO, 3774 .extra2 = SYSCTL_INT_MAX, 3775 .proc_handler = proc_dointvec_minmax, 3776 }, 3777 {}, 3778 }, 3779 }; 3780 3781 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 3782 proc_handler *handler) 3783 { 3784 int i; 3785 struct neigh_sysctl_table *t; 3786 const char *dev_name_source; 3787 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ]; 3788 char *p_name; 3789 3790 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL_ACCOUNT); 3791 if (!t) 3792 goto err; 3793 3794 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { 3795 t->neigh_vars[i].data += (long) p; 3796 t->neigh_vars[i].extra1 = dev; 3797 t->neigh_vars[i].extra2 = p; 3798 } 3799 3800 if (dev) { 3801 dev_name_source = dev->name; 3802 /* Terminate the table early */ 3803 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3804 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3805 } else { 3806 struct neigh_table *tbl = p->tbl; 3807 dev_name_source = "default"; 3808 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval; 3809 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1; 3810 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2; 3811 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3; 3812 } 3813 3814 if (handler) { 3815 /* RetransTime */ 3816 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler; 3817 /* ReachableTime */ 3818 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler; 3819 /* RetransTime (in milliseconds)*/ 3820 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3821 /* ReachableTime (in milliseconds) */ 3822 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3823 } else { 3824 /* Those handlers will update p->reachable_time after 3825 * base_reachable_time(_ms) is set to ensure the new timer starts being 3826 * applied after the next neighbour update instead of waiting for 3827 * neigh_periodic_work to update its value (can be multiple minutes) 3828 * So any handler that replaces them should do this as well 3829 */ 3830 /* ReachableTime */ 3831 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = 3832 neigh_proc_base_reachable_time; 3833 /* ReachableTime (in milliseconds) */ 3834 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = 3835 neigh_proc_base_reachable_time; 3836 } 3837 3838 switch (neigh_parms_family(p)) { 3839 case AF_INET: 3840 p_name = "ipv4"; 3841 break; 3842 case AF_INET6: 3843 p_name = "ipv6"; 3844 break; 3845 default: 3846 BUG(); 3847 } 3848 3849 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", 3850 p_name, dev_name_source); 3851 t->sysctl_header = 3852 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars); 3853 if (!t->sysctl_header) 3854 goto free; 3855 3856 p->sysctl_table = t; 3857 return 0; 3858 3859 free: 3860 kfree(t); 3861 err: 3862 return -ENOBUFS; 3863 } 3864 EXPORT_SYMBOL(neigh_sysctl_register); 3865 3866 void neigh_sysctl_unregister(struct neigh_parms *p) 3867 { 3868 if (p->sysctl_table) { 3869 struct neigh_sysctl_table *t = p->sysctl_table; 3870 p->sysctl_table = NULL; 3871 unregister_net_sysctl_table(t->sysctl_header); 3872 kfree(t); 3873 } 3874 } 3875 EXPORT_SYMBOL(neigh_sysctl_unregister); 3876 3877 #endif /* CONFIG_SYSCTL */ 3878 3879 static int __init neigh_init(void) 3880 { 3881 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0); 3882 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0); 3883 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0); 3884 3885 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, 3886 0); 3887 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0); 3888 3889 return 0; 3890 } 3891 3892 subsys_initcall(neigh_init); 3893