// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans and updates of hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     That would deadlock if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state. However, the same lock is used to protect other
   entry fields as well:
   - the timer
   - the resolution queue

   Again, nothing clever should be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simple and to make no
   callbacks into the neighbour tables.
 */

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is a uniform random distribution over the interval
 * (1/2)*base ... (3/2)*base. It matches the default IPv6 behaviour
 * and is not overridable, because it is a genuinely reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
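	/* base/2 plus uniform[0, base) yields uniform[base/2, 3*base/2);
	 * e.g. base = 30 * HZ spreads reachable_time over 15..45 seconds.
	 */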
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);

static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}

static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	if (n->dead)
		goto out;

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

out:
	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}

static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}

static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}

bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))
				remove = true;
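			/* drop n->lock before neigh_remove_one(), which
			 * re-takes it via neigh_del()
			 */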
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone is still using it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill the timers etc. and move
				   the entry to a safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	= n->used = now;
	n->nud_state	= NUD_NONE;
	n->output	= neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	= neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		= tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		= 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
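/* Freed via call_rcu() so that lockless readers still traversing the
 * old table under rcu_read_lock_bh() are guaranteed to have finished.
 */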
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);
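	/* Fast path: search under the read lock; creation below retakes
	 * tbl->lock for writing once the new entry is fully set up.
	 */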
	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
		NEIGH_VAR(p, MCAST_PROBES));
}

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a delicate spot: error_report is a complicated
	   routine. In particular, it can end up hitting this very
	   same neighbour entry!

	   So we take care here to avoid a dead loop.
						--ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
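/* Returns 0 if the skb may be transmitted right away; 1 if resolution
 * is still in progress (the skb was queued) or has failed (the skb was
 * dropped).
 */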
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}



/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if none was supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
		if the new one differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
		lladdr instead of overriding it
		if the new one differs.
	NEIGH_UPDATE_F_ADMIN means that the change is administrative.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
		NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
		to be a router.

   Caller MUST hold a reference count on the entry.
 */
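/* Note: NEIGH_UPDATE_F_EXT_LEARNED (honoured only together with
 * NEIGH_UPDATE_F_ADMIN) toggles NTF_EXT_LEARNED and with it the entry's
 * exemption from gc; see neigh_update_ext_learned() and
 * neigh_update_gc_list() above.
 */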
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}
	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare the new lladdr with the cached one. */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update the confirmed timestamp for the neighbour entry after we
	 * receive an ARP packet, even if it doesn't change the IP to MAC
	 * binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update the timestamp only once we know we will make a change to
	 * the neighbour entry. Otherwise we risk moving the locktime window
	 * with noop updates and ignoring relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);

/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
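				/* re-inject the buffered probe into the
				 * protocol's receive path (e.g. arp_process
				 * via parp_redo for ARP)
				 */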
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long sched_next = jiffies +
			prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;
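/* Registry of the per-protocol tables (ARP, ND, DECnet), indexed by
 * NEIGH_ARP_TABLE etc.; see neigh_find_table() below.
 */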
static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
				  &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}

const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
	[NDA_FDB_EXT_ATTRS]	= { .type = NLA_NESTED },
};
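/* RTM_DELNEIGH handler: marks the entry NUD_FAILED via __neigh_update()
 * and then unlinks it from the hash table if it is otherwise unused.
 */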
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		    NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool exempt_from_gc;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				     NETLINK_CB(skb).portid, extack);

	neigh_release(neigh);

out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximate value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;
tbl->id) < 0 || 2142 neightbl_fill_parms(skb, parms) < 0) 2143 goto errout; 2144 2145 read_unlock_bh(&tbl->lock); 2146 nlmsg_end(skb, nlh); 2147 return 0; 2148 errout: 2149 read_unlock_bh(&tbl->lock); 2150 nlmsg_cancel(skb, nlh); 2151 return -EMSGSIZE; 2152 } 2153 2154 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { 2155 [NDTA_NAME] = { .type = NLA_STRING }, 2156 [NDTA_THRESH1] = { .type = NLA_U32 }, 2157 [NDTA_THRESH2] = { .type = NLA_U32 }, 2158 [NDTA_THRESH3] = { .type = NLA_U32 }, 2159 [NDTA_GC_INTERVAL] = { .type = NLA_U64 }, 2160 [NDTA_PARMS] = { .type = NLA_NESTED }, 2161 }; 2162 2163 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { 2164 [NDTPA_IFINDEX] = { .type = NLA_U32 }, 2165 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, 2166 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, 2167 [NDTPA_APP_PROBES] = { .type = NLA_U32 }, 2168 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, 2169 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 }, 2170 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 }, 2171 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 }, 2172 [NDTPA_GC_STALETIME] = { .type = NLA_U64 }, 2173 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 }, 2174 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 }, 2175 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 }, 2176 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 }, 2177 [NDTPA_LOCKTIME] = { .type = NLA_U64 }, 2178 }; 2179 2180 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, 2181 struct netlink_ext_ack *extack) 2182 { 2183 struct net *net = sock_net(skb->sk); 2184 struct neigh_table *tbl; 2185 struct ndtmsg *ndtmsg; 2186 struct nlattr *tb[NDTA_MAX+1]; 2187 bool found = false; 2188 int err, tidx; 2189 2190 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, 2191 nl_neightbl_policy, extack); 2192 if (err < 0) 2193 goto errout; 2194 2195 if (tb[NDTA_NAME] == NULL) { 2196 err = -EINVAL; 2197 goto errout; 2198 } 2199 2200 ndtmsg = nlmsg_data(nlh); 2201 2202 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2203 tbl = neigh_tables[tidx]; 2204 if (!tbl) 2205 continue; 2206 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) 2207 continue; 2208 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) { 2209 found = true; 2210 break; 2211 } 2212 } 2213 2214 if (!found) 2215 return -ENOENT; 2216 2217 /* 2218 * We acquire tbl->lock to be nice to the periodic timers and 2219 * make sure they always see a consistent set of values. 
2220 */ 2221 write_lock_bh(&tbl->lock); 2222 2223 if (tb[NDTA_PARMS]) { 2224 struct nlattr *tbp[NDTPA_MAX+1]; 2225 struct neigh_parms *p; 2226 int i, ifindex = 0; 2227 2228 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX, 2229 tb[NDTA_PARMS], 2230 nl_ntbl_parm_policy, extack); 2231 if (err < 0) 2232 goto errout_tbl_lock; 2233 2234 if (tbp[NDTPA_IFINDEX]) 2235 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); 2236 2237 p = lookup_neigh_parms(tbl, net, ifindex); 2238 if (p == NULL) { 2239 err = -ENOENT; 2240 goto errout_tbl_lock; 2241 } 2242 2243 for (i = 1; i <= NDTPA_MAX; i++) { 2244 if (tbp[i] == NULL) 2245 continue; 2246 2247 switch (i) { 2248 case NDTPA_QUEUE_LEN: 2249 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2250 nla_get_u32(tbp[i]) * 2251 SKB_TRUESIZE(ETH_FRAME_LEN)); 2252 break; 2253 case NDTPA_QUEUE_LENBYTES: 2254 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2255 nla_get_u32(tbp[i])); 2256 break; 2257 case NDTPA_PROXY_QLEN: 2258 NEIGH_VAR_SET(p, PROXY_QLEN, 2259 nla_get_u32(tbp[i])); 2260 break; 2261 case NDTPA_APP_PROBES: 2262 NEIGH_VAR_SET(p, APP_PROBES, 2263 nla_get_u32(tbp[i])); 2264 break; 2265 case NDTPA_UCAST_PROBES: 2266 NEIGH_VAR_SET(p, UCAST_PROBES, 2267 nla_get_u32(tbp[i])); 2268 break; 2269 case NDTPA_MCAST_PROBES: 2270 NEIGH_VAR_SET(p, MCAST_PROBES, 2271 nla_get_u32(tbp[i])); 2272 break; 2273 case NDTPA_MCAST_REPROBES: 2274 NEIGH_VAR_SET(p, MCAST_REPROBES, 2275 nla_get_u32(tbp[i])); 2276 break; 2277 case NDTPA_BASE_REACHABLE_TIME: 2278 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2279 nla_get_msecs(tbp[i])); 2280 /* update reachable_time as well, otherwise, the change will 2281 * only be effective after the next time neigh_periodic_work 2282 * decides to recompute it (can be multiple minutes) 2283 */ 2284 p->reachable_time = 2285 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2286 break; 2287 case NDTPA_GC_STALETIME: 2288 NEIGH_VAR_SET(p, GC_STALETIME, 2289 nla_get_msecs(tbp[i])); 2290 break; 2291 case NDTPA_DELAY_PROBE_TIME: 2292 NEIGH_VAR_SET(p, DELAY_PROBE_TIME, 2293 nla_get_msecs(tbp[i])); 2294 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 2295 break; 2296 case NDTPA_RETRANS_TIME: 2297 NEIGH_VAR_SET(p, RETRANS_TIME, 2298 nla_get_msecs(tbp[i])); 2299 break; 2300 case NDTPA_ANYCAST_DELAY: 2301 NEIGH_VAR_SET(p, ANYCAST_DELAY, 2302 nla_get_msecs(tbp[i])); 2303 break; 2304 case NDTPA_PROXY_DELAY: 2305 NEIGH_VAR_SET(p, PROXY_DELAY, 2306 nla_get_msecs(tbp[i])); 2307 break; 2308 case NDTPA_LOCKTIME: 2309 NEIGH_VAR_SET(p, LOCKTIME, 2310 nla_get_msecs(tbp[i])); 2311 break; 2312 } 2313 } 2314 } 2315 2316 err = -ENOENT; 2317 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] || 2318 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) && 2319 !net_eq(net, &init_net)) 2320 goto errout_tbl_lock; 2321 2322 if (tb[NDTA_THRESH1]) 2323 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]); 2324 2325 if (tb[NDTA_THRESH2]) 2326 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]); 2327 2328 if (tb[NDTA_THRESH3]) 2329 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]); 2330 2331 if (tb[NDTA_GC_INTERVAL]) 2332 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]); 2333 2334 err = 0; 2335 2336 errout_tbl_lock: 2337 write_unlock_bh(&tbl->lock); 2338 errout: 2339 return err; 2340 } 2341 2342 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh, 2343 struct netlink_ext_ack *extack) 2344 { 2345 struct ndtmsg *ndtm; 2346 2347 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) { 2348 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request"); 2349 return -EINVAL; 2350 } 2351 2352 ndtm = nlmsg_data(nlh); 
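/* Strict validation: the reserved pad fields must be zero and no
 * attributes may follow the ndtmsg header, since a table dump request
 * carries no filters.
 */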
2353 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) { 2354 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request"); 2355 return -EINVAL; 2356 } 2357 2358 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) { 2359 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request"); 2360 return -EINVAL; 2361 } 2362 2363 return 0; 2364 } 2365 2366 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2367 { 2368 const struct nlmsghdr *nlh = cb->nlh; 2369 struct net *net = sock_net(skb->sk); 2370 int family, tidx, nidx = 0; 2371 int tbl_skip = cb->args[0]; 2372 int neigh_skip = cb->args[1]; 2373 struct neigh_table *tbl; 2374 2375 if (cb->strict_check) { 2376 int err = neightbl_valid_dump_info(nlh, cb->extack); 2377 2378 if (err < 0) 2379 return err; 2380 } 2381 2382 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2383 2384 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2385 struct neigh_parms *p; 2386 2387 tbl = neigh_tables[tidx]; 2388 if (!tbl) 2389 continue; 2390 2391 if (tidx < tbl_skip || (family && tbl->family != family)) 2392 continue; 2393 2394 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid, 2395 nlh->nlmsg_seq, RTM_NEWNEIGHTBL, 2396 NLM_F_MULTI) < 0) 2397 break; 2398 2399 nidx = 0; 2400 p = list_next_entry(&tbl->parms, list); 2401 list_for_each_entry_from(p, &tbl->parms_list, list) { 2402 if (!net_eq(neigh_parms_net(p), net)) 2403 continue; 2404 2405 if (nidx < neigh_skip) 2406 goto next; 2407 2408 if (neightbl_fill_param_info(skb, tbl, p, 2409 NETLINK_CB(cb->skb).portid, 2410 nlh->nlmsg_seq, 2411 RTM_NEWNEIGHTBL, 2412 NLM_F_MULTI) < 0) 2413 goto out; 2414 next: 2415 nidx++; 2416 } 2417 2418 neigh_skip = 0; 2419 } 2420 out: 2421 cb->args[0] = tidx; 2422 cb->args[1] = nidx; 2423 2424 return skb->len; 2425 } 2426 2427 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, 2428 u32 pid, u32 seq, int type, unsigned int flags) 2429 { 2430 unsigned long now = jiffies; 2431 struct nda_cacheinfo ci; 2432 struct nlmsghdr *nlh; 2433 struct ndmsg *ndm; 2434 2435 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2436 if (nlh == NULL) 2437 return -EMSGSIZE; 2438 2439 ndm = nlmsg_data(nlh); 2440 ndm->ndm_family = neigh->ops->family; 2441 ndm->ndm_pad1 = 0; 2442 ndm->ndm_pad2 = 0; 2443 ndm->ndm_flags = neigh->flags; 2444 ndm->ndm_type = neigh->type; 2445 ndm->ndm_ifindex = neigh->dev->ifindex; 2446 2447 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key)) 2448 goto nla_put_failure; 2449 2450 read_lock_bh(&neigh->lock); 2451 ndm->ndm_state = neigh->nud_state; 2452 if (neigh->nud_state & NUD_VALID) { 2453 char haddr[MAX_ADDR_LEN]; 2454 2455 neigh_ha_snapshot(haddr, neigh, neigh->dev); 2456 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) { 2457 read_unlock_bh(&neigh->lock); 2458 goto nla_put_failure; 2459 } 2460 } 2461 2462 ci.ndm_used = jiffies_to_clock_t(now - neigh->used); 2463 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed); 2464 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated); 2465 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1; 2466 read_unlock_bh(&neigh->lock); 2467 2468 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) || 2469 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 2470 goto nla_put_failure; 2471 2472 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol)) 2473 goto nla_put_failure; 2474 2475 nlmsg_end(skb, nlh); 2476 return 0; 2477 2478 nla_put_failure: 2479 nlmsg_cancel(skb, nlh); 2480 return 
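/* The partly built message was cancelled above; dump callers treat
 * -EMSGSIZE as "skb full, resume later", while __neigh_notify() below
 * WARNs on it, since its buffer is sized by neigh_nlmsg_size().
 */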
-EMSGSIZE; 2481 } 2482 2483 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, 2484 u32 pid, u32 seq, int type, unsigned int flags, 2485 struct neigh_table *tbl) 2486 { 2487 struct nlmsghdr *nlh; 2488 struct ndmsg *ndm; 2489 2490 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2491 if (nlh == NULL) 2492 return -EMSGSIZE; 2493 2494 ndm = nlmsg_data(nlh); 2495 ndm->ndm_family = tbl->family; 2496 ndm->ndm_pad1 = 0; 2497 ndm->ndm_pad2 = 0; 2498 ndm->ndm_flags = pn->flags | NTF_PROXY; 2499 ndm->ndm_type = RTN_UNICAST; 2500 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; 2501 ndm->ndm_state = NUD_NONE; 2502 2503 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) 2504 goto nla_put_failure; 2505 2506 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol)) 2507 goto nla_put_failure; 2508 2509 nlmsg_end(skb, nlh); 2510 return 0; 2511 2512 nla_put_failure: 2513 nlmsg_cancel(skb, nlh); 2514 return -EMSGSIZE; 2515 } 2516 2517 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid) 2518 { 2519 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 2520 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid); 2521 } 2522 2523 static bool neigh_master_filtered(struct net_device *dev, int master_idx) 2524 { 2525 struct net_device *master; 2526 2527 if (!master_idx) 2528 return false; 2529 2530 master = dev ? netdev_master_upper_dev_get(dev) : NULL; 2531 2532 /* 0 is already used to denote NDA_MASTER wasn't passed, therefore need another 2533 * invalid value for ifindex to denote "no master". 2534 */ 2535 if (master_idx == -1) 2536 return !!master; 2537 2538 if (!master || master->ifindex != master_idx) 2539 return true; 2540 2541 return false; 2542 } 2543 2544 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx) 2545 { 2546 if (filter_idx && (!dev || dev->ifindex != filter_idx)) 2547 return true; 2548 2549 return false; 2550 } 2551 2552 struct neigh_dump_filter { 2553 int master_idx; 2554 int dev_idx; 2555 }; 2556 2557 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2558 struct netlink_callback *cb, 2559 struct neigh_dump_filter *filter) 2560 { 2561 struct net *net = sock_net(skb->sk); 2562 struct neighbour *n; 2563 int rc, h, s_h = cb->args[1]; 2564 int idx, s_idx = idx = cb->args[2]; 2565 struct neigh_hash_table *nht; 2566 unsigned int flags = NLM_F_MULTI; 2567 2568 if (filter->dev_idx || filter->master_idx) 2569 flags |= NLM_F_DUMP_FILTERED; 2570 2571 rcu_read_lock_bh(); 2572 nht = rcu_dereference_bh(tbl->nht); 2573 2574 for (h = s_h; h < (1 << nht->hash_shift); h++) { 2575 if (h > s_h) 2576 s_idx = 0; 2577 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; 2578 n != NULL; 2579 n = rcu_dereference_bh(n->next)) { 2580 if (idx < s_idx || !net_eq(dev_net(n->dev), net)) 2581 goto next; 2582 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2583 neigh_master_filtered(n->dev, filter->master_idx)) 2584 goto next; 2585 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2586 cb->nlh->nlmsg_seq, 2587 RTM_NEWNEIGH, 2588 flags) < 0) { 2589 rc = -1; 2590 goto out; 2591 } 2592 next: 2593 idx++; 2594 } 2595 } 2596 rc = skb->len; 2597 out: 2598 rcu_read_unlock_bh(); 2599 cb->args[1] = h; 2600 cb->args[2] = idx; 2601 return rc; 2602 } 2603 2604 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2605 struct netlink_callback *cb, 2606 struct neigh_dump_filter *filter) 2607 { 2608 struct pneigh_entry *n; 2609 struct net *net = sock_net(skb->sk); 2610 int rc, h, s_h = cb->args[3]; 
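/* Continuation state shared with neigh_dump_info(): cb->args[0] is the
 * table index, args[1]/args[2] track the bucket and entry for the main
 * hash, and args[3]/args[4] (used here) do the same for proxy entries.
 */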
2611 int idx, s_idx = idx = cb->args[4]; 2612 unsigned int flags = NLM_F_MULTI; 2613 2614 if (filter->dev_idx || filter->master_idx) 2615 flags |= NLM_F_DUMP_FILTERED; 2616 2617 read_lock_bh(&tbl->lock); 2618 2619 for (h = s_h; h <= PNEIGH_HASHMASK; h++) { 2620 if (h > s_h) 2621 s_idx = 0; 2622 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2623 if (idx < s_idx || pneigh_net(n) != net) 2624 goto next; 2625 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2626 neigh_master_filtered(n->dev, filter->master_idx)) 2627 goto next; 2628 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2629 cb->nlh->nlmsg_seq, 2630 RTM_NEWNEIGH, flags, tbl) < 0) { 2631 read_unlock_bh(&tbl->lock); 2632 rc = -1; 2633 goto out; 2634 } 2635 next: 2636 idx++; 2637 } 2638 } 2639 2640 read_unlock_bh(&tbl->lock); 2641 rc = skb->len; 2642 out: 2643 cb->args[3] = h; 2644 cb->args[4] = idx; 2645 return rc; 2646 2647 } 2648 2649 static int neigh_valid_dump_req(const struct nlmsghdr *nlh, 2650 bool strict_check, 2651 struct neigh_dump_filter *filter, 2652 struct netlink_ext_ack *extack) 2653 { 2654 struct nlattr *tb[NDA_MAX + 1]; 2655 int err, i; 2656 2657 if (strict_check) { 2658 struct ndmsg *ndm; 2659 2660 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2661 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request"); 2662 return -EINVAL; 2663 } 2664 2665 ndm = nlmsg_data(nlh); 2666 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex || 2667 ndm->ndm_state || ndm->ndm_type) { 2668 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request"); 2669 return -EINVAL; 2670 } 2671 2672 if (ndm->ndm_flags & ~NTF_PROXY) { 2673 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request"); 2674 return -EINVAL; 2675 } 2676 2677 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), 2678 tb, NDA_MAX, nda_policy, 2679 extack); 2680 } else { 2681 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb, 2682 NDA_MAX, nda_policy, extack); 2683 } 2684 if (err < 0) 2685 return err; 2686 2687 for (i = 0; i <= NDA_MAX; ++i) { 2688 if (!tb[i]) 2689 continue; 2690 2691 /* all new attributes should require strict_check */ 2692 switch (i) { 2693 case NDA_IFINDEX: 2694 filter->dev_idx = nla_get_u32(tb[i]); 2695 break; 2696 case NDA_MASTER: 2697 filter->master_idx = nla_get_u32(tb[i]); 2698 break; 2699 default: 2700 if (strict_check) { 2701 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request"); 2702 return -EINVAL; 2703 } 2704 } 2705 } 2706 2707 return 0; 2708 } 2709 2710 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2711 { 2712 const struct nlmsghdr *nlh = cb->nlh; 2713 struct neigh_dump_filter filter = {}; 2714 struct neigh_table *tbl; 2715 int t, family, s_t; 2716 int proxy = 0; 2717 int err; 2718 2719 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2720 2721 /* check for full ndmsg structure presence, family member is 2722 * the same for both structures 2723 */ 2724 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) && 2725 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY) 2726 proxy = 1; 2727 2728 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack); 2729 if (err < 0 && cb->strict_check) 2730 return err; 2731 2732 s_t = cb->args[0]; 2733 2734 for (t = 0; t < NEIGH_NR_TABLES; t++) { 2735 tbl = neigh_tables[t]; 2736 2737 if (!tbl) 2738 continue; 2739 if (t < s_t || (family && tbl->family != family)) 2740 continue; 2741 if (t > s_t) 2742 memset(&cb->args[1], 0, sizeof(cb->args) - 2743 
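/* Moving on to a new table: wipe every continuation slot
 * except args[0], which records the table itself.
 */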
sizeof(cb->args[0])); 2744 if (proxy) 2745 err = pneigh_dump_table(tbl, skb, cb, &filter); 2746 else 2747 err = neigh_dump_table(tbl, skb, cb, &filter); 2748 if (err < 0) 2749 break; 2750 } 2751 2752 cb->args[0] = t; 2753 return skb->len; 2754 } 2755 2756 static int neigh_valid_get_req(const struct nlmsghdr *nlh, 2757 struct neigh_table **tbl, 2758 void **dst, int *dev_idx, u8 *ndm_flags, 2759 struct netlink_ext_ack *extack) 2760 { 2761 struct nlattr *tb[NDA_MAX + 1]; 2762 struct ndmsg *ndm; 2763 int err, i; 2764 2765 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2766 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request"); 2767 return -EINVAL; 2768 } 2769 2770 ndm = nlmsg_data(nlh); 2771 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 2772 ndm->ndm_type) { 2773 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request"); 2774 return -EINVAL; 2775 } 2776 2777 if (ndm->ndm_flags & ~NTF_PROXY) { 2778 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request"); 2779 return -EINVAL; 2780 } 2781 2782 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 2783 NDA_MAX, nda_policy, extack); 2784 if (err < 0) 2785 return err; 2786 2787 *ndm_flags = ndm->ndm_flags; 2788 *dev_idx = ndm->ndm_ifindex; 2789 *tbl = neigh_find_table(ndm->ndm_family); 2790 if (*tbl == NULL) { 2791 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request"); 2792 return -EAFNOSUPPORT; 2793 } 2794 2795 for (i = 0; i <= NDA_MAX; ++i) { 2796 if (!tb[i]) 2797 continue; 2798 2799 switch (i) { 2800 case NDA_DST: 2801 if (nla_len(tb[i]) != (int)(*tbl)->key_len) { 2802 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request"); 2803 return -EINVAL; 2804 } 2805 *dst = nla_data(tb[i]); 2806 break; 2807 default: 2808 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request"); 2809 return -EINVAL; 2810 } 2811 } 2812 2813 return 0; 2814 } 2815 2816 static inline size_t neigh_nlmsg_size(void) 2817 { 2818 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2819 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2820 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */ 2821 + nla_total_size(sizeof(struct nda_cacheinfo)) 2822 + nla_total_size(4) /* NDA_PROBES */ 2823 + nla_total_size(1); /* NDA_PROTOCOL */ 2824 } 2825 2826 static int neigh_get_reply(struct net *net, struct neighbour *neigh, 2827 u32 pid, u32 seq) 2828 { 2829 struct sk_buff *skb; 2830 int err = 0; 2831 2832 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL); 2833 if (!skb) 2834 return -ENOBUFS; 2835 2836 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0); 2837 if (err) { 2838 kfree_skb(skb); 2839 goto errout; 2840 } 2841 2842 err = rtnl_unicast(skb, net, pid); 2843 errout: 2844 return err; 2845 } 2846 2847 static inline size_t pneigh_nlmsg_size(void) 2848 { 2849 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2850 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2851 + nla_total_size(1); /* NDA_PROTOCOL */ 2852 } 2853 2854 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh, 2855 u32 pid, u32 seq, struct neigh_table *tbl) 2856 { 2857 struct sk_buff *skb; 2858 int err = 0; 2859 2860 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL); 2861 if (!skb) 2862 return -ENOBUFS; 2863 2864 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl); 2865 if (err) { 2866 kfree_skb(skb); 2867 goto errout; 2868 } 2869 2870 err = rtnl_unicast(skb, net, pid); 2871 errout: 2872 return err; 2873 } 2874 2875 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 2876 
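/* Handler for RTM_GETNEIGH requests that ask for a single entry rather
 * than a dump: validate the header, resolve the device, then answer from
 * either the proxy table or the neighbour hash via rtnl_unicast().
 */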
struct netlink_ext_ack *extack) 2877 { 2878 struct net *net = sock_net(in_skb->sk); 2879 struct net_device *dev = NULL; 2880 struct neigh_table *tbl = NULL; 2881 struct neighbour *neigh; 2882 void *dst = NULL; 2883 u8 ndm_flags = 0; 2884 int dev_idx = 0; 2885 int err; 2886 2887 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags, 2888 extack); 2889 if (err < 0) 2890 return err; 2891 2892 if (dev_idx) { 2893 dev = __dev_get_by_index(net, dev_idx); 2894 if (!dev) { 2895 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 2896 return -ENODEV; 2897 } 2898 } 2899 2900 if (!dst) { 2901 NL_SET_ERR_MSG(extack, "Network address not specified"); 2902 return -EINVAL; 2903 } 2904 2905 if (ndm_flags & NTF_PROXY) { 2906 struct pneigh_entry *pn; 2907 2908 pn = pneigh_lookup(tbl, net, dst, dev, 0); 2909 if (!pn) { 2910 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found"); 2911 return -ENOENT; 2912 } 2913 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid, 2914 nlh->nlmsg_seq, tbl); 2915 } 2916 2917 if (!dev) { 2918 NL_SET_ERR_MSG(extack, "No device specified"); 2919 return -EINVAL; 2920 } 2921 2922 neigh = neigh_lookup(tbl, dst, dev); 2923 if (!neigh) { 2924 NL_SET_ERR_MSG(extack, "Neighbour entry not found"); 2925 return -ENOENT; 2926 } 2927 2928 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid, 2929 nlh->nlmsg_seq); 2930 2931 neigh_release(neigh); 2932 2933 return err; 2934 } 2935 2936 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) 2937 { 2938 int chain; 2939 struct neigh_hash_table *nht; 2940 2941 rcu_read_lock_bh(); 2942 nht = rcu_dereference_bh(tbl->nht); 2943 2944 read_lock(&tbl->lock); /* avoid resizes */ 2945 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 2946 struct neighbour *n; 2947 2948 for (n = rcu_dereference_bh(nht->hash_buckets[chain]); 2949 n != NULL; 2950 n = rcu_dereference_bh(n->next)) 2951 cb(n, cookie); 2952 } 2953 read_unlock(&tbl->lock); 2954 rcu_read_unlock_bh(); 2955 } 2956 EXPORT_SYMBOL(neigh_for_each); 2957 2958 /* The tbl->lock must be held as a writer and BH disabled. 
*/ 2959 void __neigh_for_each_release(struct neigh_table *tbl, 2960 int (*cb)(struct neighbour *)) 2961 { 2962 int chain; 2963 struct neigh_hash_table *nht; 2964 2965 nht = rcu_dereference_protected(tbl->nht, 2966 lockdep_is_held(&tbl->lock)); 2967 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 2968 struct neighbour *n; 2969 struct neighbour __rcu **np; 2970 2971 np = &nht->hash_buckets[chain]; 2972 while ((n = rcu_dereference_protected(*np, 2973 lockdep_is_held(&tbl->lock))) != NULL) { 2974 int release; 2975 2976 write_lock(&n->lock); 2977 release = cb(n); 2978 if (release) { 2979 rcu_assign_pointer(*np, 2980 rcu_dereference_protected(n->next, 2981 lockdep_is_held(&tbl->lock))); 2982 neigh_mark_dead(n); 2983 } else 2984 np = &n->next; 2985 write_unlock(&n->lock); 2986 if (release) 2987 neigh_cleanup_and_release(n); 2988 } 2989 } 2990 } 2991 EXPORT_SYMBOL(__neigh_for_each_release); 2992 2993 int neigh_xmit(int index, struct net_device *dev, 2994 const void *addr, struct sk_buff *skb) 2995 { 2996 int err = -EAFNOSUPPORT; 2997 if (likely(index < NEIGH_NR_TABLES)) { 2998 struct neigh_table *tbl; 2999 struct neighbour *neigh; 3000 3001 tbl = neigh_tables[index]; 3002 if (!tbl) 3003 goto out; 3004 rcu_read_lock_bh(); 3005 if (index == NEIGH_ARP_TABLE) { 3006 u32 key = *((u32 *)addr); 3007 3008 neigh = __ipv4_neigh_lookup_noref(dev, key); 3009 } else { 3010 neigh = __neigh_lookup_noref(tbl, addr, dev); 3011 } 3012 if (!neigh) 3013 neigh = __neigh_create(tbl, addr, dev, false); 3014 err = PTR_ERR(neigh); 3015 if (IS_ERR(neigh)) { 3016 rcu_read_unlock_bh(); 3017 goto out_kfree_skb; 3018 } 3019 err = neigh->output(neigh, skb); 3020 rcu_read_unlock_bh(); 3021 } 3022 else if (index == NEIGH_LINK_TABLE) { 3023 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 3024 addr, NULL, skb->len); 3025 if (err < 0) 3026 goto out_kfree_skb; 3027 err = dev_queue_xmit(skb); 3028 } 3029 out: 3030 return err; 3031 out_kfree_skb: 3032 kfree_skb(skb); 3033 goto out; 3034 } 3035 EXPORT_SYMBOL(neigh_xmit); 3036 3037 #ifdef CONFIG_PROC_FS 3038 3039 static struct neighbour *neigh_get_first(struct seq_file *seq) 3040 { 3041 struct neigh_seq_state *state = seq->private; 3042 struct net *net = seq_file_net(seq); 3043 struct neigh_hash_table *nht = state->nht; 3044 struct neighbour *n = NULL; 3045 int bucket; 3046 3047 state->flags &= ~NEIGH_SEQ_IS_PNEIGH; 3048 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { 3049 n = rcu_dereference_bh(nht->hash_buckets[bucket]); 3050 3051 while (n) { 3052 if (!net_eq(dev_net(n->dev), net)) 3053 goto next; 3054 if (state->neigh_sub_iter) { 3055 loff_t fakep = 0; 3056 void *v; 3057 3058 v = state->neigh_sub_iter(state, n, &fakep); 3059 if (!v) 3060 goto next; 3061 } 3062 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3063 break; 3064 if (n->nud_state & ~NUD_NOARP) 3065 break; 3066 next: 3067 n = rcu_dereference_bh(n->next); 3068 } 3069 3070 if (n) 3071 break; 3072 } 3073 state->bucket = bucket; 3074 3075 return n; 3076 } 3077 3078 static struct neighbour *neigh_get_next(struct seq_file *seq, 3079 struct neighbour *n, 3080 loff_t *pos) 3081 { 3082 struct neigh_seq_state *state = seq->private; 3083 struct net *net = seq_file_net(seq); 3084 struct neigh_hash_table *nht = state->nht; 3085 3086 if (state->neigh_sub_iter) { 3087 void *v = state->neigh_sub_iter(state, n, pos); 3088 if (v) 3089 return n; 3090 } 3091 n = rcu_dereference_bh(n->next); 3092 3093 while (1) { 3094 while (n) { 3095 if (!net_eq(dev_net(n->dev), net)) 3096 goto next; 3097 if (state->neigh_sub_iter) { 
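/* A protocol-supplied sub-iterator may expose several records per
 * neighbour; only once it is exhausted (returns NULL) do we advance
 * to the next entry on the hash chain.
 */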
3098 void *v = state->neigh_sub_iter(state, n, pos); 3099 if (v) 3100 return n; 3101 goto next; 3102 } 3103 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3104 break; 3105 3106 if (n->nud_state & ~NUD_NOARP) 3107 break; 3108 next: 3109 n = rcu_dereference_bh(n->next); 3110 } 3111 3112 if (n) 3113 break; 3114 3115 if (++state->bucket >= (1 << nht->hash_shift)) 3116 break; 3117 3118 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); 3119 } 3120 3121 if (n && pos) 3122 --(*pos); 3123 return n; 3124 } 3125 3126 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) 3127 { 3128 struct neighbour *n = neigh_get_first(seq); 3129 3130 if (n) { 3131 --(*pos); 3132 while (*pos) { 3133 n = neigh_get_next(seq, n, pos); 3134 if (!n) 3135 break; 3136 } 3137 } 3138 return *pos ? NULL : n; 3139 } 3140 3141 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq) 3142 { 3143 struct neigh_seq_state *state = seq->private; 3144 struct net *net = seq_file_net(seq); 3145 struct neigh_table *tbl = state->tbl; 3146 struct pneigh_entry *pn = NULL; 3147 int bucket; 3148 3149 state->flags |= NEIGH_SEQ_IS_PNEIGH; 3150 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { 3151 pn = tbl->phash_buckets[bucket]; 3152 while (pn && !net_eq(pneigh_net(pn), net)) 3153 pn = pn->next; 3154 if (pn) 3155 break; 3156 } 3157 state->bucket = bucket; 3158 3159 return pn; 3160 } 3161 3162 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq, 3163 struct pneigh_entry *pn, 3164 loff_t *pos) 3165 { 3166 struct neigh_seq_state *state = seq->private; 3167 struct net *net = seq_file_net(seq); 3168 struct neigh_table *tbl = state->tbl; 3169 3170 do { 3171 pn = pn->next; 3172 } while (pn && !net_eq(pneigh_net(pn), net)); 3173 3174 while (!pn) { 3175 if (++state->bucket > PNEIGH_HASHMASK) 3176 break; 3177 pn = tbl->phash_buckets[state->bucket]; 3178 while (pn && !net_eq(pneigh_net(pn), net)) 3179 pn = pn->next; 3180 if (pn) 3181 break; 3182 } 3183 3184 if (pn && pos) 3185 --(*pos); 3186 3187 return pn; 3188 } 3189 3190 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos) 3191 { 3192 struct pneigh_entry *pn = pneigh_get_first(seq); 3193 3194 if (pn) { 3195 --(*pos); 3196 while (*pos) { 3197 pn = pneigh_get_next(seq, pn, pos); 3198 if (!pn) 3199 break; 3200 } 3201 } 3202 return *pos ? NULL : pn; 3203 } 3204 3205 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) 3206 { 3207 struct neigh_seq_state *state = seq->private; 3208 void *rc; 3209 loff_t idxpos = *pos; 3210 3211 rc = neigh_get_idx(seq, &idxpos); 3212 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3213 rc = pneigh_get_idx(seq, &idxpos); 3214 3215 return rc; 3216 } 3217 3218 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) 3219 __acquires(tbl->lock) 3220 __acquires(rcu_bh) 3221 { 3222 struct neigh_seq_state *state = seq->private; 3223 3224 state->tbl = tbl; 3225 state->bucket = 0; 3226 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); 3227 3228 rcu_read_lock_bh(); 3229 state->nht = rcu_dereference_bh(tbl->nht); 3230 read_lock(&tbl->lock); 3231 3232 return *pos ? 
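/* *pos == 0 yields SEQ_START_TOKEN so a ->show() implementation
 * can emit a header line before the first real entry.
 */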
neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; 3233 } 3234 EXPORT_SYMBOL(neigh_seq_start); 3235 3236 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3237 { 3238 struct neigh_seq_state *state; 3239 void *rc; 3240 3241 if (v == SEQ_START_TOKEN) { 3242 rc = neigh_get_first(seq); 3243 goto out; 3244 } 3245 3246 state = seq->private; 3247 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) { 3248 rc = neigh_get_next(seq, v, NULL); 3249 if (rc) 3250 goto out; 3251 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3252 rc = pneigh_get_first(seq); 3253 } else { 3254 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY); 3255 rc = pneigh_get_next(seq, v, NULL); 3256 } 3257 out: 3258 ++(*pos); 3259 return rc; 3260 } 3261 EXPORT_SYMBOL(neigh_seq_next); 3262 3263 void neigh_seq_stop(struct seq_file *seq, void *v) 3264 __releases(tbl->lock) 3265 __releases(rcu_bh) 3266 { 3267 struct neigh_seq_state *state = seq->private; 3268 struct neigh_table *tbl = state->tbl; 3269 3270 read_unlock(&tbl->lock); 3271 rcu_read_unlock_bh(); 3272 } 3273 EXPORT_SYMBOL(neigh_seq_stop); 3274 3275 /* statistics via seq_file */ 3276 3277 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 3278 { 3279 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file)); 3280 int cpu; 3281 3282 if (*pos == 0) 3283 return SEQ_START_TOKEN; 3284 3285 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { 3286 if (!cpu_possible(cpu)) 3287 continue; 3288 *pos = cpu+1; 3289 return per_cpu_ptr(tbl->stats, cpu); 3290 } 3291 return NULL; 3292 } 3293 3294 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3295 { 3296 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file)); 3297 int cpu; 3298 3299 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 3300 if (!cpu_possible(cpu)) 3301 continue; 3302 *pos = cpu+1; 3303 return per_cpu_ptr(tbl->stats, cpu); 3304 } 3305 (*pos)++; 3306 return NULL; 3307 } 3308 3309 static void neigh_stat_seq_stop(struct seq_file *seq, void *v) 3310 { 3311 3312 } 3313 3314 static int neigh_stat_seq_show(struct seq_file *seq, void *v) 3315 { 3316 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file)); 3317 struct neigh_statistics *st = v; 3318 3319 if (v == SEQ_START_TOKEN) { 3320 seq_puts(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n"); 3321 return 0; 3322 } 3323 3324 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " 3325 "%08lx %08lx %08lx " 3326 "%08lx %08lx %08lx\n", 3327 atomic_read(&tbl->entries), 3328 3329 st->allocs, 3330 st->destroys, 3331 st->hash_grows, 3332 3333 st->lookups, 3334 st->hits, 3335 3336 st->res_failed, 3337 3338 st->rcv_probes_mcast, 3339 st->rcv_probes_ucast, 3340 3341 st->periodic_gc_runs, 3342 st->forced_gc_runs, 3343 st->unres_discards, 3344 st->table_fulls 3345 ); 3346 3347 return 0; 3348 } 3349 3350 static const struct seq_operations neigh_stat_seq_ops = { 3351 .start = neigh_stat_seq_start, 3352 .next = neigh_stat_seq_next, 3353 .stop = neigh_stat_seq_stop, 3354 .show = neigh_stat_seq_show, 3355 }; 3356 #endif /* CONFIG_PROC_FS */ 3357 3358 static void __neigh_notify(struct neighbour *n, int type, int flags, 3359 u32 pid) 3360 { 3361 struct net *net = dev_net(n->dev); 3362 struct sk_buff *skb; 3363 int err = -ENOBUFS; 3364 3365 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC); 3366 if (skb == NULL) 3367 goto errout; 3368 3369 err = neigh_fill_info(skb, n, pid, 0, type, flags); 3370 if (err < 0) { 3371 /* -EMSGSIZE implies BUG in 
neigh_nlmsg_size() */ 3372 WARN_ON(err == -EMSGSIZE); 3373 kfree_skb(skb); 3374 goto errout; 3375 } 3376 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 3377 return; 3378 errout: 3379 if (err < 0) 3380 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 3381 } 3382 3383 void neigh_app_ns(struct neighbour *n) 3384 { 3385 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0); 3386 } 3387 EXPORT_SYMBOL(neigh_app_ns); 3388 3389 #ifdef CONFIG_SYSCTL 3390 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 3391 3392 static int proc_unres_qlen(struct ctl_table *ctl, int write, 3393 void *buffer, size_t *lenp, loff_t *ppos) 3394 { 3395 int size, ret; 3396 struct ctl_table tmp = *ctl; 3397 3398 tmp.extra1 = SYSCTL_ZERO; 3399 tmp.extra2 = &unres_qlen_max; 3400 tmp.data = &size; 3401 3402 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN); 3403 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3404 3405 if (write && !ret) 3406 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); 3407 return ret; 3408 } 3409 3410 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev, 3411 int family) 3412 { 3413 switch (family) { 3414 case AF_INET: 3415 return __in_dev_arp_parms_get_rcu(dev); 3416 case AF_INET6: 3417 return __in6_dev_nd_parms_get_rcu(dev); 3418 } 3419 return NULL; 3420 } 3421 3422 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p, 3423 int index) 3424 { 3425 struct net_device *dev; 3426 int family = neigh_parms_family(p); 3427 3428 rcu_read_lock(); 3429 for_each_netdev_rcu(net, dev) { 3430 struct neigh_parms *dst_p = 3431 neigh_get_dev_parms_rcu(dev, family); 3432 3433 if (dst_p && !test_bit(index, dst_p->data_state)) 3434 dst_p->data[index] = p->data[index]; 3435 } 3436 rcu_read_unlock(); 3437 } 3438 3439 static void neigh_proc_update(struct ctl_table *ctl, int write) 3440 { 3441 struct net_device *dev = ctl->extra1; 3442 struct neigh_parms *p = ctl->extra2; 3443 struct net *net = neigh_parms_net(p); 3444 int index = (int *) ctl->data - p->data; 3445 3446 if (!write) 3447 return; 3448 3449 set_bit(index, p->data_state); 3450 if (index == NEIGH_VAR_DELAY_PROBE_TIME) 3451 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 3452 if (!dev) /* NULL dev means this is default value */ 3453 neigh_copy_dflt_parms(net, p, index); 3454 } 3455 3456 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, 3457 void *buffer, size_t *lenp, 3458 loff_t *ppos) 3459 { 3460 struct ctl_table tmp = *ctl; 3461 int ret; 3462 3463 tmp.extra1 = SYSCTL_ZERO; 3464 tmp.extra2 = SYSCTL_INT_MAX; 3465 3466 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3467 neigh_proc_update(ctl, write); 3468 return ret; 3469 } 3470 3471 int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer, 3472 size_t *lenp, loff_t *ppos) 3473 { 3474 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 3475 3476 neigh_proc_update(ctl, write); 3477 return ret; 3478 } 3479 EXPORT_SYMBOL(neigh_proc_dointvec); 3480 3481 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer, 3482 size_t *lenp, loff_t *ppos) 3483 { 3484 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3485 3486 neigh_proc_update(ctl, write); 3487 return ret; 3488 } 3489 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies); 3490 3491 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write, 3492 void *buffer, size_t *lenp, 3493 loff_t *ppos) 3494 { 3495 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, 
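/* Like the other neigh_proc_* wrappers: run the standard proc handler,
 * then let neigh_proc_update() mark the field as per-device state and
 * propagate writes to the default table down to every device.
 */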
lenp, ppos); 3496 3497 neigh_proc_update(ctl, write); 3498 return ret; 3499 } 3500 3501 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 3502 void *buffer, size_t *lenp, loff_t *ppos) 3503 { 3504 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3505 3506 neigh_proc_update(ctl, write); 3507 return ret; 3508 } 3509 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies); 3510 3511 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, 3512 void *buffer, size_t *lenp, 3513 loff_t *ppos) 3514 { 3515 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos); 3516 3517 neigh_proc_update(ctl, write); 3518 return ret; 3519 } 3520 3521 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 3522 void *buffer, size_t *lenp, 3523 loff_t *ppos) 3524 { 3525 struct neigh_parms *p = ctl->extra2; 3526 int ret; 3527 3528 if (strcmp(ctl->procname, "base_reachable_time") == 0) 3529 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3530 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) 3531 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3532 else 3533 ret = -1; 3534 3535 if (write && ret == 0) { 3536 /* update reachable_time as well, otherwise, the change will 3537 * only be effective after the next time neigh_periodic_work 3538 * decides to recompute it 3539 */ 3540 p->reachable_time = 3541 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 3542 } 3543 return ret; 3544 } 3545 3546 #define NEIGH_PARMS_DATA_OFFSET(index) \ 3547 (&((struct neigh_parms *) 0)->data[index]) 3548 3549 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \ 3550 [NEIGH_VAR_ ## attr] = { \ 3551 .procname = name, \ 3552 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \ 3553 .maxlen = sizeof(int), \ 3554 .mode = mval, \ 3555 .proc_handler = proc, \ 3556 } 3557 3558 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \ 3559 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax) 3560 3561 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \ 3562 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies) 3563 3564 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \ 3565 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies) 3566 3567 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \ 3568 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies) 3569 3570 #define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \ 3571 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen) 3572 3573 static struct neigh_sysctl_table { 3574 struct ctl_table_header *sysctl_header; 3575 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; 3576 } neigh_sysctl_template __read_mostly = { 3577 .neigh_vars = { 3578 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"), 3579 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"), 3580 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"), 3581 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"), 3582 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"), 3583 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"), 3584 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"), 3585 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"), 3586 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"), 3587 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"), 
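/* For example (the exact path depends on family and device name), the
 * QUEUE_LEN_BYTES entry above surfaces as
 * /proc/sys/net/ipv4/neigh/<dev>/unres_qlen_bytes
 * once neigh_sysctl_register() has run for that device.
 */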
3588 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"), 3589 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"), 3590 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"), 3591 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"), 3592 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"), 3593 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"), 3594 [NEIGH_VAR_GC_INTERVAL] = { 3595 .procname = "gc_interval", 3596 .maxlen = sizeof(int), 3597 .mode = 0644, 3598 .proc_handler = proc_dointvec_jiffies, 3599 }, 3600 [NEIGH_VAR_GC_THRESH1] = { 3601 .procname = "gc_thresh1", 3602 .maxlen = sizeof(int), 3603 .mode = 0644, 3604 .extra1 = SYSCTL_ZERO, 3605 .extra2 = SYSCTL_INT_MAX, 3606 .proc_handler = proc_dointvec_minmax, 3607 }, 3608 [NEIGH_VAR_GC_THRESH2] = { 3609 .procname = "gc_thresh2", 3610 .maxlen = sizeof(int), 3611 .mode = 0644, 3612 .extra1 = SYSCTL_ZERO, 3613 .extra2 = SYSCTL_INT_MAX, 3614 .proc_handler = proc_dointvec_minmax, 3615 }, 3616 [NEIGH_VAR_GC_THRESH3] = { 3617 .procname = "gc_thresh3", 3618 .maxlen = sizeof(int), 3619 .mode = 0644, 3620 .extra1 = SYSCTL_ZERO, 3621 .extra2 = SYSCTL_INT_MAX, 3622 .proc_handler = proc_dointvec_minmax, 3623 }, 3624 {}, 3625 }, 3626 }; 3627 3628 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 3629 proc_handler *handler) 3630 { 3631 int i; 3632 struct neigh_sysctl_table *t; 3633 const char *dev_name_source; 3634 char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ]; 3635 char *p_name; 3636 3637 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL); 3638 if (!t) 3639 goto err; 3640 3641 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { 3642 t->neigh_vars[i].data += (long) p; 3643 t->neigh_vars[i].extra1 = dev; 3644 t->neigh_vars[i].extra2 = p; 3645 } 3646 3647 if (dev) { 3648 dev_name_source = dev->name; 3649 /* Terminate the table early */ 3650 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3651 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3652 } else { 3653 struct neigh_table *tbl = p->tbl; 3654 dev_name_source = "default"; 3655 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval; 3656 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1; 3657 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2; 3658 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3; 3659 } 3660 3661 if (handler) { 3662 /* RetransTime */ 3663 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler; 3664 /* ReachableTime */ 3665 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler; 3666 /* RetransTime (in milliseconds) */ 3667 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3668 /* ReachableTime (in milliseconds) */ 3669 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3670 } else { 3671 /* These handlers update p->reachable_time after 3672 * base_reachable_time(_ms) is set, to ensure the new timer starts being 3673 * applied after the next neighbour update instead of waiting for 3674 * neigh_periodic_work to update its value (can be multiple minutes). 3675 * Any handler that replaces them should do this as well. 3676 */ 3677 /* ReachableTime */ 3678 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = 3679 neigh_proc_base_reachable_time; 3680 /* ReachableTime (in milliseconds) */ 3681 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = 3682 neigh_proc_base_reachable_time; 3683 } 3684
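/* For illustration (the callers live in the protocol code, not here):
 * ARP-style users register with a NULL handler and inherit the
 * base_reachable_time handlers chosen above, while IPv6 ndisc passes its
 * own proc_handler; a rough caller-side sketch:
 *
 *	if (neigh_sysctl_register(dev, idev->nd_parms, handler))
 *		goto err;	(hypothetical error path)
 */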
3685 /* Don't export sysctls to unprivileged users */ 3686 if (neigh_parms_net(p)->user_ns != &init_user_ns) 3687 t->neigh_vars[0].procname = NULL; 3688 3689 switch (neigh_parms_family(p)) { 3690 case AF_INET: 3691 p_name = "ipv4"; 3692 break; 3693 case AF_INET6: 3694 p_name = "ipv6"; 3695 break; 3696 default: 3697 BUG(); 3698 } 3699 3700 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", 3701 p_name, dev_name_source); 3702 t->sysctl_header = 3703 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars); 3704 if (!t->sysctl_header) 3705 goto free; 3706 3707 p->sysctl_table = t; 3708 return 0; 3709 3710 free: 3711 kfree(t); 3712 err: 3713 return -ENOBUFS; 3714 } 3715 EXPORT_SYMBOL(neigh_sysctl_register); 3716 3717 void neigh_sysctl_unregister(struct neigh_parms *p) 3718 { 3719 if (p->sysctl_table) { 3720 struct neigh_sysctl_table *t = p->sysctl_table; 3721 p->sysctl_table = NULL; 3722 unregister_net_sysctl_table(t->sysctl_header); 3723 kfree(t); 3724 } 3725 } 3726 EXPORT_SYMBOL(neigh_sysctl_unregister); 3727 3728 #endif /* CONFIG_SYSCTL */ 3729 3730 static int __init neigh_init(void) 3731 { 3732 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0); 3733 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0); 3734 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0); 3735 3736 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, 3737 0); 3738 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0); 3739 3740 return 0; 3741 } 3742 3743 subsys_initcall(neigh_init); 3744
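/* Userspace reaches the handlers registered above through rtnetlink; for
 * example (illustrative iproute2 commands):
 *
 *	ip neigh add 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0  -> neigh_add()
 *	ip neigh del 192.0.2.1 dev eth0                           -> neigh_delete()
 *	ip neigh get 192.0.2.1 dev eth0                           -> neigh_get()
 *	ip neigh show / ip ntable show                            -> the dump handlers
 */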