// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>

#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect another entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be made under neigh->lock,
   the most complicated procedure, which we allow is dev->hard_header.
   It is supposed, that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}

/*
 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to default IPv6 settings and is not overridable,
 * because it is really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ?
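	/* Worked example of the comment above (informative only): with
	 * base = 30 * HZ, (prandom_u32() % base) lies in [0, 30*HZ) and
	 * (base >> 1) adds 15*HZ, so the result is spread uniformly over
	 * [15*HZ, 45*HZ), i.e. (1/2)*base ... (3/2)*base as documented.
	 */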
(prandom_u32() % base) + (base >> 1) : 0; 116 } 117 EXPORT_SYMBOL(neigh_rand_reach_time); 118 119 static void neigh_mark_dead(struct neighbour *n) 120 { 121 n->dead = 1; 122 if (!list_empty(&n->gc_list)) { 123 list_del_init(&n->gc_list); 124 atomic_dec(&n->tbl->gc_entries); 125 } 126 } 127 128 static void neigh_update_gc_list(struct neighbour *n) 129 { 130 bool on_gc_list, exempt_from_gc; 131 132 write_lock_bh(&n->tbl->lock); 133 write_lock(&n->lock); 134 135 /* remove from the gc list if new state is permanent or if neighbor 136 * is externally learned; otherwise entry should be on the gc list 137 */ 138 exempt_from_gc = n->nud_state & NUD_PERMANENT || 139 n->flags & NTF_EXT_LEARNED; 140 on_gc_list = !list_empty(&n->gc_list); 141 142 if (exempt_from_gc && on_gc_list) { 143 list_del_init(&n->gc_list); 144 atomic_dec(&n->tbl->gc_entries); 145 } else if (!exempt_from_gc && !on_gc_list) { 146 /* add entries to the tail; cleaning removes from the front */ 147 list_add_tail(&n->gc_list, &n->tbl->gc_list); 148 atomic_inc(&n->tbl->gc_entries); 149 } 150 151 write_unlock(&n->lock); 152 write_unlock_bh(&n->tbl->lock); 153 } 154 155 static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags, 156 int *notify) 157 { 158 bool rc = false; 159 u8 ndm_flags; 160 161 if (!(flags & NEIGH_UPDATE_F_ADMIN)) 162 return rc; 163 164 ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0; 165 if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) { 166 if (ndm_flags & NTF_EXT_LEARNED) 167 neigh->flags |= NTF_EXT_LEARNED; 168 else 169 neigh->flags &= ~NTF_EXT_LEARNED; 170 rc = true; 171 *notify = 1; 172 } 173 174 return rc; 175 } 176 177 static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np, 178 struct neigh_table *tbl) 179 { 180 bool retval = false; 181 182 write_lock(&n->lock); 183 if (refcount_read(&n->refcnt) == 1) { 184 struct neighbour *neigh; 185 186 neigh = rcu_dereference_protected(n->next, 187 lockdep_is_held(&tbl->lock)); 188 rcu_assign_pointer(*np, neigh); 189 neigh_mark_dead(n); 190 retval = true; 191 } 192 write_unlock(&n->lock); 193 if (retval) 194 neigh_cleanup_and_release(n); 195 return retval; 196 } 197 198 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl) 199 { 200 struct neigh_hash_table *nht; 201 void *pkey = ndel->primary_key; 202 u32 hash_val; 203 struct neighbour *n; 204 struct neighbour __rcu **np; 205 206 nht = rcu_dereference_protected(tbl->nht, 207 lockdep_is_held(&tbl->lock)); 208 hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd); 209 hash_val = hash_val >> (32 - nht->hash_shift); 210 211 np = &nht->hash_buckets[hash_val]; 212 while ((n = rcu_dereference_protected(*np, 213 lockdep_is_held(&tbl->lock)))) { 214 if (n == ndel) 215 return neigh_del(n, np, tbl); 216 np = &n->next; 217 } 218 return false; 219 } 220 221 static int neigh_forced_gc(struct neigh_table *tbl) 222 { 223 int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2; 224 unsigned long tref = jiffies - 5 * HZ; 225 struct neighbour *n, *tmp; 226 int shrunk = 0; 227 228 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs); 229 230 write_lock_bh(&tbl->lock); 231 232 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) { 233 if (refcount_read(&n->refcnt) == 1) { 234 bool remove = false; 235 236 write_lock(&n->lock); 237 if ((n->nud_state == NUD_FAILED) || 238 time_after(tref, n->updated)) 239 remove = true; 240 write_unlock(&n->lock); 241 242 if (remove && neigh_remove_one(n, tbl)) 243 shrunk++; 244 if (shrunk >= max_clean) 245 break; 246 } 247 } 248 249 
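	/* Informative note on the budget used above: max_clean is the number
	 * of gc-tracked entries in excess of gc_thresh2, so with e.g.
	 * gc_thresh2 == 512 and 600 entries on the gc_list at most 88 are
	 * reclaimed in one forced run, and only those that are unreferenced
	 * and either NUD_FAILED or not updated within the last 5 seconds.
	 */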
tbl->last_flush = jiffies; 250 251 write_unlock_bh(&tbl->lock); 252 253 return shrunk; 254 } 255 256 static void neigh_add_timer(struct neighbour *n, unsigned long when) 257 { 258 neigh_hold(n); 259 if (unlikely(mod_timer(&n->timer, when))) { 260 printk("NEIGH: BUG, double timer add, state is %x\n", 261 n->nud_state); 262 dump_stack(); 263 } 264 } 265 266 static int neigh_del_timer(struct neighbour *n) 267 { 268 if ((n->nud_state & NUD_IN_TIMER) && 269 del_timer(&n->timer)) { 270 neigh_release(n); 271 return 1; 272 } 273 return 0; 274 } 275 276 static void pneigh_queue_purge(struct sk_buff_head *list) 277 { 278 struct sk_buff *skb; 279 280 while ((skb = skb_dequeue(list)) != NULL) { 281 dev_put(skb->dev); 282 kfree_skb(skb); 283 } 284 } 285 286 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev, 287 bool skip_perm) 288 { 289 int i; 290 struct neigh_hash_table *nht; 291 292 nht = rcu_dereference_protected(tbl->nht, 293 lockdep_is_held(&tbl->lock)); 294 295 for (i = 0; i < (1 << nht->hash_shift); i++) { 296 struct neighbour *n; 297 struct neighbour __rcu **np = &nht->hash_buckets[i]; 298 299 while ((n = rcu_dereference_protected(*np, 300 lockdep_is_held(&tbl->lock))) != NULL) { 301 if (dev && n->dev != dev) { 302 np = &n->next; 303 continue; 304 } 305 if (skip_perm && n->nud_state & NUD_PERMANENT) { 306 np = &n->next; 307 continue; 308 } 309 rcu_assign_pointer(*np, 310 rcu_dereference_protected(n->next, 311 lockdep_is_held(&tbl->lock))); 312 write_lock(&n->lock); 313 neigh_del_timer(n); 314 neigh_mark_dead(n); 315 if (refcount_read(&n->refcnt) != 1) { 316 /* The most unpleasant situation. 317 We must destroy neighbour entry, 318 but someone still uses it. 319 320 The destroy will be delayed until 321 the last user releases us, but 322 we must kill timers etc. and move 323 it to safe state. 
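				   Note: the remaining reference holders drop
				   the entry later; once the refcount reaches
				   zero, neigh_release() ends up in
				   neigh_destroy(), which is safe here because
				   the timer is already stopped and output is
				   neigh_blackhole.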
324 */ 325 __skb_queue_purge(&n->arp_queue); 326 n->arp_queue_len_bytes = 0; 327 n->output = neigh_blackhole; 328 if (n->nud_state & NUD_VALID) 329 n->nud_state = NUD_NOARP; 330 else 331 n->nud_state = NUD_NONE; 332 neigh_dbg(2, "neigh %p is stray\n", n); 333 } 334 write_unlock(&n->lock); 335 neigh_cleanup_and_release(n); 336 } 337 } 338 } 339 340 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) 341 { 342 write_lock_bh(&tbl->lock); 343 neigh_flush_dev(tbl, dev, false); 344 write_unlock_bh(&tbl->lock); 345 } 346 EXPORT_SYMBOL(neigh_changeaddr); 347 348 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, 349 bool skip_perm) 350 { 351 write_lock_bh(&tbl->lock); 352 neigh_flush_dev(tbl, dev, skip_perm); 353 pneigh_ifdown_and_unlock(tbl, dev); 354 355 del_timer_sync(&tbl->proxy_timer); 356 pneigh_queue_purge(&tbl->proxy_queue); 357 return 0; 358 } 359 360 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev) 361 { 362 __neigh_ifdown(tbl, dev, true); 363 return 0; 364 } 365 EXPORT_SYMBOL(neigh_carrier_down); 366 367 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) 368 { 369 __neigh_ifdown(tbl, dev, false); 370 return 0; 371 } 372 EXPORT_SYMBOL(neigh_ifdown); 373 374 static struct neighbour *neigh_alloc(struct neigh_table *tbl, 375 struct net_device *dev, 376 bool exempt_from_gc) 377 { 378 struct neighbour *n = NULL; 379 unsigned long now = jiffies; 380 int entries; 381 382 if (exempt_from_gc) 383 goto do_alloc; 384 385 entries = atomic_inc_return(&tbl->gc_entries) - 1; 386 if (entries >= tbl->gc_thresh3 || 387 (entries >= tbl->gc_thresh2 && 388 time_after(now, tbl->last_flush + 5 * HZ))) { 389 if (!neigh_forced_gc(tbl) && 390 entries >= tbl->gc_thresh3) { 391 net_info_ratelimited("%s: neighbor table overflow!\n", 392 tbl->id); 393 NEIGH_CACHE_STAT_INC(tbl, table_fulls); 394 goto out_entries; 395 } 396 } 397 398 do_alloc: 399 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC); 400 if (!n) 401 goto out_entries; 402 403 __skb_queue_head_init(&n->arp_queue); 404 rwlock_init(&n->lock); 405 seqlock_init(&n->ha_lock); 406 n->updated = n->used = now; 407 n->nud_state = NUD_NONE; 408 n->output = neigh_blackhole; 409 seqlock_init(&n->hh.hh_lock); 410 n->parms = neigh_parms_clone(&tbl->parms); 411 timer_setup(&n->timer, neigh_timer_handler, 0); 412 413 NEIGH_CACHE_STAT_INC(tbl, allocs); 414 n->tbl = tbl; 415 refcount_set(&n->refcnt, 1); 416 n->dead = 1; 417 INIT_LIST_HEAD(&n->gc_list); 418 419 atomic_inc(&tbl->entries); 420 out: 421 return n; 422 423 out_entries: 424 if (!exempt_from_gc) 425 atomic_dec(&tbl->gc_entries); 426 goto out; 427 } 428 429 static void neigh_get_hash_rnd(u32 *x) 430 { 431 *x = get_random_u32() | 1; 432 } 433 434 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift) 435 { 436 size_t size = (1 << shift) * sizeof(struct neighbour *); 437 struct neigh_hash_table *ret; 438 struct neighbour __rcu **buckets; 439 int i; 440 441 ret = kmalloc(sizeof(*ret), GFP_ATOMIC); 442 if (!ret) 443 return NULL; 444 if (size <= PAGE_SIZE) { 445 buckets = kzalloc(size, GFP_ATOMIC); 446 } else { 447 buckets = (struct neighbour __rcu **) 448 __get_free_pages(GFP_ATOMIC | __GFP_ZERO, 449 get_order(size)); 450 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC); 451 } 452 if (!buckets) { 453 kfree(ret); 454 return NULL; 455 } 456 ret->hash_buckets = buckets; 457 ret->hash_shift = shift; 458 for (i = 0; i < NEIGH_NUM_HASH_RND; i++) 459 neigh_get_hash_rnd(&ret->hash_rnd[i]); 460 return ret; 461 } 462 463 static 
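/* Bucket-array sizing note (informative): neigh_hash_alloc() above uses
 * kzalloc() while (1 << shift) * sizeof(struct neighbour *) fits in a page
 * and falls back to __get_free_pages() beyond that; e.g. the initial
 * shift of 3 used by neigh_table_init() means just 8 buckets, well within
 * a page.  neigh_hash_free_rcu() below mirrors that choice when the old
 * table is released after an RCU grace period.
 */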
void neigh_hash_free_rcu(struct rcu_head *head) 464 { 465 struct neigh_hash_table *nht = container_of(head, 466 struct neigh_hash_table, 467 rcu); 468 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); 469 struct neighbour __rcu **buckets = nht->hash_buckets; 470 471 if (size <= PAGE_SIZE) { 472 kfree(buckets); 473 } else { 474 kmemleak_free(buckets); 475 free_pages((unsigned long)buckets, get_order(size)); 476 } 477 kfree(nht); 478 } 479 480 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl, 481 unsigned long new_shift) 482 { 483 unsigned int i, hash; 484 struct neigh_hash_table *new_nht, *old_nht; 485 486 NEIGH_CACHE_STAT_INC(tbl, hash_grows); 487 488 old_nht = rcu_dereference_protected(tbl->nht, 489 lockdep_is_held(&tbl->lock)); 490 new_nht = neigh_hash_alloc(new_shift); 491 if (!new_nht) 492 return old_nht; 493 494 for (i = 0; i < (1 << old_nht->hash_shift); i++) { 495 struct neighbour *n, *next; 496 497 for (n = rcu_dereference_protected(old_nht->hash_buckets[i], 498 lockdep_is_held(&tbl->lock)); 499 n != NULL; 500 n = next) { 501 hash = tbl->hash(n->primary_key, n->dev, 502 new_nht->hash_rnd); 503 504 hash >>= (32 - new_nht->hash_shift); 505 next = rcu_dereference_protected(n->next, 506 lockdep_is_held(&tbl->lock)); 507 508 rcu_assign_pointer(n->next, 509 rcu_dereference_protected( 510 new_nht->hash_buckets[hash], 511 lockdep_is_held(&tbl->lock))); 512 rcu_assign_pointer(new_nht->hash_buckets[hash], n); 513 } 514 } 515 516 rcu_assign_pointer(tbl->nht, new_nht); 517 call_rcu(&old_nht->rcu, neigh_hash_free_rcu); 518 return new_nht; 519 } 520 521 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, 522 struct net_device *dev) 523 { 524 struct neighbour *n; 525 526 NEIGH_CACHE_STAT_INC(tbl, lookups); 527 528 rcu_read_lock_bh(); 529 n = __neigh_lookup_noref(tbl, pkey, dev); 530 if (n) { 531 if (!refcount_inc_not_zero(&n->refcnt)) 532 n = NULL; 533 NEIGH_CACHE_STAT_INC(tbl, hits); 534 } 535 536 rcu_read_unlock_bh(); 537 return n; 538 } 539 EXPORT_SYMBOL(neigh_lookup); 540 541 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, 542 const void *pkey) 543 { 544 struct neighbour *n; 545 unsigned int key_len = tbl->key_len; 546 u32 hash_val; 547 struct neigh_hash_table *nht; 548 549 NEIGH_CACHE_STAT_INC(tbl, lookups); 550 551 rcu_read_lock_bh(); 552 nht = rcu_dereference_bh(tbl->nht); 553 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift); 554 555 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); 556 n != NULL; 557 n = rcu_dereference_bh(n->next)) { 558 if (!memcmp(n->primary_key, pkey, key_len) && 559 net_eq(dev_net(n->dev), net)) { 560 if (!refcount_inc_not_zero(&n->refcnt)) 561 n = NULL; 562 NEIGH_CACHE_STAT_INC(tbl, hits); 563 break; 564 } 565 } 566 567 rcu_read_unlock_bh(); 568 return n; 569 } 570 EXPORT_SYMBOL(neigh_lookup_nodev); 571 572 static struct neighbour *___neigh_create(struct neigh_table *tbl, 573 const void *pkey, 574 struct net_device *dev, 575 bool exempt_from_gc, bool want_ref) 576 { 577 struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc); 578 u32 hash_val; 579 unsigned int key_len = tbl->key_len; 580 int error; 581 struct neigh_hash_table *nht; 582 583 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc); 584 585 if (!n) { 586 rc = ERR_PTR(-ENOBUFS); 587 goto out; 588 } 589 590 memcpy(n->primary_key, pkey, key_len); 591 n->dev = dev; 592 dev_hold(dev); 593 594 /* Protocol specific setup. 
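	   (For the IPv4 ARP table, for example, tbl->constructor is
	   arp_constructor(), which selects neigh->ops and the initial
	   neigh->output according to the device's header_ops and flags;
	   other address families do the analogous work in their own
	   constructors.)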
*/ 595 if (tbl->constructor && (error = tbl->constructor(n)) < 0) { 596 rc = ERR_PTR(error); 597 goto out_neigh_release; 598 } 599 600 if (dev->netdev_ops->ndo_neigh_construct) { 601 error = dev->netdev_ops->ndo_neigh_construct(dev, n); 602 if (error < 0) { 603 rc = ERR_PTR(error); 604 goto out_neigh_release; 605 } 606 } 607 608 /* Device specific setup. */ 609 if (n->parms->neigh_setup && 610 (error = n->parms->neigh_setup(n)) < 0) { 611 rc = ERR_PTR(error); 612 goto out_neigh_release; 613 } 614 615 n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1); 616 617 write_lock_bh(&tbl->lock); 618 nht = rcu_dereference_protected(tbl->nht, 619 lockdep_is_held(&tbl->lock)); 620 621 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) 622 nht = neigh_hash_grow(tbl, nht->hash_shift + 1); 623 624 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); 625 626 if (n->parms->dead) { 627 rc = ERR_PTR(-EINVAL); 628 goto out_tbl_unlock; 629 } 630 631 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val], 632 lockdep_is_held(&tbl->lock)); 633 n1 != NULL; 634 n1 = rcu_dereference_protected(n1->next, 635 lockdep_is_held(&tbl->lock))) { 636 if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) { 637 if (want_ref) 638 neigh_hold(n1); 639 rc = n1; 640 goto out_tbl_unlock; 641 } 642 } 643 644 n->dead = 0; 645 if (!exempt_from_gc) 646 list_add_tail(&n->gc_list, &n->tbl->gc_list); 647 648 if (want_ref) 649 neigh_hold(n); 650 rcu_assign_pointer(n->next, 651 rcu_dereference_protected(nht->hash_buckets[hash_val], 652 lockdep_is_held(&tbl->lock))); 653 rcu_assign_pointer(nht->hash_buckets[hash_val], n); 654 write_unlock_bh(&tbl->lock); 655 neigh_dbg(2, "neigh %p is created\n", n); 656 rc = n; 657 out: 658 return rc; 659 out_tbl_unlock: 660 write_unlock_bh(&tbl->lock); 661 out_neigh_release: 662 if (!exempt_from_gc) 663 atomic_dec(&tbl->gc_entries); 664 neigh_release(n); 665 goto out; 666 } 667 668 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, 669 struct net_device *dev, bool want_ref) 670 { 671 return ___neigh_create(tbl, pkey, dev, false, want_ref); 672 } 673 EXPORT_SYMBOL(__neigh_create); 674 675 static u32 pneigh_hash(const void *pkey, unsigned int key_len) 676 { 677 u32 hash_val = *(u32 *)(pkey + key_len - 4); 678 hash_val ^= (hash_val >> 16); 679 hash_val ^= hash_val >> 8; 680 hash_val ^= hash_val >> 4; 681 hash_val &= PNEIGH_HASHMASK; 682 return hash_val; 683 } 684 685 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n, 686 struct net *net, 687 const void *pkey, 688 unsigned int key_len, 689 struct net_device *dev) 690 { 691 while (n) { 692 if (!memcmp(n->key, pkey, key_len) && 693 net_eq(pneigh_net(n), net) && 694 (n->dev == dev || !n->dev)) 695 return n; 696 n = n->next; 697 } 698 return NULL; 699 } 700 701 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, 702 struct net *net, const void *pkey, struct net_device *dev) 703 { 704 unsigned int key_len = tbl->key_len; 705 u32 hash_val = pneigh_hash(pkey, key_len); 706 707 return __pneigh_lookup_1(tbl->phash_buckets[hash_val], 708 net, pkey, key_len, dev); 709 } 710 EXPORT_SYMBOL_GPL(__pneigh_lookup); 711 712 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, 713 struct net *net, const void *pkey, 714 struct net_device *dev, int creat) 715 { 716 struct pneigh_entry *n; 717 unsigned int key_len = tbl->key_len; 718 u32 hash_val = pneigh_hash(pkey, key_len); 719 720 read_lock_bh(&tbl->lock); 721 n = 
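	/* Look up the proxy entry under the read lock first: PNEIGH_HASHMASK
	 * is 0xF, so there are only 16 proxy hash buckets, whose chains are
	 * walked by __pneigh_lookup_1() above.  Only when nothing is found
	 * and @creat is set do we fall through to creating a new entry, a
	 * path that runs under RTNL and may sleep in kmalloc(GFP_KERNEL).
	 */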
__pneigh_lookup_1(tbl->phash_buckets[hash_val], 722 net, pkey, key_len, dev); 723 read_unlock_bh(&tbl->lock); 724 725 if (n || !creat) 726 goto out; 727 728 ASSERT_RTNL(); 729 730 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); 731 if (!n) 732 goto out; 733 734 n->protocol = 0; 735 write_pnet(&n->net, net); 736 memcpy(n->key, pkey, key_len); 737 n->dev = dev; 738 if (dev) 739 dev_hold(dev); 740 741 if (tbl->pconstructor && tbl->pconstructor(n)) { 742 if (dev) 743 dev_put(dev); 744 kfree(n); 745 n = NULL; 746 goto out; 747 } 748 749 write_lock_bh(&tbl->lock); 750 n->next = tbl->phash_buckets[hash_val]; 751 tbl->phash_buckets[hash_val] = n; 752 write_unlock_bh(&tbl->lock); 753 out: 754 return n; 755 } 756 EXPORT_SYMBOL(pneigh_lookup); 757 758 759 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, 760 struct net_device *dev) 761 { 762 struct pneigh_entry *n, **np; 763 unsigned int key_len = tbl->key_len; 764 u32 hash_val = pneigh_hash(pkey, key_len); 765 766 write_lock_bh(&tbl->lock); 767 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL; 768 np = &n->next) { 769 if (!memcmp(n->key, pkey, key_len) && n->dev == dev && 770 net_eq(pneigh_net(n), net)) { 771 *np = n->next; 772 write_unlock_bh(&tbl->lock); 773 if (tbl->pdestructor) 774 tbl->pdestructor(n); 775 if (n->dev) 776 dev_put(n->dev); 777 kfree(n); 778 return 0; 779 } 780 } 781 write_unlock_bh(&tbl->lock); 782 return -ENOENT; 783 } 784 785 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, 786 struct net_device *dev) 787 { 788 struct pneigh_entry *n, **np, *freelist = NULL; 789 u32 h; 790 791 for (h = 0; h <= PNEIGH_HASHMASK; h++) { 792 np = &tbl->phash_buckets[h]; 793 while ((n = *np) != NULL) { 794 if (!dev || n->dev == dev) { 795 *np = n->next; 796 n->next = freelist; 797 freelist = n; 798 continue; 799 } 800 np = &n->next; 801 } 802 } 803 write_unlock_bh(&tbl->lock); 804 while ((n = freelist)) { 805 freelist = n->next; 806 n->next = NULL; 807 if (tbl->pdestructor) 808 tbl->pdestructor(n); 809 if (n->dev) 810 dev_put(n->dev); 811 kfree(n); 812 } 813 return -ENOENT; 814 } 815 816 static void neigh_parms_destroy(struct neigh_parms *parms); 817 818 static inline void neigh_parms_put(struct neigh_parms *parms) 819 { 820 if (refcount_dec_and_test(&parms->refcnt)) 821 neigh_parms_destroy(parms); 822 } 823 824 /* 825 * neighbour must already be out of the table; 826 * 827 */ 828 void neigh_destroy(struct neighbour *neigh) 829 { 830 struct net_device *dev = neigh->dev; 831 832 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); 833 834 if (!neigh->dead) { 835 pr_warn("Destroying alive neighbour %p\n", neigh); 836 dump_stack(); 837 return; 838 } 839 840 if (neigh_del_timer(neigh)) 841 pr_warn("Impossible event\n"); 842 843 write_lock_bh(&neigh->lock); 844 __skb_queue_purge(&neigh->arp_queue); 845 write_unlock_bh(&neigh->lock); 846 neigh->arp_queue_len_bytes = 0; 847 848 if (dev->netdev_ops->ndo_neigh_destroy) 849 dev->netdev_ops->ndo_neigh_destroy(dev, neigh); 850 851 dev_put(dev); 852 neigh_parms_put(neigh->parms); 853 854 neigh_dbg(2, "neigh %p is destroyed\n", neigh); 855 856 atomic_dec(&neigh->tbl->entries); 857 kfree_rcu(neigh, rcu); 858 } 859 EXPORT_SYMBOL(neigh_destroy); 860 861 /* Neighbour state is suspicious; 862 disable fast path. 863 864 Called with write_locked neigh. 865 */ 866 static void neigh_suspect(struct neighbour *neigh) 867 { 868 neigh_dbg(2, "neigh %p is suspected\n", neigh); 869 870 neigh->output = neigh->ops->output; 871 } 872 873 /* Neighbour state is OK; 874 enable fast path. 
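   ("Fast path" here means ops->connected_output: the cached link-layer
   address is trusted and used directly, without the neigh_event_send()
   reachability check that the resolving path in neigh_resolve_output()
   performs.)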
875 876 Called with write_locked neigh. 877 */ 878 static void neigh_connect(struct neighbour *neigh) 879 { 880 neigh_dbg(2, "neigh %p is connected\n", neigh); 881 882 neigh->output = neigh->ops->connected_output; 883 } 884 885 static void neigh_periodic_work(struct work_struct *work) 886 { 887 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); 888 struct neighbour *n; 889 struct neighbour __rcu **np; 890 unsigned int i; 891 struct neigh_hash_table *nht; 892 893 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs); 894 895 write_lock_bh(&tbl->lock); 896 nht = rcu_dereference_protected(tbl->nht, 897 lockdep_is_held(&tbl->lock)); 898 899 /* 900 * periodically recompute ReachableTime from random function 901 */ 902 903 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { 904 struct neigh_parms *p; 905 tbl->last_rand = jiffies; 906 list_for_each_entry(p, &tbl->parms_list, list) 907 p->reachable_time = 908 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 909 } 910 911 if (atomic_read(&tbl->entries) < tbl->gc_thresh1) 912 goto out; 913 914 for (i = 0 ; i < (1 << nht->hash_shift); i++) { 915 np = &nht->hash_buckets[i]; 916 917 while ((n = rcu_dereference_protected(*np, 918 lockdep_is_held(&tbl->lock))) != NULL) { 919 unsigned int state; 920 921 write_lock(&n->lock); 922 923 state = n->nud_state; 924 if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) || 925 (n->flags & NTF_EXT_LEARNED)) { 926 write_unlock(&n->lock); 927 goto next_elt; 928 } 929 930 if (time_before(n->used, n->confirmed)) 931 n->used = n->confirmed; 932 933 if (refcount_read(&n->refcnt) == 1 && 934 (state == NUD_FAILED || 935 time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) { 936 *np = n->next; 937 neigh_mark_dead(n); 938 write_unlock(&n->lock); 939 neigh_cleanup_and_release(n); 940 continue; 941 } 942 write_unlock(&n->lock); 943 944 next_elt: 945 np = &n->next; 946 } 947 /* 948 * It's fine to release lock here, even if hash table 949 * grows while we are preempted. 950 */ 951 write_unlock_bh(&tbl->lock); 952 cond_resched(); 953 write_lock_bh(&tbl->lock); 954 nht = rcu_dereference_protected(tbl->nht, 955 lockdep_is_held(&tbl->lock)); 956 } 957 out: 958 /* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks. 959 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2 960 * BASE_REACHABLE_TIME. 961 */ 962 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work, 963 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1); 964 write_unlock_bh(&tbl->lock); 965 } 966 967 static __inline__ int neigh_max_probes(struct neighbour *n) 968 { 969 struct neigh_parms *p = n->parms; 970 return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) + 971 (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) : 972 NEIGH_VAR(p, MCAST_PROBES)); 973 } 974 975 static void neigh_invalidate(struct neighbour *neigh) 976 __releases(neigh->lock) 977 __acquires(neigh->lock) 978 { 979 struct sk_buff *skb; 980 981 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed); 982 neigh_dbg(2, "neigh %p is failed\n", neigh); 983 neigh->updated = jiffies; 984 985 /* It is very thin place. report_unreachable is very complicated 986 routine. Particularly, it can hit the same neighbour entry! 987 988 So that, we try to be accurate and avoid dead loop. 
--ANK 989 */ 990 while (neigh->nud_state == NUD_FAILED && 991 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { 992 write_unlock(&neigh->lock); 993 neigh->ops->error_report(neigh, skb); 994 write_lock(&neigh->lock); 995 } 996 __skb_queue_purge(&neigh->arp_queue); 997 neigh->arp_queue_len_bytes = 0; 998 } 999 1000 static void neigh_probe(struct neighbour *neigh) 1001 __releases(neigh->lock) 1002 { 1003 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); 1004 /* keep skb alive even if arp_queue overflows */ 1005 if (skb) 1006 skb = skb_clone(skb, GFP_ATOMIC); 1007 write_unlock(&neigh->lock); 1008 if (neigh->ops->solicit) 1009 neigh->ops->solicit(neigh, skb); 1010 atomic_inc(&neigh->probes); 1011 consume_skb(skb); 1012 } 1013 1014 /* Called when a timer expires for a neighbour entry. */ 1015 1016 static void neigh_timer_handler(struct timer_list *t) 1017 { 1018 unsigned long now, next; 1019 struct neighbour *neigh = from_timer(neigh, t, timer); 1020 unsigned int state; 1021 int notify = 0; 1022 1023 write_lock(&neigh->lock); 1024 1025 state = neigh->nud_state; 1026 now = jiffies; 1027 next = now + HZ; 1028 1029 if (!(state & NUD_IN_TIMER)) 1030 goto out; 1031 1032 if (state & NUD_REACHABLE) { 1033 if (time_before_eq(now, 1034 neigh->confirmed + neigh->parms->reachable_time)) { 1035 neigh_dbg(2, "neigh %p is still alive\n", neigh); 1036 next = neigh->confirmed + neigh->parms->reachable_time; 1037 } else if (time_before_eq(now, 1038 neigh->used + 1039 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) { 1040 neigh_dbg(2, "neigh %p is delayed\n", neigh); 1041 neigh->nud_state = NUD_DELAY; 1042 neigh->updated = jiffies; 1043 neigh_suspect(neigh); 1044 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME); 1045 } else { 1046 neigh_dbg(2, "neigh %p is suspected\n", neigh); 1047 neigh->nud_state = NUD_STALE; 1048 neigh->updated = jiffies; 1049 neigh_suspect(neigh); 1050 notify = 1; 1051 } 1052 } else if (state & NUD_DELAY) { 1053 if (time_before_eq(now, 1054 neigh->confirmed + 1055 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) { 1056 neigh_dbg(2, "neigh %p is now reachable\n", neigh); 1057 neigh->nud_state = NUD_REACHABLE; 1058 neigh->updated = jiffies; 1059 neigh_connect(neigh); 1060 notify = 1; 1061 next = neigh->confirmed + neigh->parms->reachable_time; 1062 } else { 1063 neigh_dbg(2, "neigh %p is probed\n", neigh); 1064 neigh->nud_state = NUD_PROBE; 1065 neigh->updated = jiffies; 1066 atomic_set(&neigh->probes, 0); 1067 notify = 1; 1068 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME); 1069 } 1070 } else { 1071 /* NUD_PROBE|NUD_INCOMPLETE */ 1072 next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME); 1073 } 1074 1075 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) && 1076 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { 1077 neigh->nud_state = NUD_FAILED; 1078 notify = 1; 1079 neigh_invalidate(neigh); 1080 goto out; 1081 } 1082 1083 if (neigh->nud_state & NUD_IN_TIMER) { 1084 if (time_before(next, jiffies + HZ/2)) 1085 next = jiffies + HZ/2; 1086 if (!mod_timer(&neigh->timer, next)) 1087 neigh_hold(neigh); 1088 } 1089 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { 1090 neigh_probe(neigh); 1091 } else { 1092 out: 1093 write_unlock(&neigh->lock); 1094 } 1095 1096 if (notify) 1097 neigh_update_notify(neigh, 0); 1098 1099 trace_neigh_timer_handler(neigh, 0); 1100 1101 neigh_release(neigh); 1102 } 1103 1104 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) 1105 { 1106 int rc; 1107 bool immediate_probe = false; 1108 1109 write_lock_bh(&neigh->lock); 1110 1111 rc 
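	/* Informal summary of the NUD transitions driven by
	 * neigh_timer_handler() above and by this function (informative):
	 *
	 *   REACHABLE: kept while ->confirmed is within reachable_time;
	 *              otherwise DELAY if ->used is within DELAY_PROBE_TIME,
	 *              else STALE.
	 *   DELAY:     back to REACHABLE if confirmed within
	 *              DELAY_PROBE_TIME, otherwise PROBE.
	 *   INCOMPLETE/PROBE: a solicit is sent every RETRANS_TIME until
	 *              neigh_max_probes() is exceeded, then FAILED.
	 *
	 * __neigh_event_send() below adds the transmit-driven transitions
	 * NONE -> INCOMPLETE and STALE -> DELAY.
	 */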
= 0; 1112 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)) 1113 goto out_unlock_bh; 1114 if (neigh->dead) 1115 goto out_dead; 1116 1117 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) { 1118 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) + 1119 NEIGH_VAR(neigh->parms, APP_PROBES)) { 1120 unsigned long next, now = jiffies; 1121 1122 atomic_set(&neigh->probes, 1123 NEIGH_VAR(neigh->parms, UCAST_PROBES)); 1124 neigh_del_timer(neigh); 1125 neigh->nud_state = NUD_INCOMPLETE; 1126 neigh->updated = now; 1127 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), 1128 HZ/2); 1129 neigh_add_timer(neigh, next); 1130 immediate_probe = true; 1131 } else { 1132 neigh->nud_state = NUD_FAILED; 1133 neigh->updated = jiffies; 1134 write_unlock_bh(&neigh->lock); 1135 1136 kfree_skb(skb); 1137 return 1; 1138 } 1139 } else if (neigh->nud_state & NUD_STALE) { 1140 neigh_dbg(2, "neigh %p is delayed\n", neigh); 1141 neigh_del_timer(neigh); 1142 neigh->nud_state = NUD_DELAY; 1143 neigh->updated = jiffies; 1144 neigh_add_timer(neigh, jiffies + 1145 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME)); 1146 } 1147 1148 if (neigh->nud_state == NUD_INCOMPLETE) { 1149 if (skb) { 1150 while (neigh->arp_queue_len_bytes + skb->truesize > 1151 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) { 1152 struct sk_buff *buff; 1153 1154 buff = __skb_dequeue(&neigh->arp_queue); 1155 if (!buff) 1156 break; 1157 neigh->arp_queue_len_bytes -= buff->truesize; 1158 kfree_skb(buff); 1159 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); 1160 } 1161 skb_dst_force(skb); 1162 __skb_queue_tail(&neigh->arp_queue, skb); 1163 neigh->arp_queue_len_bytes += skb->truesize; 1164 } 1165 rc = 1; 1166 } 1167 out_unlock_bh: 1168 if (immediate_probe) 1169 neigh_probe(neigh); 1170 else 1171 write_unlock(&neigh->lock); 1172 local_bh_enable(); 1173 trace_neigh_event_send_done(neigh, rc); 1174 return rc; 1175 1176 out_dead: 1177 if (neigh->nud_state & NUD_STALE) 1178 goto out_unlock_bh; 1179 write_unlock_bh(&neigh->lock); 1180 kfree_skb(skb); 1181 trace_neigh_event_send_dead(neigh, 1); 1182 return 1; 1183 } 1184 EXPORT_SYMBOL(__neigh_event_send); 1185 1186 static void neigh_update_hhs(struct neighbour *neigh) 1187 { 1188 struct hh_cache *hh; 1189 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *) 1190 = NULL; 1191 1192 if (neigh->dev->header_ops) 1193 update = neigh->dev->header_ops->cache_update; 1194 1195 if (update) { 1196 hh = &neigh->hh; 1197 if (READ_ONCE(hh->hh_len)) { 1198 write_seqlock_bh(&hh->hh_lock); 1199 update(hh, neigh->dev, neigh->ha); 1200 write_sequnlock_bh(&hh->hh_lock); 1201 } 1202 } 1203 } 1204 1205 1206 1207 /* Generic update routine. 1208 -- lladdr is new lladdr or NULL, if it is not supplied. 1209 -- new is new state. 1210 -- flags 1211 NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr, 1212 if it is different. 1213 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected" 1214 lladdr instead of overriding it 1215 if it is different. 1216 NEIGH_UPDATE_F_ADMIN means that the change is administrative. 1217 1218 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing 1219 NTF_ROUTER flag. 1220 NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as 1221 a router. 1222 1223 Caller MUST hold reference count on the entry. 
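   Examples from this file: neigh_event_ns() calls
   neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_OVERRIDE, 0)
   for a received solicitation, while the RTM_NEWNEIGH handler
   (neigh_add) passes NEIGH_UPDATE_F_ADMIN together with the OVERRIDE
   flags for administrative changes.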
1224 */ 1225 1226 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr, 1227 u8 new, u32 flags, u32 nlmsg_pid, 1228 struct netlink_ext_ack *extack) 1229 { 1230 bool ext_learn_change = false; 1231 u8 old; 1232 int err; 1233 int notify = 0; 1234 struct net_device *dev; 1235 int update_isrouter = 0; 1236 1237 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid); 1238 1239 write_lock_bh(&neigh->lock); 1240 1241 dev = neigh->dev; 1242 old = neigh->nud_state; 1243 err = -EPERM; 1244 1245 if (!(flags & NEIGH_UPDATE_F_ADMIN) && 1246 (old & (NUD_NOARP | NUD_PERMANENT))) 1247 goto out; 1248 if (neigh->dead) { 1249 NL_SET_ERR_MSG(extack, "Neighbor entry is now dead"); 1250 goto out; 1251 } 1252 1253 ext_learn_change = neigh_update_ext_learned(neigh, flags, ¬ify); 1254 1255 if (!(new & NUD_VALID)) { 1256 neigh_del_timer(neigh); 1257 if (old & NUD_CONNECTED) 1258 neigh_suspect(neigh); 1259 neigh->nud_state = new; 1260 err = 0; 1261 notify = old & NUD_VALID; 1262 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) && 1263 (new & NUD_FAILED)) { 1264 neigh_invalidate(neigh); 1265 notify = 1; 1266 } 1267 goto out; 1268 } 1269 1270 /* Compare new lladdr with cached one */ 1271 if (!dev->addr_len) { 1272 /* First case: device needs no address. */ 1273 lladdr = neigh->ha; 1274 } else if (lladdr) { 1275 /* The second case: if something is already cached 1276 and a new address is proposed: 1277 - compare new & old 1278 - if they are different, check override flag 1279 */ 1280 if ((old & NUD_VALID) && 1281 !memcmp(lladdr, neigh->ha, dev->addr_len)) 1282 lladdr = neigh->ha; 1283 } else { 1284 /* No address is supplied; if we know something, 1285 use it, otherwise discard the request. 1286 */ 1287 err = -EINVAL; 1288 if (!(old & NUD_VALID)) { 1289 NL_SET_ERR_MSG(extack, "No link layer address given"); 1290 goto out; 1291 } 1292 lladdr = neigh->ha; 1293 } 1294 1295 /* Update confirmed timestamp for neighbour entry after we 1296 * received ARP packet even if it doesn't change IP to MAC binding. 1297 */ 1298 if (new & NUD_CONNECTED) 1299 neigh->confirmed = jiffies; 1300 1301 /* If entry was valid and address is not changed, 1302 do not change entry state, if new one is STALE. 1303 */ 1304 err = 0; 1305 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER; 1306 if (old & NUD_VALID) { 1307 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) { 1308 update_isrouter = 0; 1309 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) && 1310 (old & NUD_CONNECTED)) { 1311 lladdr = neigh->ha; 1312 new = NUD_STALE; 1313 } else 1314 goto out; 1315 } else { 1316 if (lladdr == neigh->ha && new == NUD_STALE && 1317 !(flags & NEIGH_UPDATE_F_ADMIN)) 1318 new = old; 1319 } 1320 } 1321 1322 /* Update timestamp only once we know we will make a change to the 1323 * neighbour entry. Otherwise we risk to move the locktime window with 1324 * noop updates and ignore relevant ARP updates. 1325 */ 1326 if (new != old || lladdr != neigh->ha) 1327 neigh->updated = jiffies; 1328 1329 if (new != old) { 1330 neigh_del_timer(neigh); 1331 if (new & NUD_PROBE) 1332 atomic_set(&neigh->probes, 0); 1333 if (new & NUD_IN_TIMER) 1334 neigh_add_timer(neigh, (jiffies + 1335 ((new & NUD_REACHABLE) ? 
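					/* A new REACHABLE state is re-armed a
					 * full reachable_time ahead; the other
					 * NUD_IN_TIMER states get an expiry of
					 * "now", so the timer fires at once and
					 * neigh_timer_handler() picks the
					 * proper DELAY/PROBE interval.
					 */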
1336 neigh->parms->reachable_time : 1337 0))); 1338 neigh->nud_state = new; 1339 notify = 1; 1340 } 1341 1342 if (lladdr != neigh->ha) { 1343 write_seqlock(&neigh->ha_lock); 1344 memcpy(&neigh->ha, lladdr, dev->addr_len); 1345 write_sequnlock(&neigh->ha_lock); 1346 neigh_update_hhs(neigh); 1347 if (!(new & NUD_CONNECTED)) 1348 neigh->confirmed = jiffies - 1349 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1); 1350 notify = 1; 1351 } 1352 if (new == old) 1353 goto out; 1354 if (new & NUD_CONNECTED) 1355 neigh_connect(neigh); 1356 else 1357 neigh_suspect(neigh); 1358 if (!(old & NUD_VALID)) { 1359 struct sk_buff *skb; 1360 1361 /* Again: avoid dead loop if something went wrong */ 1362 1363 while (neigh->nud_state & NUD_VALID && 1364 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) { 1365 struct dst_entry *dst = skb_dst(skb); 1366 struct neighbour *n2, *n1 = neigh; 1367 write_unlock_bh(&neigh->lock); 1368 1369 rcu_read_lock(); 1370 1371 /* Why not just use 'neigh' as-is? The problem is that 1372 * things such as shaper, eql, and sch_teql can end up 1373 * using alternative, different, neigh objects to output 1374 * the packet in the output path. So what we need to do 1375 * here is re-lookup the top-level neigh in the path so 1376 * we can reinject the packet there. 1377 */ 1378 n2 = NULL; 1379 if (dst) { 1380 n2 = dst_neigh_lookup_skb(dst, skb); 1381 if (n2) 1382 n1 = n2; 1383 } 1384 n1->output(n1, skb); 1385 if (n2) 1386 neigh_release(n2); 1387 rcu_read_unlock(); 1388 1389 write_lock_bh(&neigh->lock); 1390 } 1391 __skb_queue_purge(&neigh->arp_queue); 1392 neigh->arp_queue_len_bytes = 0; 1393 } 1394 out: 1395 if (update_isrouter) 1396 neigh_update_is_router(neigh, flags, ¬ify); 1397 write_unlock_bh(&neigh->lock); 1398 1399 if (((new ^ old) & NUD_PERMANENT) || ext_learn_change) 1400 neigh_update_gc_list(neigh); 1401 1402 if (notify) 1403 neigh_update_notify(neigh, nlmsg_pid); 1404 1405 trace_neigh_update_done(neigh, err); 1406 1407 return err; 1408 } 1409 1410 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, 1411 u32 flags, u32 nlmsg_pid) 1412 { 1413 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL); 1414 } 1415 EXPORT_SYMBOL(neigh_update); 1416 1417 /* Update the neigh to listen temporarily for probe responses, even if it is 1418 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing. 1419 */ 1420 void __neigh_set_probe_once(struct neighbour *neigh) 1421 { 1422 if (neigh->dead) 1423 return; 1424 neigh->updated = jiffies; 1425 if (!(neigh->nud_state & NUD_FAILED)) 1426 return; 1427 neigh->nud_state = NUD_INCOMPLETE; 1428 atomic_set(&neigh->probes, neigh_max_probes(neigh)); 1429 neigh_add_timer(neigh, 1430 jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME)); 1431 } 1432 EXPORT_SYMBOL(__neigh_set_probe_once); 1433 1434 struct neighbour *neigh_event_ns(struct neigh_table *tbl, 1435 u8 *lladdr, void *saddr, 1436 struct net_device *dev) 1437 { 1438 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev, 1439 lladdr || !dev->addr_len); 1440 if (neigh) 1441 neigh_update(neigh, lladdr, NUD_STALE, 1442 NEIGH_UPDATE_F_OVERRIDE, 0); 1443 return neigh; 1444 } 1445 EXPORT_SYMBOL(neigh_event_ns); 1446 1447 /* called with read_lock_bh(&n->lock); */ 1448 static void neigh_hh_init(struct neighbour *n) 1449 { 1450 struct net_device *dev = n->dev; 1451 __be16 prot = n->tbl->protocol; 1452 struct hh_cache *hh = &n->hh; 1453 1454 write_lock_bh(&n->lock); 1455 1456 /* Only one thread can come in here and initialize the 1457 * hh_cache entry. 
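	 * hh_len is written only here, under the neighbour's write lock;
	 * callers on the output path test READ_ONCE(hh->hh_len) before
	 * taking that lock, and readers of the cached header itself
	 * synchronise through the hh_lock seqlock instead.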
1458 */ 1459 if (!hh->hh_len) 1460 dev->header_ops->cache(n, hh, prot); 1461 1462 write_unlock_bh(&n->lock); 1463 } 1464 1465 /* Slow and careful. */ 1466 1467 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb) 1468 { 1469 int rc = 0; 1470 1471 if (!neigh_event_send(neigh, skb)) { 1472 int err; 1473 struct net_device *dev = neigh->dev; 1474 unsigned int seq; 1475 1476 if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len)) 1477 neigh_hh_init(neigh); 1478 1479 do { 1480 __skb_pull(skb, skb_network_offset(skb)); 1481 seq = read_seqbegin(&neigh->ha_lock); 1482 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 1483 neigh->ha, NULL, skb->len); 1484 } while (read_seqretry(&neigh->ha_lock, seq)); 1485 1486 if (err >= 0) 1487 rc = dev_queue_xmit(skb); 1488 else 1489 goto out_kfree_skb; 1490 } 1491 out: 1492 return rc; 1493 out_kfree_skb: 1494 rc = -EINVAL; 1495 kfree_skb(skb); 1496 goto out; 1497 } 1498 EXPORT_SYMBOL(neigh_resolve_output); 1499 1500 /* As fast as possible without hh cache */ 1501 1502 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb) 1503 { 1504 struct net_device *dev = neigh->dev; 1505 unsigned int seq; 1506 int err; 1507 1508 do { 1509 __skb_pull(skb, skb_network_offset(skb)); 1510 seq = read_seqbegin(&neigh->ha_lock); 1511 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 1512 neigh->ha, NULL, skb->len); 1513 } while (read_seqretry(&neigh->ha_lock, seq)); 1514 1515 if (err >= 0) 1516 err = dev_queue_xmit(skb); 1517 else { 1518 err = -EINVAL; 1519 kfree_skb(skb); 1520 } 1521 return err; 1522 } 1523 EXPORT_SYMBOL(neigh_connected_output); 1524 1525 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb) 1526 { 1527 return dev_queue_xmit(skb); 1528 } 1529 EXPORT_SYMBOL(neigh_direct_output); 1530 1531 static void neigh_proxy_process(struct timer_list *t) 1532 { 1533 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer); 1534 long sched_next = 0; 1535 unsigned long now = jiffies; 1536 struct sk_buff *skb, *n; 1537 1538 spin_lock(&tbl->proxy_queue.lock); 1539 1540 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) { 1541 long tdif = NEIGH_CB(skb)->sched_next - now; 1542 1543 if (tdif <= 0) { 1544 struct net_device *dev = skb->dev; 1545 1546 __skb_unlink(skb, &tbl->proxy_queue); 1547 if (tbl->proxy_redo && netif_running(dev)) { 1548 rcu_read_lock(); 1549 tbl->proxy_redo(skb); 1550 rcu_read_unlock(); 1551 } else { 1552 kfree_skb(skb); 1553 } 1554 1555 dev_put(dev); 1556 } else if (!sched_next || tdif < sched_next) 1557 sched_next = tdif; 1558 } 1559 del_timer(&tbl->proxy_timer); 1560 if (sched_next) 1561 mod_timer(&tbl->proxy_timer, jiffies + sched_next); 1562 spin_unlock(&tbl->proxy_queue.lock); 1563 } 1564 1565 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, 1566 struct sk_buff *skb) 1567 { 1568 unsigned long now = jiffies; 1569 1570 unsigned long sched_next = now + (prandom_u32() % 1571 NEIGH_VAR(p, PROXY_DELAY)); 1572 1573 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) { 1574 kfree_skb(skb); 1575 return; 1576 } 1577 1578 NEIGH_CB(skb)->sched_next = sched_next; 1579 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED; 1580 1581 spin_lock(&tbl->proxy_queue.lock); 1582 if (del_timer(&tbl->proxy_timer)) { 1583 if (time_before(tbl->proxy_timer.expires, sched_next)) 1584 sched_next = tbl->proxy_timer.expires; 1585 } 1586 skb_dst_drop(skb); 1587 dev_hold(skb->dev); 1588 __skb_queue_tail(&tbl->proxy_queue, skb); 1589 mod_timer(&tbl->proxy_timer, sched_next); 1590 spin_unlock(&tbl->proxy_queue.lock); 1591 } 
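/* Proxy-delay behaviour of the two functions above (informative):
 * pneigh_enqueue() holds each proxied request for a random delay in
 * [0, PROXY_DELAY) jiffies, dropping it outright if the queue already
 * exceeds PROXY_QLEN packets, and neigh_proxy_process() later replays
 * the expired entries through tbl->proxy_redo() if the device is still
 * running, otherwise it frees them.
 */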
1592 EXPORT_SYMBOL(pneigh_enqueue); 1593 1594 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, 1595 struct net *net, int ifindex) 1596 { 1597 struct neigh_parms *p; 1598 1599 list_for_each_entry(p, &tbl->parms_list, list) { 1600 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) || 1601 (!p->dev && !ifindex && net_eq(net, &init_net))) 1602 return p; 1603 } 1604 1605 return NULL; 1606 } 1607 1608 struct neigh_parms *neigh_parms_alloc(struct net_device *dev, 1609 struct neigh_table *tbl) 1610 { 1611 struct neigh_parms *p; 1612 struct net *net = dev_net(dev); 1613 const struct net_device_ops *ops = dev->netdev_ops; 1614 1615 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL); 1616 if (p) { 1617 p->tbl = tbl; 1618 refcount_set(&p->refcnt, 1); 1619 p->reachable_time = 1620 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 1621 dev_hold(dev); 1622 p->dev = dev; 1623 write_pnet(&p->net, net); 1624 p->sysctl_table = NULL; 1625 1626 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { 1627 dev_put(dev); 1628 kfree(p); 1629 return NULL; 1630 } 1631 1632 write_lock_bh(&tbl->lock); 1633 list_add(&p->list, &tbl->parms.list); 1634 write_unlock_bh(&tbl->lock); 1635 1636 neigh_parms_data_state_cleanall(p); 1637 } 1638 return p; 1639 } 1640 EXPORT_SYMBOL(neigh_parms_alloc); 1641 1642 static void neigh_rcu_free_parms(struct rcu_head *head) 1643 { 1644 struct neigh_parms *parms = 1645 container_of(head, struct neigh_parms, rcu_head); 1646 1647 neigh_parms_put(parms); 1648 } 1649 1650 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) 1651 { 1652 if (!parms || parms == &tbl->parms) 1653 return; 1654 write_lock_bh(&tbl->lock); 1655 list_del(&parms->list); 1656 parms->dead = 1; 1657 write_unlock_bh(&tbl->lock); 1658 if (parms->dev) 1659 dev_put(parms->dev); 1660 call_rcu(&parms->rcu_head, neigh_rcu_free_parms); 1661 } 1662 EXPORT_SYMBOL(neigh_parms_release); 1663 1664 static void neigh_parms_destroy(struct neigh_parms *parms) 1665 { 1666 kfree(parms); 1667 } 1668 1669 static struct lock_class_key neigh_table_proxy_queue_class; 1670 1671 static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly; 1672 1673 void neigh_table_init(int index, struct neigh_table *tbl) 1674 { 1675 unsigned long now = jiffies; 1676 unsigned long phsize; 1677 1678 INIT_LIST_HEAD(&tbl->parms_list); 1679 INIT_LIST_HEAD(&tbl->gc_list); 1680 list_add(&tbl->parms.list, &tbl->parms_list); 1681 write_pnet(&tbl->parms.net, &init_net); 1682 refcount_set(&tbl->parms.refcnt, 1); 1683 tbl->parms.reachable_time = 1684 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME)); 1685 1686 tbl->stats = alloc_percpu(struct neigh_statistics); 1687 if (!tbl->stats) 1688 panic("cannot create neighbour cache statistics"); 1689 1690 #ifdef CONFIG_PROC_FS 1691 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat, 1692 &neigh_stat_seq_ops, tbl)) 1693 panic("cannot create neighbour proc dir entry"); 1694 #endif 1695 1696 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3)); 1697 1698 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *); 1699 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL); 1700 1701 if (!tbl->nht || !tbl->phash_buckets) 1702 panic("cannot allocate neighbour cache hashes"); 1703 1704 if (!tbl->entry_size) 1705 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) + 1706 tbl->key_len, NEIGH_PRIV_ALIGN); 1707 else 1708 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN); 1709 1710 rwlock_init(&tbl->lock); 1711 
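	/* The deferrable work scheduled below is the periodic GC: it first
	 * runs after one reachable_time and then re-arms itself from
	 * neigh_periodic_work() every BASE_REACHABLE_TIME/2.  The proxy
	 * timer set up after it drives the delayed proxy queue handled by
	 * neigh_proxy_process().
	 */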
INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work); 1712 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work, 1713 tbl->parms.reachable_time); 1714 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0); 1715 skb_queue_head_init_class(&tbl->proxy_queue, 1716 &neigh_table_proxy_queue_class); 1717 1718 tbl->last_flush = now; 1719 tbl->last_rand = now + tbl->parms.reachable_time * 20; 1720 1721 neigh_tables[index] = tbl; 1722 } 1723 EXPORT_SYMBOL(neigh_table_init); 1724 1725 int neigh_table_clear(int index, struct neigh_table *tbl) 1726 { 1727 neigh_tables[index] = NULL; 1728 /* It is not clean... Fix it to unload IPv6 module safely */ 1729 cancel_delayed_work_sync(&tbl->gc_work); 1730 del_timer_sync(&tbl->proxy_timer); 1731 pneigh_queue_purge(&tbl->proxy_queue); 1732 neigh_ifdown(tbl, NULL); 1733 if (atomic_read(&tbl->entries)) 1734 pr_crit("neighbour leakage\n"); 1735 1736 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu, 1737 neigh_hash_free_rcu); 1738 tbl->nht = NULL; 1739 1740 kfree(tbl->phash_buckets); 1741 tbl->phash_buckets = NULL; 1742 1743 remove_proc_entry(tbl->id, init_net.proc_net_stat); 1744 1745 free_percpu(tbl->stats); 1746 tbl->stats = NULL; 1747 1748 return 0; 1749 } 1750 EXPORT_SYMBOL(neigh_table_clear); 1751 1752 static struct neigh_table *neigh_find_table(int family) 1753 { 1754 struct neigh_table *tbl = NULL; 1755 1756 switch (family) { 1757 case AF_INET: 1758 tbl = neigh_tables[NEIGH_ARP_TABLE]; 1759 break; 1760 case AF_INET6: 1761 tbl = neigh_tables[NEIGH_ND_TABLE]; 1762 break; 1763 case AF_DECnet: 1764 tbl = neigh_tables[NEIGH_DN_TABLE]; 1765 break; 1766 } 1767 1768 return tbl; 1769 } 1770 1771 const struct nla_policy nda_policy[NDA_MAX+1] = { 1772 [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1773 [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, 1774 [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) }, 1775 [NDA_PROBES] = { .type = NLA_U32 }, 1776 [NDA_VLAN] = { .type = NLA_U16 }, 1777 [NDA_PORT] = { .type = NLA_U16 }, 1778 [NDA_VNI] = { .type = NLA_U32 }, 1779 [NDA_IFINDEX] = { .type = NLA_U32 }, 1780 [NDA_MASTER] = { .type = NLA_U32 }, 1781 [NDA_PROTOCOL] = { .type = NLA_U8 }, 1782 }; 1783 1784 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, 1785 struct netlink_ext_ack *extack) 1786 { 1787 struct net *net = sock_net(skb->sk); 1788 struct ndmsg *ndm; 1789 struct nlattr *dst_attr; 1790 struct neigh_table *tbl; 1791 struct neighbour *neigh; 1792 struct net_device *dev = NULL; 1793 int err = -EINVAL; 1794 1795 ASSERT_RTNL(); 1796 if (nlmsg_len(nlh) < sizeof(*ndm)) 1797 goto out; 1798 1799 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST); 1800 if (!dst_attr) { 1801 NL_SET_ERR_MSG(extack, "Network address not specified"); 1802 goto out; 1803 } 1804 1805 ndm = nlmsg_data(nlh); 1806 if (ndm->ndm_ifindex) { 1807 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 1808 if (dev == NULL) { 1809 err = -ENODEV; 1810 goto out; 1811 } 1812 } 1813 1814 tbl = neigh_find_table(ndm->ndm_family); 1815 if (tbl == NULL) 1816 return -EAFNOSUPPORT; 1817 1818 if (nla_len(dst_attr) < (int)tbl->key_len) { 1819 NL_SET_ERR_MSG(extack, "Invalid network address"); 1820 goto out; 1821 } 1822 1823 if (ndm->ndm_flags & NTF_PROXY) { 1824 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); 1825 goto out; 1826 } 1827 1828 if (dev == NULL) 1829 goto out; 1830 1831 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); 1832 if (neigh == NULL) { 1833 err = -ENOENT; 1834 goto out; 1835 } 1836 1837 err = __neigh_update(neigh, NULL, 
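	/* A NULL lladdr with new state NUD_FAILED takes the !NUD_VALID
	 * branch in __neigh_update(): the timer is stopped and, if the entry
	 * was still resolving, its queued skbs are flushed via
	 * neigh_invalidate().  The entry itself is then unlinked from the
	 * hash by neigh_remove_one() below.
	 */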
NUD_FAILED, 1838 NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 1839 NETLINK_CB(skb).portid, extack); 1840 write_lock_bh(&tbl->lock); 1841 neigh_release(neigh); 1842 neigh_remove_one(neigh, tbl); 1843 write_unlock_bh(&tbl->lock); 1844 1845 out: 1846 return err; 1847 } 1848 1849 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, 1850 struct netlink_ext_ack *extack) 1851 { 1852 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE | 1853 NEIGH_UPDATE_F_OVERRIDE_ISROUTER; 1854 struct net *net = sock_net(skb->sk); 1855 struct ndmsg *ndm; 1856 struct nlattr *tb[NDA_MAX+1]; 1857 struct neigh_table *tbl; 1858 struct net_device *dev = NULL; 1859 struct neighbour *neigh; 1860 void *dst, *lladdr; 1861 u8 protocol = 0; 1862 int err; 1863 1864 ASSERT_RTNL(); 1865 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 1866 nda_policy, extack); 1867 if (err < 0) 1868 goto out; 1869 1870 err = -EINVAL; 1871 if (!tb[NDA_DST]) { 1872 NL_SET_ERR_MSG(extack, "Network address not specified"); 1873 goto out; 1874 } 1875 1876 ndm = nlmsg_data(nlh); 1877 if (ndm->ndm_ifindex) { 1878 dev = __dev_get_by_index(net, ndm->ndm_ifindex); 1879 if (dev == NULL) { 1880 err = -ENODEV; 1881 goto out; 1882 } 1883 1884 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) { 1885 NL_SET_ERR_MSG(extack, "Invalid link address"); 1886 goto out; 1887 } 1888 } 1889 1890 tbl = neigh_find_table(ndm->ndm_family); 1891 if (tbl == NULL) 1892 return -EAFNOSUPPORT; 1893 1894 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) { 1895 NL_SET_ERR_MSG(extack, "Invalid network address"); 1896 goto out; 1897 } 1898 1899 dst = nla_data(tb[NDA_DST]); 1900 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; 1901 1902 if (tb[NDA_PROTOCOL]) 1903 protocol = nla_get_u8(tb[NDA_PROTOCOL]); 1904 1905 if (ndm->ndm_flags & NTF_PROXY) { 1906 struct pneigh_entry *pn; 1907 1908 err = -ENOBUFS; 1909 pn = pneigh_lookup(tbl, net, dst, dev, 1); 1910 if (pn) { 1911 pn->flags = ndm->ndm_flags; 1912 if (protocol) 1913 pn->protocol = protocol; 1914 err = 0; 1915 } 1916 goto out; 1917 } 1918 1919 if (!dev) { 1920 NL_SET_ERR_MSG(extack, "Device not specified"); 1921 goto out; 1922 } 1923 1924 if (tbl->allow_add && !tbl->allow_add(dev, extack)) { 1925 err = -EINVAL; 1926 goto out; 1927 } 1928 1929 neigh = neigh_lookup(tbl, dst, dev); 1930 if (neigh == NULL) { 1931 bool exempt_from_gc; 1932 1933 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { 1934 err = -ENOENT; 1935 goto out; 1936 } 1937 1938 exempt_from_gc = ndm->ndm_state & NUD_PERMANENT || 1939 ndm->ndm_flags & NTF_EXT_LEARNED; 1940 neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true); 1941 if (IS_ERR(neigh)) { 1942 err = PTR_ERR(neigh); 1943 goto out; 1944 } 1945 } else { 1946 if (nlh->nlmsg_flags & NLM_F_EXCL) { 1947 err = -EEXIST; 1948 neigh_release(neigh); 1949 goto out; 1950 } 1951 1952 if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) 1953 flags &= ~(NEIGH_UPDATE_F_OVERRIDE | 1954 NEIGH_UPDATE_F_OVERRIDE_ISROUTER); 1955 } 1956 1957 if (ndm->ndm_flags & NTF_EXT_LEARNED) 1958 flags |= NEIGH_UPDATE_F_EXT_LEARNED; 1959 1960 if (ndm->ndm_flags & NTF_ROUTER) 1961 flags |= NEIGH_UPDATE_F_ISROUTER; 1962 1963 if (ndm->ndm_flags & NTF_USE) { 1964 neigh_event_send(neigh, NULL); 1965 err = 0; 1966 } else 1967 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, 1968 NETLINK_CB(skb).portid, extack); 1969 1970 if (protocol) 1971 neigh->protocol = protocol; 1972 1973 neigh_release(neigh); 1974 1975 out: 1976 return err; 1977 } 1978 1979 static int neightbl_fill_parms(struct sk_buff *skb, 
struct neigh_parms *parms) 1980 { 1981 struct nlattr *nest; 1982 1983 nest = nla_nest_start_noflag(skb, NDTA_PARMS); 1984 if (nest == NULL) 1985 return -ENOBUFS; 1986 1987 if ((parms->dev && 1988 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) || 1989 nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) || 1990 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, 1991 NEIGH_VAR(parms, QUEUE_LEN_BYTES)) || 1992 /* approximative value for deprecated QUEUE_LEN (in packets) */ 1993 nla_put_u32(skb, NDTPA_QUEUE_LEN, 1994 NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) || 1995 nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) || 1996 nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) || 1997 nla_put_u32(skb, NDTPA_UCAST_PROBES, 1998 NEIGH_VAR(parms, UCAST_PROBES)) || 1999 nla_put_u32(skb, NDTPA_MCAST_PROBES, 2000 NEIGH_VAR(parms, MCAST_PROBES)) || 2001 nla_put_u32(skb, NDTPA_MCAST_REPROBES, 2002 NEIGH_VAR(parms, MCAST_REPROBES)) || 2003 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time, 2004 NDTPA_PAD) || 2005 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, 2006 NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) || 2007 nla_put_msecs(skb, NDTPA_GC_STALETIME, 2008 NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) || 2009 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME, 2010 NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) || 2011 nla_put_msecs(skb, NDTPA_RETRANS_TIME, 2012 NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) || 2013 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, 2014 NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) || 2015 nla_put_msecs(skb, NDTPA_PROXY_DELAY, 2016 NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) || 2017 nla_put_msecs(skb, NDTPA_LOCKTIME, 2018 NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD)) 2019 goto nla_put_failure; 2020 return nla_nest_end(skb, nest); 2021 2022 nla_put_failure: 2023 nla_nest_cancel(skb, nest); 2024 return -EMSGSIZE; 2025 } 2026 2027 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, 2028 u32 pid, u32 seq, int type, int flags) 2029 { 2030 struct nlmsghdr *nlh; 2031 struct ndtmsg *ndtmsg; 2032 2033 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2034 if (nlh == NULL) 2035 return -EMSGSIZE; 2036 2037 ndtmsg = nlmsg_data(nlh); 2038 2039 read_lock_bh(&tbl->lock); 2040 ndtmsg->ndtm_family = tbl->family; 2041 ndtmsg->ndtm_pad1 = 0; 2042 ndtmsg->ndtm_pad2 = 0; 2043 2044 if (nla_put_string(skb, NDTA_NAME, tbl->id) || 2045 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) || 2046 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) || 2047 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) || 2048 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3)) 2049 goto nla_put_failure; 2050 { 2051 unsigned long now = jiffies; 2052 long flush_delta = now - tbl->last_flush; 2053 long rand_delta = now - tbl->last_rand; 2054 struct neigh_hash_table *nht; 2055 struct ndt_config ndc = { 2056 .ndtc_key_len = tbl->key_len, 2057 .ndtc_entry_size = tbl->entry_size, 2058 .ndtc_entries = atomic_read(&tbl->entries), 2059 .ndtc_last_flush = jiffies_to_msecs(flush_delta), 2060 .ndtc_last_rand = jiffies_to_msecs(rand_delta), 2061 .ndtc_proxy_qlen = tbl->proxy_queue.qlen, 2062 }; 2063 2064 rcu_read_lock_bh(); 2065 nht = rcu_dereference_bh(tbl->nht); 2066 ndc.ndtc_hash_rnd = nht->hash_rnd[0]; 2067 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); 2068 rcu_read_unlock_bh(); 2069 2070 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc)) 2071 goto nla_put_failure; 2072 } 2073 2074 { 2075 int cpu; 2076 struct ndt_stats ndst; 2077 2078 
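		/* Sum the per-CPU neigh_statistics into a single ndt_stats
		 * blob for the NDTA_STATS attribute; an RTM_GETNEIGHTBL dump
		 * (e.g. "ip ntable show") reports these as the table-wide
		 * counters.
		 */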
memset(&ndst, 0, sizeof(ndst)); 2079 2080 for_each_possible_cpu(cpu) { 2081 struct neigh_statistics *st; 2082 2083 st = per_cpu_ptr(tbl->stats, cpu); 2084 ndst.ndts_allocs += st->allocs; 2085 ndst.ndts_destroys += st->destroys; 2086 ndst.ndts_hash_grows += st->hash_grows; 2087 ndst.ndts_res_failed += st->res_failed; 2088 ndst.ndts_lookups += st->lookups; 2089 ndst.ndts_hits += st->hits; 2090 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast; 2091 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast; 2092 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs; 2093 ndst.ndts_forced_gc_runs += st->forced_gc_runs; 2094 ndst.ndts_table_fulls += st->table_fulls; 2095 } 2096 2097 if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst, 2098 NDTA_PAD)) 2099 goto nla_put_failure; 2100 } 2101 2102 BUG_ON(tbl->parms.dev); 2103 if (neightbl_fill_parms(skb, &tbl->parms) < 0) 2104 goto nla_put_failure; 2105 2106 read_unlock_bh(&tbl->lock); 2107 nlmsg_end(skb, nlh); 2108 return 0; 2109 2110 nla_put_failure: 2111 read_unlock_bh(&tbl->lock); 2112 nlmsg_cancel(skb, nlh); 2113 return -EMSGSIZE; 2114 } 2115 2116 static int neightbl_fill_param_info(struct sk_buff *skb, 2117 struct neigh_table *tbl, 2118 struct neigh_parms *parms, 2119 u32 pid, u32 seq, int type, 2120 unsigned int flags) 2121 { 2122 struct ndtmsg *ndtmsg; 2123 struct nlmsghdr *nlh; 2124 2125 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags); 2126 if (nlh == NULL) 2127 return -EMSGSIZE; 2128 2129 ndtmsg = nlmsg_data(nlh); 2130 2131 read_lock_bh(&tbl->lock); 2132 ndtmsg->ndtm_family = tbl->family; 2133 ndtmsg->ndtm_pad1 = 0; 2134 ndtmsg->ndtm_pad2 = 0; 2135 2136 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 || 2137 neightbl_fill_parms(skb, parms) < 0) 2138 goto errout; 2139 2140 read_unlock_bh(&tbl->lock); 2141 nlmsg_end(skb, nlh); 2142 return 0; 2143 errout: 2144 read_unlock_bh(&tbl->lock); 2145 nlmsg_cancel(skb, nlh); 2146 return -EMSGSIZE; 2147 } 2148 2149 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { 2150 [NDTA_NAME] = { .type = NLA_STRING }, 2151 [NDTA_THRESH1] = { .type = NLA_U32 }, 2152 [NDTA_THRESH2] = { .type = NLA_U32 }, 2153 [NDTA_THRESH3] = { .type = NLA_U32 }, 2154 [NDTA_GC_INTERVAL] = { .type = NLA_U64 }, 2155 [NDTA_PARMS] = { .type = NLA_NESTED }, 2156 }; 2157 2158 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { 2159 [NDTPA_IFINDEX] = { .type = NLA_U32 }, 2160 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, 2161 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, 2162 [NDTPA_APP_PROBES] = { .type = NLA_U32 }, 2163 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, 2164 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 }, 2165 [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 }, 2166 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 }, 2167 [NDTPA_GC_STALETIME] = { .type = NLA_U64 }, 2168 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 }, 2169 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 }, 2170 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 }, 2171 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 }, 2172 [NDTPA_LOCKTIME] = { .type = NLA_U64 }, 2173 }; 2174 2175 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, 2176 struct netlink_ext_ack *extack) 2177 { 2178 struct net *net = sock_net(skb->sk); 2179 struct neigh_table *tbl; 2180 struct ndtmsg *ndtmsg; 2181 struct nlattr *tb[NDTA_MAX+1]; 2182 bool found = false; 2183 int err, tidx; 2184 2185 err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, 2186 nl_neightbl_policy, extack); 2187 if (err < 0) 2188 goto errout; 2189 2190 if (tb[NDTA_NAME] == NULL) { 2191 err = 
-EINVAL; 2192 goto errout; 2193 } 2194 2195 ndtmsg = nlmsg_data(nlh); 2196 2197 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2198 tbl = neigh_tables[tidx]; 2199 if (!tbl) 2200 continue; 2201 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) 2202 continue; 2203 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) { 2204 found = true; 2205 break; 2206 } 2207 } 2208 2209 if (!found) 2210 return -ENOENT; 2211 2212 /* 2213 * We acquire tbl->lock to be nice to the periodic timers and 2214 * make sure they always see a consistent set of values. 2215 */ 2216 write_lock_bh(&tbl->lock); 2217 2218 if (tb[NDTA_PARMS]) { 2219 struct nlattr *tbp[NDTPA_MAX+1]; 2220 struct neigh_parms *p; 2221 int i, ifindex = 0; 2222 2223 err = nla_parse_nested_deprecated(tbp, NDTPA_MAX, 2224 tb[NDTA_PARMS], 2225 nl_ntbl_parm_policy, extack); 2226 if (err < 0) 2227 goto errout_tbl_lock; 2228 2229 if (tbp[NDTPA_IFINDEX]) 2230 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]); 2231 2232 p = lookup_neigh_parms(tbl, net, ifindex); 2233 if (p == NULL) { 2234 err = -ENOENT; 2235 goto errout_tbl_lock; 2236 } 2237 2238 for (i = 1; i <= NDTPA_MAX; i++) { 2239 if (tbp[i] == NULL) 2240 continue; 2241 2242 switch (i) { 2243 case NDTPA_QUEUE_LEN: 2244 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2245 nla_get_u32(tbp[i]) * 2246 SKB_TRUESIZE(ETH_FRAME_LEN)); 2247 break; 2248 case NDTPA_QUEUE_LENBYTES: 2249 NEIGH_VAR_SET(p, QUEUE_LEN_BYTES, 2250 nla_get_u32(tbp[i])); 2251 break; 2252 case NDTPA_PROXY_QLEN: 2253 NEIGH_VAR_SET(p, PROXY_QLEN, 2254 nla_get_u32(tbp[i])); 2255 break; 2256 case NDTPA_APP_PROBES: 2257 NEIGH_VAR_SET(p, APP_PROBES, 2258 nla_get_u32(tbp[i])); 2259 break; 2260 case NDTPA_UCAST_PROBES: 2261 NEIGH_VAR_SET(p, UCAST_PROBES, 2262 nla_get_u32(tbp[i])); 2263 break; 2264 case NDTPA_MCAST_PROBES: 2265 NEIGH_VAR_SET(p, MCAST_PROBES, 2266 nla_get_u32(tbp[i])); 2267 break; 2268 case NDTPA_MCAST_REPROBES: 2269 NEIGH_VAR_SET(p, MCAST_REPROBES, 2270 nla_get_u32(tbp[i])); 2271 break; 2272 case NDTPA_BASE_REACHABLE_TIME: 2273 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2274 nla_get_msecs(tbp[i])); 2275 /* update reachable_time as well, otherwise, the change will 2276 * only be effective after the next time neigh_periodic_work 2277 * decides to recompute it (can be multiple minutes) 2278 */ 2279 p->reachable_time = 2280 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2281 break; 2282 case NDTPA_GC_STALETIME: 2283 NEIGH_VAR_SET(p, GC_STALETIME, 2284 nla_get_msecs(tbp[i])); 2285 break; 2286 case NDTPA_DELAY_PROBE_TIME: 2287 NEIGH_VAR_SET(p, DELAY_PROBE_TIME, 2288 nla_get_msecs(tbp[i])); 2289 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 2290 break; 2291 case NDTPA_RETRANS_TIME: 2292 NEIGH_VAR_SET(p, RETRANS_TIME, 2293 nla_get_msecs(tbp[i])); 2294 break; 2295 case NDTPA_ANYCAST_DELAY: 2296 NEIGH_VAR_SET(p, ANYCAST_DELAY, 2297 nla_get_msecs(tbp[i])); 2298 break; 2299 case NDTPA_PROXY_DELAY: 2300 NEIGH_VAR_SET(p, PROXY_DELAY, 2301 nla_get_msecs(tbp[i])); 2302 break; 2303 case NDTPA_LOCKTIME: 2304 NEIGH_VAR_SET(p, LOCKTIME, 2305 nla_get_msecs(tbp[i])); 2306 break; 2307 } 2308 } 2309 } 2310 2311 err = -ENOENT; 2312 if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] || 2313 tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) && 2314 !net_eq(net, &init_net)) 2315 goto errout_tbl_lock; 2316 2317 if (tb[NDTA_THRESH1]) 2318 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]); 2319 2320 if (tb[NDTA_THRESH2]) 2321 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]); 2322 2323 if (tb[NDTA_THRESH3]) 2324 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]); 2325 2326 if 
(tb[NDTA_GC_INTERVAL]) 2327 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]); 2328 2329 err = 0; 2330 2331 errout_tbl_lock: 2332 write_unlock_bh(&tbl->lock); 2333 errout: 2334 return err; 2335 } 2336 2337 static int neightbl_valid_dump_info(const struct nlmsghdr *nlh, 2338 struct netlink_ext_ack *extack) 2339 { 2340 struct ndtmsg *ndtm; 2341 2342 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) { 2343 NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request"); 2344 return -EINVAL; 2345 } 2346 2347 ndtm = nlmsg_data(nlh); 2348 if (ndtm->ndtm_pad1 || ndtm->ndtm_pad2) { 2349 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request"); 2350 return -EINVAL; 2351 } 2352 2353 if (nlmsg_attrlen(nlh, sizeof(*ndtm))) { 2354 NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request"); 2355 return -EINVAL; 2356 } 2357 2358 return 0; 2359 } 2360 2361 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2362 { 2363 const struct nlmsghdr *nlh = cb->nlh; 2364 struct net *net = sock_net(skb->sk); 2365 int family, tidx, nidx = 0; 2366 int tbl_skip = cb->args[0]; 2367 int neigh_skip = cb->args[1]; 2368 struct neigh_table *tbl; 2369 2370 if (cb->strict_check) { 2371 int err = neightbl_valid_dump_info(nlh, cb->extack); 2372 2373 if (err < 0) 2374 return err; 2375 } 2376 2377 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2378 2379 for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { 2380 struct neigh_parms *p; 2381 2382 tbl = neigh_tables[tidx]; 2383 if (!tbl) 2384 continue; 2385 2386 if (tidx < tbl_skip || (family && tbl->family != family)) 2387 continue; 2388 2389 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid, 2390 nlh->nlmsg_seq, RTM_NEWNEIGHTBL, 2391 NLM_F_MULTI) < 0) 2392 break; 2393 2394 nidx = 0; 2395 p = list_next_entry(&tbl->parms, list); 2396 list_for_each_entry_from(p, &tbl->parms_list, list) { 2397 if (!net_eq(neigh_parms_net(p), net)) 2398 continue; 2399 2400 if (nidx < neigh_skip) 2401 goto next; 2402 2403 if (neightbl_fill_param_info(skb, tbl, p, 2404 NETLINK_CB(cb->skb).portid, 2405 nlh->nlmsg_seq, 2406 RTM_NEWNEIGHTBL, 2407 NLM_F_MULTI) < 0) 2408 goto out; 2409 next: 2410 nidx++; 2411 } 2412 2413 neigh_skip = 0; 2414 } 2415 out: 2416 cb->args[0] = tidx; 2417 cb->args[1] = nidx; 2418 2419 return skb->len; 2420 } 2421 2422 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, 2423 u32 pid, u32 seq, int type, unsigned int flags) 2424 { 2425 unsigned long now = jiffies; 2426 struct nda_cacheinfo ci; 2427 struct nlmsghdr *nlh; 2428 struct ndmsg *ndm; 2429 2430 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2431 if (nlh == NULL) 2432 return -EMSGSIZE; 2433 2434 ndm = nlmsg_data(nlh); 2435 ndm->ndm_family = neigh->ops->family; 2436 ndm->ndm_pad1 = 0; 2437 ndm->ndm_pad2 = 0; 2438 ndm->ndm_flags = neigh->flags; 2439 ndm->ndm_type = neigh->type; 2440 ndm->ndm_ifindex = neigh->dev->ifindex; 2441 2442 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key)) 2443 goto nla_put_failure; 2444 2445 read_lock_bh(&neigh->lock); 2446 ndm->ndm_state = neigh->nud_state; 2447 if (neigh->nud_state & NUD_VALID) { 2448 char haddr[MAX_ADDR_LEN]; 2449 2450 neigh_ha_snapshot(haddr, neigh, neigh->dev); 2451 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) { 2452 read_unlock_bh(&neigh->lock); 2453 goto nla_put_failure; 2454 } 2455 } 2456 2457 ci.ndm_used = jiffies_to_clock_t(now - neigh->used); 2458 ci.ndm_confirmed = jiffies_to_clock_t(now - 
neigh->confirmed); 2459 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated); 2460 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1; 2461 read_unlock_bh(&neigh->lock); 2462 2463 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) || 2464 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 2465 goto nla_put_failure; 2466 2467 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol)) 2468 goto nla_put_failure; 2469 2470 nlmsg_end(skb, nlh); 2471 return 0; 2472 2473 nla_put_failure: 2474 nlmsg_cancel(skb, nlh); 2475 return -EMSGSIZE; 2476 } 2477 2478 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, 2479 u32 pid, u32 seq, int type, unsigned int flags, 2480 struct neigh_table *tbl) 2481 { 2482 struct nlmsghdr *nlh; 2483 struct ndmsg *ndm; 2484 2485 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags); 2486 if (nlh == NULL) 2487 return -EMSGSIZE; 2488 2489 ndm = nlmsg_data(nlh); 2490 ndm->ndm_family = tbl->family; 2491 ndm->ndm_pad1 = 0; 2492 ndm->ndm_pad2 = 0; 2493 ndm->ndm_flags = pn->flags | NTF_PROXY; 2494 ndm->ndm_type = RTN_UNICAST; 2495 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; 2496 ndm->ndm_state = NUD_NONE; 2497 2498 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) 2499 goto nla_put_failure; 2500 2501 if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol)) 2502 goto nla_put_failure; 2503 2504 nlmsg_end(skb, nlh); 2505 return 0; 2506 2507 nla_put_failure: 2508 nlmsg_cancel(skb, nlh); 2509 return -EMSGSIZE; 2510 } 2511 2512 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid) 2513 { 2514 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 2515 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid); 2516 } 2517 2518 static bool neigh_master_filtered(struct net_device *dev, int master_idx) 2519 { 2520 struct net_device *master; 2521 2522 if (!master_idx) 2523 return false; 2524 2525 master = dev ? 
netdev_master_upper_dev_get(dev) : NULL; 2526 if (!master || master->ifindex != master_idx) 2527 return true; 2528 2529 return false; 2530 } 2531 2532 static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx) 2533 { 2534 if (filter_idx && (!dev || dev->ifindex != filter_idx)) 2535 return true; 2536 2537 return false; 2538 } 2539 2540 struct neigh_dump_filter { 2541 int master_idx; 2542 int dev_idx; 2543 }; 2544 2545 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2546 struct netlink_callback *cb, 2547 struct neigh_dump_filter *filter) 2548 { 2549 struct net *net = sock_net(skb->sk); 2550 struct neighbour *n; 2551 int rc, h, s_h = cb->args[1]; 2552 int idx, s_idx = idx = cb->args[2]; 2553 struct neigh_hash_table *nht; 2554 unsigned int flags = NLM_F_MULTI; 2555 2556 if (filter->dev_idx || filter->master_idx) 2557 flags |= NLM_F_DUMP_FILTERED; 2558 2559 rcu_read_lock_bh(); 2560 nht = rcu_dereference_bh(tbl->nht); 2561 2562 for (h = s_h; h < (1 << nht->hash_shift); h++) { 2563 if (h > s_h) 2564 s_idx = 0; 2565 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; 2566 n != NULL; 2567 n = rcu_dereference_bh(n->next)) { 2568 if (idx < s_idx || !net_eq(dev_net(n->dev), net)) 2569 goto next; 2570 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2571 neigh_master_filtered(n->dev, filter->master_idx)) 2572 goto next; 2573 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2574 cb->nlh->nlmsg_seq, 2575 RTM_NEWNEIGH, 2576 flags) < 0) { 2577 rc = -1; 2578 goto out; 2579 } 2580 next: 2581 idx++; 2582 } 2583 } 2584 rc = skb->len; 2585 out: 2586 rcu_read_unlock_bh(); 2587 cb->args[1] = h; 2588 cb->args[2] = idx; 2589 return rc; 2590 } 2591 2592 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, 2593 struct netlink_callback *cb, 2594 struct neigh_dump_filter *filter) 2595 { 2596 struct pneigh_entry *n; 2597 struct net *net = sock_net(skb->sk); 2598 int rc, h, s_h = cb->args[3]; 2599 int idx, s_idx = idx = cb->args[4]; 2600 unsigned int flags = NLM_F_MULTI; 2601 2602 if (filter->dev_idx || filter->master_idx) 2603 flags |= NLM_F_DUMP_FILTERED; 2604 2605 read_lock_bh(&tbl->lock); 2606 2607 for (h = s_h; h <= PNEIGH_HASHMASK; h++) { 2608 if (h > s_h) 2609 s_idx = 0; 2610 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2611 if (idx < s_idx || pneigh_net(n) != net) 2612 goto next; 2613 if (neigh_ifindex_filtered(n->dev, filter->dev_idx) || 2614 neigh_master_filtered(n->dev, filter->master_idx)) 2615 goto next; 2616 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, 2617 cb->nlh->nlmsg_seq, 2618 RTM_NEWNEIGH, flags, tbl) < 0) { 2619 read_unlock_bh(&tbl->lock); 2620 rc = -1; 2621 goto out; 2622 } 2623 next: 2624 idx++; 2625 } 2626 } 2627 2628 read_unlock_bh(&tbl->lock); 2629 rc = skb->len; 2630 out: 2631 cb->args[3] = h; 2632 cb->args[4] = idx; 2633 return rc; 2634 2635 } 2636 2637 static int neigh_valid_dump_req(const struct nlmsghdr *nlh, 2638 bool strict_check, 2639 struct neigh_dump_filter *filter, 2640 struct netlink_ext_ack *extack) 2641 { 2642 struct nlattr *tb[NDA_MAX + 1]; 2643 int err, i; 2644 2645 if (strict_check) { 2646 struct ndmsg *ndm; 2647 2648 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2649 NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request"); 2650 return -EINVAL; 2651 } 2652 2653 ndm = nlmsg_data(nlh); 2654 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex || 2655 ndm->ndm_state || ndm->ndm_type) { 2656 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor 
dump request"); 2657 return -EINVAL; 2658 } 2659 2660 if (ndm->ndm_flags & ~NTF_PROXY) { 2661 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request"); 2662 return -EINVAL; 2663 } 2664 2665 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), 2666 tb, NDA_MAX, nda_policy, 2667 extack); 2668 } else { 2669 err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb, 2670 NDA_MAX, nda_policy, extack); 2671 } 2672 if (err < 0) 2673 return err; 2674 2675 for (i = 0; i <= NDA_MAX; ++i) { 2676 if (!tb[i]) 2677 continue; 2678 2679 /* all new attributes should require strict_check */ 2680 switch (i) { 2681 case NDA_IFINDEX: 2682 filter->dev_idx = nla_get_u32(tb[i]); 2683 break; 2684 case NDA_MASTER: 2685 filter->master_idx = nla_get_u32(tb[i]); 2686 break; 2687 default: 2688 if (strict_check) { 2689 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request"); 2690 return -EINVAL; 2691 } 2692 } 2693 } 2694 2695 return 0; 2696 } 2697 2698 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) 2699 { 2700 const struct nlmsghdr *nlh = cb->nlh; 2701 struct neigh_dump_filter filter = {}; 2702 struct neigh_table *tbl; 2703 int t, family, s_t; 2704 int proxy = 0; 2705 int err; 2706 2707 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family; 2708 2709 /* check for full ndmsg structure presence, family member is 2710 * the same for both structures 2711 */ 2712 if (nlmsg_len(nlh) >= sizeof(struct ndmsg) && 2713 ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY) 2714 proxy = 1; 2715 2716 err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack); 2717 if (err < 0 && cb->strict_check) 2718 return err; 2719 2720 s_t = cb->args[0]; 2721 2722 for (t = 0; t < NEIGH_NR_TABLES; t++) { 2723 tbl = neigh_tables[t]; 2724 2725 if (!tbl) 2726 continue; 2727 if (t < s_t || (family && tbl->family != family)) 2728 continue; 2729 if (t > s_t) 2730 memset(&cb->args[1], 0, sizeof(cb->args) - 2731 sizeof(cb->args[0])); 2732 if (proxy) 2733 err = pneigh_dump_table(tbl, skb, cb, &filter); 2734 else 2735 err = neigh_dump_table(tbl, skb, cb, &filter); 2736 if (err < 0) 2737 break; 2738 } 2739 2740 cb->args[0] = t; 2741 return skb->len; 2742 } 2743 2744 static int neigh_valid_get_req(const struct nlmsghdr *nlh, 2745 struct neigh_table **tbl, 2746 void **dst, int *dev_idx, u8 *ndm_flags, 2747 struct netlink_ext_ack *extack) 2748 { 2749 struct nlattr *tb[NDA_MAX + 1]; 2750 struct ndmsg *ndm; 2751 int err, i; 2752 2753 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) { 2754 NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request"); 2755 return -EINVAL; 2756 } 2757 2758 ndm = nlmsg_data(nlh); 2759 if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state || 2760 ndm->ndm_type) { 2761 NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request"); 2762 return -EINVAL; 2763 } 2764 2765 if (ndm->ndm_flags & ~NTF_PROXY) { 2766 NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request"); 2767 return -EINVAL; 2768 } 2769 2770 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb, 2771 NDA_MAX, nda_policy, extack); 2772 if (err < 0) 2773 return err; 2774 2775 *ndm_flags = ndm->ndm_flags; 2776 *dev_idx = ndm->ndm_ifindex; 2777 *tbl = neigh_find_table(ndm->ndm_family); 2778 if (*tbl == NULL) { 2779 NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request"); 2780 return -EAFNOSUPPORT; 2781 } 2782 2783 for (i = 0; i <= NDA_MAX; ++i) { 2784 if (!tb[i]) 2785 continue; 2786 2787 switch (i) { 2788 
case NDA_DST: 2789 if (nla_len(tb[i]) != (int)(*tbl)->key_len) { 2790 NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request"); 2791 return -EINVAL; 2792 } 2793 *dst = nla_data(tb[i]); 2794 break; 2795 default: 2796 NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request"); 2797 return -EINVAL; 2798 } 2799 } 2800 2801 return 0; 2802 } 2803 2804 static inline size_t neigh_nlmsg_size(void) 2805 { 2806 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2807 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2808 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */ 2809 + nla_total_size(sizeof(struct nda_cacheinfo)) 2810 + nla_total_size(4) /* NDA_PROBES */ 2811 + nla_total_size(1); /* NDA_PROTOCOL */ 2812 } 2813 2814 static int neigh_get_reply(struct net *net, struct neighbour *neigh, 2815 u32 pid, u32 seq) 2816 { 2817 struct sk_buff *skb; 2818 int err = 0; 2819 2820 skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL); 2821 if (!skb) 2822 return -ENOBUFS; 2823 2824 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0); 2825 if (err) { 2826 kfree_skb(skb); 2827 goto errout; 2828 } 2829 2830 err = rtnl_unicast(skb, net, pid); 2831 errout: 2832 return err; 2833 } 2834 2835 static inline size_t pneigh_nlmsg_size(void) 2836 { 2837 return NLMSG_ALIGN(sizeof(struct ndmsg)) 2838 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */ 2839 + nla_total_size(1); /* NDA_PROTOCOL */ 2840 } 2841 2842 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh, 2843 u32 pid, u32 seq, struct neigh_table *tbl) 2844 { 2845 struct sk_buff *skb; 2846 int err = 0; 2847 2848 skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL); 2849 if (!skb) 2850 return -ENOBUFS; 2851 2852 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl); 2853 if (err) { 2854 kfree_skb(skb); 2855 goto errout; 2856 } 2857 2858 err = rtnl_unicast(skb, net, pid); 2859 errout: 2860 return err; 2861 } 2862 2863 static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, 2864 struct netlink_ext_ack *extack) 2865 { 2866 struct net *net = sock_net(in_skb->sk); 2867 struct net_device *dev = NULL; 2868 struct neigh_table *tbl = NULL; 2869 struct neighbour *neigh; 2870 void *dst = NULL; 2871 u8 ndm_flags = 0; 2872 int dev_idx = 0; 2873 int err; 2874 2875 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags, 2876 extack); 2877 if (err < 0) 2878 return err; 2879 2880 if (dev_idx) { 2881 dev = __dev_get_by_index(net, dev_idx); 2882 if (!dev) { 2883 NL_SET_ERR_MSG(extack, "Unknown device ifindex"); 2884 return -ENODEV; 2885 } 2886 } 2887 2888 if (!dst) { 2889 NL_SET_ERR_MSG(extack, "Network address not specified"); 2890 return -EINVAL; 2891 } 2892 2893 if (ndm_flags & NTF_PROXY) { 2894 struct pneigh_entry *pn; 2895 2896 pn = pneigh_lookup(tbl, net, dst, dev, 0); 2897 if (!pn) { 2898 NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found"); 2899 return -ENOENT; 2900 } 2901 return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid, 2902 nlh->nlmsg_seq, tbl); 2903 } 2904 2905 if (!dev) { 2906 NL_SET_ERR_MSG(extack, "No device specified"); 2907 return -EINVAL; 2908 } 2909 2910 neigh = neigh_lookup(tbl, dst, dev); 2911 if (!neigh) { 2912 NL_SET_ERR_MSG(extack, "Neighbour entry not found"); 2913 return -ENOENT; 2914 } 2915 2916 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid, 2917 nlh->nlmsg_seq); 2918 2919 neigh_release(neigh); 2920 2921 return err; 2922 } 2923 2924 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) 2925 { 2926 int chain; 2927 struct 
neigh_hash_table *nht; 2928 2929 rcu_read_lock_bh(); 2930 nht = rcu_dereference_bh(tbl->nht); 2931 2932 read_lock(&tbl->lock); /* avoid resizes */ 2933 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 2934 struct neighbour *n; 2935 2936 for (n = rcu_dereference_bh(nht->hash_buckets[chain]); 2937 n != NULL; 2938 n = rcu_dereference_bh(n->next)) 2939 cb(n, cookie); 2940 } 2941 read_unlock(&tbl->lock); 2942 rcu_read_unlock_bh(); 2943 } 2944 EXPORT_SYMBOL(neigh_for_each); 2945 2946 /* The tbl->lock must be held as a writer and BH disabled. */ 2947 void __neigh_for_each_release(struct neigh_table *tbl, 2948 int (*cb)(struct neighbour *)) 2949 { 2950 int chain; 2951 struct neigh_hash_table *nht; 2952 2953 nht = rcu_dereference_protected(tbl->nht, 2954 lockdep_is_held(&tbl->lock)); 2955 for (chain = 0; chain < (1 << nht->hash_shift); chain++) { 2956 struct neighbour *n; 2957 struct neighbour __rcu **np; 2958 2959 np = &nht->hash_buckets[chain]; 2960 while ((n = rcu_dereference_protected(*np, 2961 lockdep_is_held(&tbl->lock))) != NULL) { 2962 int release; 2963 2964 write_lock(&n->lock); 2965 release = cb(n); 2966 if (release) { 2967 rcu_assign_pointer(*np, 2968 rcu_dereference_protected(n->next, 2969 lockdep_is_held(&tbl->lock))); 2970 neigh_mark_dead(n); 2971 } else 2972 np = &n->next; 2973 write_unlock(&n->lock); 2974 if (release) 2975 neigh_cleanup_and_release(n); 2976 } 2977 } 2978 } 2979 EXPORT_SYMBOL(__neigh_for_each_release); 2980 2981 int neigh_xmit(int index, struct net_device *dev, 2982 const void *addr, struct sk_buff *skb) 2983 { 2984 int err = -EAFNOSUPPORT; 2985 if (likely(index < NEIGH_NR_TABLES)) { 2986 struct neigh_table *tbl; 2987 struct neighbour *neigh; 2988 2989 tbl = neigh_tables[index]; 2990 if (!tbl) 2991 goto out; 2992 rcu_read_lock_bh(); 2993 if (index == NEIGH_ARP_TABLE) { 2994 u32 key = *((u32 *)addr); 2995 2996 neigh = __ipv4_neigh_lookup_noref(dev, key); 2997 } else { 2998 neigh = __neigh_lookup_noref(tbl, addr, dev); 2999 } 3000 if (!neigh) 3001 neigh = __neigh_create(tbl, addr, dev, false); 3002 err = PTR_ERR(neigh); 3003 if (IS_ERR(neigh)) { 3004 rcu_read_unlock_bh(); 3005 goto out_kfree_skb; 3006 } 3007 err = neigh->output(neigh, skb); 3008 rcu_read_unlock_bh(); 3009 } 3010 else if (index == NEIGH_LINK_TABLE) { 3011 err = dev_hard_header(skb, dev, ntohs(skb->protocol), 3012 addr, NULL, skb->len); 3013 if (err < 0) 3014 goto out_kfree_skb; 3015 err = dev_queue_xmit(skb); 3016 } 3017 out: 3018 return err; 3019 out_kfree_skb: 3020 kfree_skb(skb); 3021 goto out; 3022 } 3023 EXPORT_SYMBOL(neigh_xmit); 3024 3025 #ifdef CONFIG_PROC_FS 3026 3027 static struct neighbour *neigh_get_first(struct seq_file *seq) 3028 { 3029 struct neigh_seq_state *state = seq->private; 3030 struct net *net = seq_file_net(seq); 3031 struct neigh_hash_table *nht = state->nht; 3032 struct neighbour *n = NULL; 3033 int bucket; 3034 3035 state->flags &= ~NEIGH_SEQ_IS_PNEIGH; 3036 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { 3037 n = rcu_dereference_bh(nht->hash_buckets[bucket]); 3038 3039 while (n) { 3040 if (!net_eq(dev_net(n->dev), net)) 3041 goto next; 3042 if (state->neigh_sub_iter) { 3043 loff_t fakep = 0; 3044 void *v; 3045 3046 v = state->neigh_sub_iter(state, n, &fakep); 3047 if (!v) 3048 goto next; 3049 } 3050 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3051 break; 3052 if (n->nud_state & ~NUD_NOARP) 3053 break; 3054 next: 3055 n = rcu_dereference_bh(n->next); 3056 } 3057 3058 if (n) 3059 break; 3060 } 3061 state->bucket = bucket; 3062 3063 return n; 3064 } 3065 
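/*
 * Usage sketch (illustrative only): neigh_get_first()/neigh_get_next()
 * back the neigh_seq_start()/neigh_seq_next()/neigh_seq_stop() iterators
 * defined further down.  A protocol exposes its neighbour table through
 * them by pointing its own seq_operations at those helpers, roughly like
 * this (the proto_* names are placeholders, not symbols from this file):
 *
 *	static void *proto_neigh_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &proto_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations proto_neigh_seq_ops = {
 *		.start	= proto_neigh_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= proto_neigh_seq_show,
 *	};
 *
 * The seq_file private area must hold a struct neigh_seq_state, since these
 * iterators cast seq->private to it (e.g. pass sizeof(struct neigh_seq_state)
 * as the state size when registering the proc file with proc_create_net()).
 */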
3066 static struct neighbour *neigh_get_next(struct seq_file *seq, 3067 struct neighbour *n, 3068 loff_t *pos) 3069 { 3070 struct neigh_seq_state *state = seq->private; 3071 struct net *net = seq_file_net(seq); 3072 struct neigh_hash_table *nht = state->nht; 3073 3074 if (state->neigh_sub_iter) { 3075 void *v = state->neigh_sub_iter(state, n, pos); 3076 if (v) 3077 return n; 3078 } 3079 n = rcu_dereference_bh(n->next); 3080 3081 while (1) { 3082 while (n) { 3083 if (!net_eq(dev_net(n->dev), net)) 3084 goto next; 3085 if (state->neigh_sub_iter) { 3086 void *v = state->neigh_sub_iter(state, n, pos); 3087 if (v) 3088 return n; 3089 goto next; 3090 } 3091 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP)) 3092 break; 3093 3094 if (n->nud_state & ~NUD_NOARP) 3095 break; 3096 next: 3097 n = rcu_dereference_bh(n->next); 3098 } 3099 3100 if (n) 3101 break; 3102 3103 if (++state->bucket >= (1 << nht->hash_shift)) 3104 break; 3105 3106 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); 3107 } 3108 3109 if (n && pos) 3110 --(*pos); 3111 return n; 3112 } 3113 3114 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) 3115 { 3116 struct neighbour *n = neigh_get_first(seq); 3117 3118 if (n) { 3119 --(*pos); 3120 while (*pos) { 3121 n = neigh_get_next(seq, n, pos); 3122 if (!n) 3123 break; 3124 } 3125 } 3126 return *pos ? NULL : n; 3127 } 3128 3129 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq) 3130 { 3131 struct neigh_seq_state *state = seq->private; 3132 struct net *net = seq_file_net(seq); 3133 struct neigh_table *tbl = state->tbl; 3134 struct pneigh_entry *pn = NULL; 3135 int bucket = state->bucket; 3136 3137 state->flags |= NEIGH_SEQ_IS_PNEIGH; 3138 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { 3139 pn = tbl->phash_buckets[bucket]; 3140 while (pn && !net_eq(pneigh_net(pn), net)) 3141 pn = pn->next; 3142 if (pn) 3143 break; 3144 } 3145 state->bucket = bucket; 3146 3147 return pn; 3148 } 3149 3150 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq, 3151 struct pneigh_entry *pn, 3152 loff_t *pos) 3153 { 3154 struct neigh_seq_state *state = seq->private; 3155 struct net *net = seq_file_net(seq); 3156 struct neigh_table *tbl = state->tbl; 3157 3158 do { 3159 pn = pn->next; 3160 } while (pn && !net_eq(pneigh_net(pn), net)); 3161 3162 while (!pn) { 3163 if (++state->bucket > PNEIGH_HASHMASK) 3164 break; 3165 pn = tbl->phash_buckets[state->bucket]; 3166 while (pn && !net_eq(pneigh_net(pn), net)) 3167 pn = pn->next; 3168 if (pn) 3169 break; 3170 } 3171 3172 if (pn && pos) 3173 --(*pos); 3174 3175 return pn; 3176 } 3177 3178 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos) 3179 { 3180 struct pneigh_entry *pn = pneigh_get_first(seq); 3181 3182 if (pn) { 3183 --(*pos); 3184 while (*pos) { 3185 pn = pneigh_get_next(seq, pn, pos); 3186 if (!pn) 3187 break; 3188 } 3189 } 3190 return *pos ? 
NULL : pn; 3191 } 3192 3193 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) 3194 { 3195 struct neigh_seq_state *state = seq->private; 3196 void *rc; 3197 loff_t idxpos = *pos; 3198 3199 rc = neigh_get_idx(seq, &idxpos); 3200 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3201 rc = pneigh_get_idx(seq, &idxpos); 3202 3203 return rc; 3204 } 3205 3206 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags) 3207 __acquires(tbl->lock) 3208 __acquires(rcu_bh) 3209 { 3210 struct neigh_seq_state *state = seq->private; 3211 3212 state->tbl = tbl; 3213 state->bucket = 0; 3214 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH); 3215 3216 rcu_read_lock_bh(); 3217 state->nht = rcu_dereference_bh(tbl->nht); 3218 read_lock(&tbl->lock); 3219 3220 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; 3221 } 3222 EXPORT_SYMBOL(neigh_seq_start); 3223 3224 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3225 { 3226 struct neigh_seq_state *state; 3227 void *rc; 3228 3229 if (v == SEQ_START_TOKEN) { 3230 rc = neigh_get_first(seq); 3231 goto out; 3232 } 3233 3234 state = seq->private; 3235 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) { 3236 rc = neigh_get_next(seq, v, NULL); 3237 if (rc) 3238 goto out; 3239 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY)) 3240 rc = pneigh_get_first(seq); 3241 } else { 3242 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY); 3243 rc = pneigh_get_next(seq, v, NULL); 3244 } 3245 out: 3246 ++(*pos); 3247 return rc; 3248 } 3249 EXPORT_SYMBOL(neigh_seq_next); 3250 3251 void neigh_seq_stop(struct seq_file *seq, void *v) 3252 __releases(tbl->lock) 3253 __releases(rcu_bh) 3254 { 3255 struct neigh_seq_state *state = seq->private; 3256 struct neigh_table *tbl = state->tbl; 3257 3258 read_unlock(&tbl->lock); 3259 rcu_read_unlock_bh(); 3260 } 3261 EXPORT_SYMBOL(neigh_seq_stop); 3262 3263 /* statistics via seq_file */ 3264 3265 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 3266 { 3267 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file)); 3268 int cpu; 3269 3270 if (*pos == 0) 3271 return SEQ_START_TOKEN; 3272 3273 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { 3274 if (!cpu_possible(cpu)) 3275 continue; 3276 *pos = cpu+1; 3277 return per_cpu_ptr(tbl->stats, cpu); 3278 } 3279 return NULL; 3280 } 3281 3282 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3283 { 3284 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file)); 3285 int cpu; 3286 3287 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 3288 if (!cpu_possible(cpu)) 3289 continue; 3290 *pos = cpu+1; 3291 return per_cpu_ptr(tbl->stats, cpu); 3292 } 3293 (*pos)++; 3294 return NULL; 3295 } 3296 3297 static void neigh_stat_seq_stop(struct seq_file *seq, void *v) 3298 { 3299 3300 } 3301 3302 static int neigh_stat_seq_show(struct seq_file *seq, void *v) 3303 { 3304 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file)); 3305 struct neigh_statistics *st = v; 3306 3307 if (v == SEQ_START_TOKEN) { 3308 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n"); 3309 return 0; 3310 } 3311 3312 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " 3313 "%08lx %08lx %08lx %08lx %08lx %08lx\n", 3314 atomic_read(&tbl->entries), 3315 3316 st->allocs, 3317 st->destroys, 3318 st->hash_grows, 3319 3320 st->lookups, 3321 st->hits, 3322 3323 st->res_failed, 3324 3325 st->rcv_probes_mcast, 
3326 st->rcv_probes_ucast, 3327 3328 st->periodic_gc_runs, 3329 st->forced_gc_runs, 3330 st->unres_discards, 3331 st->table_fulls 3332 ); 3333 3334 return 0; 3335 } 3336 3337 static const struct seq_operations neigh_stat_seq_ops = { 3338 .start = neigh_stat_seq_start, 3339 .next = neigh_stat_seq_next, 3340 .stop = neigh_stat_seq_stop, 3341 .show = neigh_stat_seq_show, 3342 }; 3343 #endif /* CONFIG_PROC_FS */ 3344 3345 static void __neigh_notify(struct neighbour *n, int type, int flags, 3346 u32 pid) 3347 { 3348 struct net *net = dev_net(n->dev); 3349 struct sk_buff *skb; 3350 int err = -ENOBUFS; 3351 3352 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC); 3353 if (skb == NULL) 3354 goto errout; 3355 3356 err = neigh_fill_info(skb, n, pid, 0, type, flags); 3357 if (err < 0) { 3358 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */ 3359 WARN_ON(err == -EMSGSIZE); 3360 kfree_skb(skb); 3361 goto errout; 3362 } 3363 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 3364 return; 3365 errout: 3366 if (err < 0) 3367 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 3368 } 3369 3370 void neigh_app_ns(struct neighbour *n) 3371 { 3372 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0); 3373 } 3374 EXPORT_SYMBOL(neigh_app_ns); 3375 3376 #ifdef CONFIG_SYSCTL 3377 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN); 3378 3379 static int proc_unres_qlen(struct ctl_table *ctl, int write, 3380 void __user *buffer, size_t *lenp, loff_t *ppos) 3381 { 3382 int size, ret; 3383 struct ctl_table tmp = *ctl; 3384 3385 tmp.extra1 = SYSCTL_ZERO; 3386 tmp.extra2 = &unres_qlen_max; 3387 tmp.data = &size; 3388 3389 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN); 3390 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3391 3392 if (write && !ret) 3393 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); 3394 return ret; 3395 } 3396 3397 static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev, 3398 int family) 3399 { 3400 switch (family) { 3401 case AF_INET: 3402 return __in_dev_arp_parms_get_rcu(dev); 3403 case AF_INET6: 3404 return __in6_dev_nd_parms_get_rcu(dev); 3405 } 3406 return NULL; 3407 } 3408 3409 static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p, 3410 int index) 3411 { 3412 struct net_device *dev; 3413 int family = neigh_parms_family(p); 3414 3415 rcu_read_lock(); 3416 for_each_netdev_rcu(net, dev) { 3417 struct neigh_parms *dst_p = 3418 neigh_get_dev_parms_rcu(dev, family); 3419 3420 if (dst_p && !test_bit(index, dst_p->data_state)) 3421 dst_p->data[index] = p->data[index]; 3422 } 3423 rcu_read_unlock(); 3424 } 3425 3426 static void neigh_proc_update(struct ctl_table *ctl, int write) 3427 { 3428 struct net_device *dev = ctl->extra1; 3429 struct neigh_parms *p = ctl->extra2; 3430 struct net *net = neigh_parms_net(p); 3431 int index = (int *) ctl->data - p->data; 3432 3433 if (!write) 3434 return; 3435 3436 set_bit(index, p->data_state); 3437 if (index == NEIGH_VAR_DELAY_PROBE_TIME) 3438 call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); 3439 if (!dev) /* NULL dev means this is default value */ 3440 neigh_copy_dflt_parms(net, p, index); 3441 } 3442 3443 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write, 3444 void __user *buffer, 3445 size_t *lenp, loff_t *ppos) 3446 { 3447 struct ctl_table tmp = *ctl; 3448 int ret; 3449 3450 tmp.extra1 = SYSCTL_ZERO; 3451 tmp.extra2 = SYSCTL_INT_MAX; 3452 3453 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 3454 neigh_proc_update(ctl, write); 3455 return 
ret; 3456 } 3457 3458 int neigh_proc_dointvec(struct ctl_table *ctl, int write, 3459 void __user *buffer, size_t *lenp, loff_t *ppos) 3460 { 3461 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 3462 3463 neigh_proc_update(ctl, write); 3464 return ret; 3465 } 3466 EXPORT_SYMBOL(neigh_proc_dointvec); 3467 3468 int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, 3469 void __user *buffer, 3470 size_t *lenp, loff_t *ppos) 3471 { 3472 int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3473 3474 neigh_proc_update(ctl, write); 3475 return ret; 3476 } 3477 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies); 3478 3479 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write, 3480 void __user *buffer, 3481 size_t *lenp, loff_t *ppos) 3482 { 3483 int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos); 3484 3485 neigh_proc_update(ctl, write); 3486 return ret; 3487 } 3488 3489 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, 3490 void __user *buffer, 3491 size_t *lenp, loff_t *ppos) 3492 { 3493 int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3494 3495 neigh_proc_update(ctl, write); 3496 return ret; 3497 } 3498 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies); 3499 3500 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, 3501 void __user *buffer, 3502 size_t *lenp, loff_t *ppos) 3503 { 3504 int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos); 3505 3506 neigh_proc_update(ctl, write); 3507 return ret; 3508 } 3509 3510 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 3511 void __user *buffer, 3512 size_t *lenp, loff_t *ppos) 3513 { 3514 struct neigh_parms *p = ctl->extra2; 3515 int ret; 3516 3517 if (strcmp(ctl->procname, "base_reachable_time") == 0) 3518 ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 3519 else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) 3520 ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 3521 else 3522 ret = -1; 3523 3524 if (write && ret == 0) { 3525 /* update reachable_time as well, otherwise, the change will 3526 * only be effective after the next time neigh_periodic_work 3527 * decides to recompute it 3528 */ 3529 p->reachable_time = 3530 neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 3531 } 3532 return ret; 3533 } 3534 3535 #define NEIGH_PARMS_DATA_OFFSET(index) \ 3536 (&((struct neigh_parms *) 0)->data[index]) 3537 3538 #define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \ 3539 [NEIGH_VAR_ ## attr] = { \ 3540 .procname = name, \ 3541 .data = NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \ 3542 .maxlen = sizeof(int), \ 3543 .mode = mval, \ 3544 .proc_handler = proc, \ 3545 } 3546 3547 #define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \ 3548 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax) 3549 3550 #define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \ 3551 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies) 3552 3553 #define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \ 3554 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies) 3555 3556 #define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \ 3557 NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies) 3558 3559 #define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \ 3560 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies) 3561 3562 #define 
NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \ 3563 NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen) 3564 3565 static struct neigh_sysctl_table { 3566 struct ctl_table_header *sysctl_header; 3567 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; 3568 } neigh_sysctl_template __read_mostly = { 3569 .neigh_vars = { 3570 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"), 3571 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"), 3572 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"), 3573 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"), 3574 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"), 3575 NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"), 3576 NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"), 3577 NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"), 3578 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"), 3579 NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"), 3580 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"), 3581 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"), 3582 NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"), 3583 NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"), 3584 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"), 3585 NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"), 3586 [NEIGH_VAR_GC_INTERVAL] = { 3587 .procname = "gc_interval", 3588 .maxlen = sizeof(int), 3589 .mode = 0644, 3590 .proc_handler = proc_dointvec_jiffies, 3591 }, 3592 [NEIGH_VAR_GC_THRESH1] = { 3593 .procname = "gc_thresh1", 3594 .maxlen = sizeof(int), 3595 .mode = 0644, 3596 .extra1 = SYSCTL_ZERO, 3597 .extra2 = SYSCTL_INT_MAX, 3598 .proc_handler = proc_dointvec_minmax, 3599 }, 3600 [NEIGH_VAR_GC_THRESH2] = { 3601 .procname = "gc_thresh2", 3602 .maxlen = sizeof(int), 3603 .mode = 0644, 3604 .extra1 = SYSCTL_ZERO, 3605 .extra2 = SYSCTL_INT_MAX, 3606 .proc_handler = proc_dointvec_minmax, 3607 }, 3608 [NEIGH_VAR_GC_THRESH3] = { 3609 .procname = "gc_thresh3", 3610 .maxlen = sizeof(int), 3611 .mode = 0644, 3612 .extra1 = SYSCTL_ZERO, 3613 .extra2 = SYSCTL_INT_MAX, 3614 .proc_handler = proc_dointvec_minmax, 3615 }, 3616 {}, 3617 }, 3618 }; 3619 3620 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 3621 proc_handler *handler) 3622 { 3623 int i; 3624 struct neigh_sysctl_table *t; 3625 const char *dev_name_source; 3626 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ]; 3627 char *p_name; 3628 3629 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL); 3630 if (!t) 3631 goto err; 3632 3633 for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { 3634 t->neigh_vars[i].data += (long) p; 3635 t->neigh_vars[i].extra1 = dev; 3636 t->neigh_vars[i].extra2 = p; 3637 } 3638 3639 if (dev) { 3640 dev_name_source = dev->name; 3641 /* Terminate the table early */ 3642 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3643 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3644 } else { 3645 struct neigh_table *tbl = p->tbl; 3646 dev_name_source = "default"; 3647 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval; 3648 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1; 3649 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2; 3650 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3; 3651 } 3652 3653 if (handler) { 3654 /* 
RetransTime */ 3655 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler; 3656 /* ReachableTime */ 3657 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler; 3658 /* RetransTime (in milliseconds)*/ 3659 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3660 /* ReachableTime (in milliseconds) */ 3661 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3662 } else { 3663 /* Those handlers will update p->reachable_time after 3664 * base_reachable_time(_ms) is set to ensure the new timer starts being 3665 * applied after the next neighbour update instead of waiting for 3666 * neigh_periodic_work to update its value (can be multiple minutes) 3667 * So any handler that replaces them should do this as well 3668 */ 3669 /* ReachableTime */ 3670 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = 3671 neigh_proc_base_reachable_time; 3672 /* ReachableTime (in milliseconds) */ 3673 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = 3674 neigh_proc_base_reachable_time; 3675 } 3676 3677 /* Don't export sysctls to unprivileged users */ 3678 if (neigh_parms_net(p)->user_ns != &init_user_ns) 3679 t->neigh_vars[0].procname = NULL; 3680 3681 switch (neigh_parms_family(p)) { 3682 case AF_INET: 3683 p_name = "ipv4"; 3684 break; 3685 case AF_INET6: 3686 p_name = "ipv6"; 3687 break; 3688 default: 3689 BUG(); 3690 } 3691 3692 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s", 3693 p_name, dev_name_source); 3694 t->sysctl_header = 3695 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars); 3696 if (!t->sysctl_header) 3697 goto free; 3698 3699 p->sysctl_table = t; 3700 return 0; 3701 3702 free: 3703 kfree(t); 3704 err: 3705 return -ENOBUFS; 3706 } 3707 EXPORT_SYMBOL(neigh_sysctl_register); 3708 3709 void neigh_sysctl_unregister(struct neigh_parms *p) 3710 { 3711 if (p->sysctl_table) { 3712 struct neigh_sysctl_table *t = p->sysctl_table; 3713 p->sysctl_table = NULL; 3714 unregister_net_sysctl_table(t->sysctl_header); 3715 kfree(t); 3716 } 3717 } 3718 EXPORT_SYMBOL(neigh_sysctl_unregister); 3719 3720 #endif /* CONFIG_SYSCTL */ 3721 3722 static int __init neigh_init(void) 3723 { 3724 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0); 3725 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0); 3726 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0); 3727 3728 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, 3729 0); 3730 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0); 3731 3732 return 0; 3733 } 3734 3735 subsys_initcall(neigh_init); 3736
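/*
 * Usage sketch (illustrative only): the rtnl_register() calls in neigh_init()
 * wire this file to the RTM_*NEIGH netlink messages that tools such as
 * "ip neigh" use.  A minimal user-space dump of the IPv4 neighbour table
 * looks roughly like the following; error handling is omitted and the reply
 * is a stream of RTM_NEWNEIGH messages built by neigh_fill_info(),
 * terminated by NLMSG_DONE:
 *
 *	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
 *	struct {
 *		struct nlmsghdr	nlh;
 *		struct ndmsg	ndm;
 *	} req = {
 *		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct ndmsg)),
 *		.nlh.nlmsg_type	 = RTM_GETNEIGH,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.ndm.ndm_family	 = AF_INET,
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	sendto(fd, &req, req.nlh.nlmsg_len, 0,
 *	       (struct sockaddr *)&kernel, sizeof(kernel));
 *
 * then recv() the replies and walk them with the usual NLMSG_OK()/
 * NLMSG_NEXT() macros.  Setting ndm_flags to NTF_PROXY in the request makes
 * neigh_dump_info() dump the proxy table via pneigh_dump_table() instead of
 * neigh_dump_table().
 */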