// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding database
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <trace/events/bridge.h>
#include "br_private.h"

static const struct rhashtable_params br_fdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
	.key_len = sizeof(struct net_bridge_fdb_key),
	.automatic_shrinking = true,
};

static struct kmem_cache *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *, int, bool);

int __init br_fdb_init(void)
{
	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
					 sizeof(struct net_bridge_fdb_entry),
					 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	if (!br_fdb_cache)
		return -ENOMEM;

	return 0;
}

void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}

int br_fdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}

void br_fdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->fdb_hash_tbl);
}

/* if topology_changing then use forward_delay (default 15 sec)
 * otherwise keep longer (default 5 minutes)
 */
static inline unsigned long hold_time(const struct net_bridge *br)
{
	return br->topology_change ? br->forward_delay : br->ageing_time;
}

static inline int has_expired(const struct net_bridge *br,
			      const struct net_bridge_fdb_entry *fdb)
{
	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
	       time_before_eq(fdb->updated + hold_time(br), jiffies);
}

static void fdb_rcu_free(struct rcu_head *head)
{
	struct net_bridge_fdb_entry *ent
		= container_of(head, struct net_bridge_fdb_entry, rcu);
	kmem_cache_free(br_fdb_cache, ent);
}

static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
						 const unsigned char *addr,
						 __u16 vid)
{
	struct net_bridge_fdb_key key;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key.vlan_id = vid;
	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));

	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
}

/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
						const unsigned char *addr,
						__u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	lockdep_assert_held_once(&br->hash_lock);

	rcu_read_lock();
	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	rcu_read_unlock();

	return fdb;
}

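/* Look up the net_device of the port that currently owns @addr/@vid on
 * bridge @br_dev; returns NULL if @br_dev is not a bridge master, the
 * entry does not exist, or the entry has no destination port.
 * Must be called with RTNL held.
 */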
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
				    const unsigned char *addr,
				    __u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_device *dev = NULL;
	struct net_bridge *br;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev))
		return NULL;

	br = netdev_priv(br_dev);
	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (f && f->dst)
		dev = f->dst->dev;
	rcu_read_unlock();

	return dev;
}
EXPORT_SYMBOL_GPL(br_fdb_find_port);

struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
					     const unsigned char *addr,
					     __u16 vid)
{
	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}

/* When a static FDB entry is added, the mac address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	int err;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p)) {
			err = dev_uc_add(p->dev, addr);
			if (err)
				goto undo;
		}
	}

	return;
undo:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}

/* When a static FDB entry is deleted, the HW address from that entry is
 * also removed from the bridge private HW address list and all the
 * ports are updated with the needed information.
 * Called under RTNL.
 */
static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	struct net_bridge_port *p;

	ASSERT_RTNL();

	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}

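/* Unlink @f from the hash table and the bridge's fdb_list, send an
 * RTM_DELNEIGH notification (and a switchdev notification when
 * @swdev_notify is set) and free the entry via RCU.
 * All callers in this file hold br->hash_lock.
 */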
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
		       bool swdev_notify)
{
	trace_fdb_delete(br, f);

	if (test_bit(BR_FDB_STATIC, &f->flags))
		fdb_del_hw_addr(br, f->key.addr.addr);

	hlist_del_init_rcu(&f->fdb_node);
	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
			       br_fdb_rht_params);
	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
	call_rcu(&f->rcu, fdb_rcu_free);
}

/* Delete a local entry if no other port had the same address. */
static void fdb_delete_local(struct net_bridge *br,
			     const struct net_bridge_port *p,
			     struct net_bridge_fdb_entry *f)
{
	const unsigned char *addr = f->key.addr.addr;
	struct net_bridge_vlan_group *vg;
	const struct net_bridge_vlan *v;
	struct net_bridge_port *op;
	u16 vid = f->key.vlan_id;

	/* Maybe another port has same hw addr? */
	list_for_each_entry(op, &br->port_list, list) {
		vg = nbp_vlan_group(op);
		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
		    (!vid || br_vlan_find(vg, vid))) {
			f->dst = op;
			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
			return;
		}
	}

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* Maybe bridge device has same hw addr? */
	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
	    (!vid || (v && br_vlan_should_use(v)))) {
		f->dst = NULL;
		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
		return;
	}

	fdb_delete(br, f, true);
}

void br_fdb_find_delete_local(struct net_bridge *br,
			      const struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *f;

	spin_lock_bh(&br->hash_lock);
	f = br_fdb_find(br, addr, vid);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
		fdb_delete_local(br, p, f);
	spin_unlock_bh(&br->hash_lock);
}

void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);
	vg = nbp_vlan_group(p);
	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
			/* delete old one */
			fdb_delete_local(br, p, f);

			/* if this port has no vlan information
			 * configured, we can safely be done at
			 * this point.
			 */
			if (!vg || !vg->num_vlans)
				goto insert;
		}
	}

insert:
	/* insert new address, may fail if invalid address or dup. */
	fdb_insert(br, p, newaddr, 0);

	if (!vg || !vg->num_vlans)
		goto done;

	/* Now add entries for every VLAN configured on the port.
	 * This function runs under RTNL so the bitmap will not change
	 * from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist)
		fdb_insert(br, p, newaddr, v->vid);

done:
	spin_unlock_bh(&br->hash_lock);
}

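/* Called when the bridge device's own MAC address changes: delete the
 * old unassociated local entries (for VLAN 0 and every VLAN configured
 * on the bridge) and insert entries for @newaddr in their place.
 */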
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);

	/* If old entry was unassociated with any port, then delete it. */
	f = br_fdb_find(br, br->dev->dev_addr, 0);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
		fdb_delete_local(br, NULL, f);

	fdb_insert(br, NULL, newaddr, 0);
	vg = br_vlan_group(br);
	if (!vg || !vg->num_vlans)
		goto out;
	/* Now remove and add entries for every VLAN configured on the
	 * bridge. This function runs under RTNL so the bitmap will not
	 * change from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
			fdb_delete_local(br, NULL, f);
		fdb_insert(br, NULL, newaddr, v->vid);
	}
out:
	spin_unlock_bh(&br->hash_lock);
}

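/* Periodic garbage collection work: walk the fdb list under RCU, expire
 * dynamic entries whose hold time has passed, emit inactivity
 * notifications for entries with BR_FDB_NOTIFY set, and re-arm the work
 * for the next soonest expiry (at least 10 ms away).
 */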
void br_fdb_cleanup(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     gc_work.work);
	struct net_bridge_fdb_entry *f = NULL;
	unsigned long delay = hold_time(br);
	unsigned long work_delay = delay;
	unsigned long now = jiffies;

	/* this part is tricky, in order to avoid blocking learning and
	 * consequently forwarding, we rely on rcu to delete objects with
	 * delayed freeing allowing us to continue traversing
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		unsigned long this_timer = f->updated + delay;

		if (test_bit(BR_FDB_STATIC, &f->flags) ||
		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
				if (time_after(this_timer, now))
					work_delay = min(work_delay,
							 this_timer - now);
				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
							   &f->flags))
					fdb_notify(br, f, RTM_NEWNEIGH, false);
			}
			continue;
		}

		if (time_after(this_timer, now)) {
			work_delay = min(work_delay, this_timer - now);
		} else {
			spin_lock_bh(&br->hash_lock);
			if (!hlist_unhashed(&f->fdb_node))
				fdb_delete(br, f, true);
			spin_unlock_bh(&br->hash_lock);
		}
	}
	rcu_read_unlock();

	/* Cleanup minimum 10 milliseconds apart */
	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}

/* Completely flush all dynamic entries in forwarding database.*/
void br_fdb_flush(struct net_bridge *br)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}

/* Flush all entries referring to a specific port.
 * if do_all is set also flush static entries
 * if vid is set delete all entries that match the vlan_id
 */
void br_fdb_delete_by_port(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   u16 vid,
			   int do_all)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		if (f->dst != p)
			continue;

		if (!do_all)
			if (test_bit(BR_FDB_STATIC, &f->flags) ||
			    (vid && f->key.vlan_id != vid))
				continue;

		if (test_bit(BR_FDB_LOCAL, &f->flags))
			fdb_delete_local(br, p, f);
		else
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}

#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
 * if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge_port *port;
	int ret;

	rcu_read_lock();
	port = br_port_get_rcu(dev);
	if (!port)
		ret = 0;
	else {
		fdb = br_fdb_find_rcu(port->br, addr, 0);
		ret = fdb && fdb->dst && fdb->dst->dev != dev &&
			fdb->dst->state == BR_STATE_FORWARDING;
	}
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_ATM_LANE */

/*
 * Fill buffer with forwarding table records in
 * the API format.
 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
		   unsigned long maxnum, unsigned long skip)
{
	struct net_bridge_fdb_entry *f;
	struct __fdb_entry *fe = buf;
	int num = 0;

	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (num >= maxnum)
			break;

		if (has_expired(br, f))
			continue;

		/* ignore pseudo entry for local MAC address */
		if (!f->dst)
			continue;

		if (skip) {
			--skip;
			continue;
		}

		/* convert from internal format to API */
		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);

		/* due to ABI compat need to split into hi/lo */
		fe->port_no = f->dst->port_no;
		fe->port_hi = f->dst->port_no >> 8;

		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
		++fe;
		++num;
	}
	rcu_read_unlock();

	return num;
}

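/* Allocate a new fdb entry for @addr/@vid and link it into both the
 * rhashtable (keyed by {MAC, VLAN}) and the bridge's fdb_list. Returns
 * NULL if allocation fails or an entry with the same key already exists.
 */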
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
					       struct net_bridge_port *source,
					       const unsigned char *addr,
					       __u16 vid,
					       unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
	if (fdb) {
		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
		fdb->dst = source;
		fdb->key.vlan_id = vid;
		fdb->flags = flags;
		fdb->updated = fdb->used = jiffies;
		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
						  &fdb->rhnode,
						  br_fdb_rht_params)) {
			kmem_cache_free(br_fdb_cache, fdb);
			fdb = NULL;
		} else {
			hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
		}
	}
	return fdb;
}

static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	if (!is_valid_ether_addr(addr))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb) {
		/* it is okay to have multiple ports with same
		 * address, just use the first one.
		 */
		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
			return 0;
		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
			source ? source->dev->name : br->dev->name, addr, vid);
		fdb_delete(br, fdb, true);
	}

	fdb = fdb_create(br, source, addr, vid,
			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
	if (!fdb)
		return -ENOMEM;

	fdb_add_hw_addr(br, addr);
	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	return 0;
}

/* Add entry for local address of interface */
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		  const unsigned char *addr, u16 vid)
{
	int ret;

	spin_lock_bh(&br->hash_lock);
	ret = fdb_insert(br, source, addr, vid);
	spin_unlock_bh(&br->hash_lock);
	return ret;
}

/* returns true if the fdb was modified */
static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
{
	return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
		  test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
}

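/* Learning fast path: refresh or (re)create the entry for @addr/@vid as
 * seen on @source. Runs under RCU; hash_lock is only taken when a new
 * entry has to be created. Local entries are never overridden, and
 * sticky entries keep their current port.
 */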
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
		   const unsigned char *addr, u16 vid, unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	/* some users want to always flood. */
	if (hold_time(br) == 0)
		return;

	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	if (likely(fdb)) {
		/* attempt to update an entry for a local interface */
		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
			if (net_ratelimit())
				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
					source->dev->name, addr, vid);
		} else {
			unsigned long now = jiffies;
			bool fdb_modified = false;

			if (now != fdb->updated) {
				fdb->updated = now;
				fdb_modified = __fdb_mark_active(fdb);
			}

			/* fastpath: update of existing entry */
			if (unlikely(source != fdb->dst &&
				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
				fdb->dst = source;
				fdb_modified = true;
				/* Take over HW learned entry */
				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						      &fdb->flags)))
					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						  &fdb->flags);
			}

			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
			if (unlikely(fdb_modified)) {
				trace_br_fdb_update(br, source, addr, vid, flags);
				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
			}
		}
	} else {
		spin_lock(&br->hash_lock);
		fdb = fdb_create(br, source, addr, vid, flags);
		if (fdb) {
			trace_br_fdb_update(br, source, addr, vid, flags);
			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
		}
		/* else we lose race and someone else inserts
		 * it first, don't bother updating
		 */
		spin_unlock(&br->hash_lock);
	}
}

static int fdb_to_nud(const struct net_bridge *br,
		      const struct net_bridge_fdb_entry *fdb)
{
	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
		return NUD_PERMANENT;
	else if (test_bit(BR_FDB_STATIC, &fdb->flags))
		return NUD_NOARP;
	else if (has_expired(br, fdb))
		return NUD_STALE;
	else
		return NUD_REACHABLE;
}

static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = 0;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
	ndm->ndm_state = fdb_to_nud(br, fdb);

	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		ndm->ndm_flags |= NTF_OFFLOADED;
	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		ndm->ndm_flags |= NTF_EXT_LEARNED;
	if (test_bit(BR_FDB_STICKY, &fdb->flags))
		ndm->ndm_flags |= NTF_STICKY;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;
	ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt = 0;
	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
					&fdb->key.vlan_id))
		goto nla_put_failure;

	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
		u8 notify_bits = FDB_NOTIFY_BIT;

		if (!nest)
			goto nla_put_failure;
		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;

		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}

		nla_nest_end(skb, nest);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo))
		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
}

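/* Send an fdb netlink notification (@type is RTM_NEWNEIGH or
 * RTM_DELNEIGH) to the RTNLGRP_NEIGH group, and forward the event to
 * switchdev when @swdev_notify is set.
 */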
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *fdb, int type,
		       bool swdev_notify)
{
	struct net *net = dev_net(br->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (swdev_notify)
		br_switchdev_fdb_notify(fdb, type);

	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
		struct netlink_callback *cb,
		struct net_device *dev,
		struct net_device *filter_dev,
		int *idx)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	if (!(dev->priv_flags & IFF_EBRIDGE))
		return err;

	if (!filter_dev) {
		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
		if (err < 0)
			return err;
	}

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (*idx < cb->args[2])
			goto skip;
		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
			if (filter_dev != dev)
				goto skip;
			/* !f->dst is a special case for bridge
			 * It means the MAC belongs to the bridge
			 * Therefore need a little more filtering
			 * we only want to dump the !f->dst case
			 */
			if (f->dst)
				goto skip;
		}
		if (!filter_dev && f->dst)
			goto skip;

		err = fdb_fill_info(skb, br, f,
				    NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq,
				    RTM_NEWNEIGH,
				    NLM_F_MULTI);
		if (err < 0)
			break;
skip:
		*idx += 1;
	}
	rcu_read_unlock();

	return err;
}

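/* Fill @skb with the single fdb entry matching @addr/@vid, for fdb get
 * requests. Returns -ENOENT if no such entry exists.
 */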
int br_fdb_get(struct sk_buff *skb,
	       struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr,
	       u16 vid, u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (!f) {
		NL_SET_ERR_MSG(extack, "Fdb entry not found");
		err = -ENOENT;
		goto errout;
	}

	err = fdb_fill_info(skb, br, f, portid, seq,
			    RTM_NEWNEIGH, 0);
errout:
	rcu_read_unlock();
	return err;
}

/* returns true if the fdb is modified */
static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
{
	bool modified = false;

	/* allow to mark an entry as inactive, usually done on creation */
	if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
	    !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
		modified = true;

	if ((notify & FDB_NOTIFY_BIT) &&
	    !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		/* enabled activity tracking */
		modified = true;
	} else if (!(notify & FDB_NOTIFY_BIT) &&
		   test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		/* disabled activity tracking, clear notify state */
		clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
		modified = true;
	}

	return modified;
}

/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
			 struct nlattr *nfea_tb[])
{
	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
	struct net_bridge_fdb_entry *fdb;
	u16 state = ndm->ndm_state;
	bool modified = false;
	u8 notify = 0;

	/* If the port cannot learn allow only local and static entries */
	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
	    !(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return -EPERM;

	if (!source && !(state & NUD_PERMANENT)) {
		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
			br->dev->name);
		return -EINVAL;
	}

	if (is_sticky && (state & NUD_PERMANENT))
		return -EINVAL;

	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
			return -EINVAL;
	}

	fdb = br_fdb_find(br, addr, vid);
	if (fdb == NULL) {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		fdb = fdb_create(br, source, addr, vid, 0);
		if (!fdb)
			return -ENOMEM;

		modified = true;
	} else {
		if (flags & NLM_F_EXCL)
			return -EEXIST;

		if (fdb->dst != source) {
			fdb->dst = source;
			modified = true;
		}
	}

	if (fdb_to_nud(br, fdb) != state) {
		if (state & NUD_PERMANENT) {
			set_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else if (state & NUD_NOARP) {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_del_hw_addr(br, addr);
		}

		modified = true;
	}

	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
		change_bit(BR_FDB_STICKY, &fdb->flags);
		modified = true;
	}

	if (fdb_handle_notify(fdb, notify))
		modified = true;

	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

	fdb->used = jiffies;
	if (modified) {
		if (refresh)
			fdb->updated = jiffies;
		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	}

	return 0;
}

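/* Per-VLAN worker for br_fdb_add(): NTF_USE entries go through the
 * normal learning path, NTF_EXT_LEARNED entries through the external
 * learn path, and everything else is added/updated under hash_lock via
 * fdb_add_entry().
 */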
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
			struct net_bridge_port *p, const unsigned char *addr,
			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[])
{
	int err = 0;

	if (ndm->ndm_flags & NTF_USE) {
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
				br->dev->name);
			return -EINVAL;
		}
		if (!nbp_state_should_learn(p))
			return 0;

		local_bh_disable();
		rcu_read_lock();
		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
		rcu_read_unlock();
		local_bh_enable();
	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
		err = br_fdb_external_learn_add(br, p, addr, vid, true);
	} else {
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
		spin_unlock_bh(&br->hash_lock);
	}

	return err;
}

static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
};

/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr, u16 vid, u16 nlh_flags,
	       struct netlink_ext_ack *extack)
{
	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	int err = 0;

	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
		return -EINVAL;
	}

	if (is_zero_ether_addr(addr)) {
		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (tb[NDA_FDB_EXT_ATTRS]) {
		attr = tb[NDA_FDB_EXT_ATTRS];
		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
				       br_nda_fdb_pol, extack);
		if (err)
			return err;
	} else {
		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		/* VID was specified, so use it. */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb);
	} else {
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb);
		if (err || !vg || !vg->num_vlans)
			goto out;

		/* We have vlans configured on this port and user didn't
		 * specify a VLAN. To be nice, add/update entry for every
		 * vlan on this port.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
					   nfea_tb);
			if (err)
				goto out;
		}
	}

out:
	return err;
}

static int fdb_delete_by_addr_and_port(struct net_bridge *br,
				       const struct net_bridge_port *p,
				       const u8 *addr, u16 vlan)
{
	struct net_bridge_fdb_entry *fdb;

	fdb = br_fdb_find(br, addr, vlan);
	if (!fdb || fdb->dst != p)
		return -ENOENT;

	fdb_delete(br, fdb, true);

	return 0;
}

static int __br_fdb_delete(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   const unsigned char *addr, u16 vid)
{
	int err;

	spin_lock_bh(&br->hash_lock);
	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
	spin_unlock_bh(&br->hash_lock);

	return err;
}

/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
		  struct net_device *dev,
		  const unsigned char *addr, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v) {
			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		err = __br_fdb_delete(br, p, addr, vid);
	} else {
		err = -ENOENT;
		err &= __br_fdb_delete(br, p, addr, 0);
		if (!vg || !vg->num_vlans)
			return err;

		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err &= __br_fdb_delete(br, p, addr, v->vid);
		}
	}

	return err;
}

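/* Add every static fdb address to @p's device unicast address list,
 * rolling back the additions already made if one of them fails.
 * Static entries only change under RTNL, which the caller must hold.
 */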
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f, *tmp;
	int err = 0;

	ASSERT_RTNL();

	/* the key here is that static entries change only under rtnl */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;
		err = dev_uc_add(p->dev, f->key.addr.addr);
		if (err)
			goto rollback;
	}
done:
	rcu_read_unlock();

	return err;

rollback:
	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
			continue;
		if (tmp == f)
			break;
		dev_uc_del(p->dev, tmp->key.addr.addr);
	}

	goto done;
}

void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f;

	ASSERT_RTNL();

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;

		dev_uc_del(p->dev, f->key.addr.addr);
	}
	rcu_read_unlock();
}

int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;
	int err = 0;

	trace_br_fdb_external_learn_add(br, p, addr, vid);

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (!fdb) {
		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);

		if (swdev_notify)
			flags |= BIT(BR_FDB_ADDED_BY_USER);
		fdb = fdb_create(br, p, addr, vid, flags);
		if (!fdb) {
			err = -ENOMEM;
			goto err_unlock;
		}
		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	} else {
		fdb->updated = jiffies;

		if (fdb->dst != p) {
			fdb->dst = p;
			modified = true;
		}

		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
			/* Refresh entry */
			fdb->used = jiffies;
		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
			/* Take over SW learned entry */
			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
			modified = true;
		}

		if (swdev_notify)
			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

		if (modified)
			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	}

err_unlock:
	spin_unlock_bh(&br->hash_lock);

	return err;
}

int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	int err = 0;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		fdb_delete(br, fdb, swdev_notify);
	else
		err = -ENOENT;

	spin_unlock_bh(&br->hash_lock);

	return err;
}

void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
			  const unsigned char *addr, u16 vid, bool offloaded)
{
	struct net_bridge_fdb_entry *fdb;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		change_bit(BR_FDB_OFFLOADED, &fdb->flags);

	spin_unlock_bh(&br->hash_lock);
}

void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	p = br_port_get_rtnl(dev);
	if (!p)
		return;

	spin_lock_bh(&p->br->hash_lock);
	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
		if (f->dst == p && f->key.vlan_id == vid)
			clear_bit(BR_FDB_OFFLOADED, &f->flags);
	}
	spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);