// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/dev_addr_lists.c - Functions for handling net device lists
 * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
 *
 * This file contains functions for working with unicast, multicast and device
 * addresses lists.
 */

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/export.h>
#include <linux/list.h>

/*
 * General list handling functions
 */

/* Insert @new into the list's rbtree, keyed by (addr, type).
 * Returns -EEXIST if an entry with the same key is already present.
 * The list's rbtree and linked list are kept in sync by the callers.
 */
static int __hw_addr_insert(struct netdev_hw_addr_list *list,
			    struct netdev_hw_addr *new, int addr_len)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(new->addr, ha->addr, addr_len);
		if (diff == 0)
			diff = memcmp(&new->type, &ha->type, sizeof(new->type));

		parent = *ins_point;
		if (diff < 0)
			ins_point = &parent->rb_left;
		else if (diff > 0)
			ins_point = &parent->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node_rcu(&new->node, parent, ins_point);
	rb_insert_color(&new->node, &list->tree);

	return 0;
}

/* Allocate and initialize a new address entry with refcount 1.
 * Allocation is GFP_ATOMIC since callers may hold the addr list spinlock.
 * The allocation is padded up to L1_CACHE_BYTES (presumably to avoid
 * false sharing between entries — see the original commit for rationale).
 * Returns NULL on allocation failure.
 */
static struct netdev_hw_addr*
__hw_addr_create(const unsigned char *addr, int addr_len,
		 unsigned char addr_type, bool global, bool sync)
{
	struct netdev_hw_addr *ha;
	int alloc_size;

	alloc_size = sizeof(*ha);
	if (alloc_size < L1_CACHE_BYTES)
		alloc_size = L1_CACHE_BYTES;
	ha = kmalloc(alloc_size, GFP_ATOMIC);
	if (!ha)
		return NULL;
	memcpy(ha->addr, addr, addr_len);
	ha->type = addr_type;
	ha->refcount = 1;
	ha->global_use = global;
	ha->synced = sync ? 1 : 0;
	ha->sync_cnt = 0;

	return ha;
}

/* Add an address to @list, or take an additional reference if an entry
 * with the same (addr, type) key already exists.
 *
 * @global: mark/require global use of the existing entry
 * @sync: account the entry as synced to another list
 * @sync_count: caller's current sync count; a non-zero value together with
 *	an already-synced entry yields -EEXIST
 * @exclusive: fail with -EEXIST instead of taking a reference on a match
 *
 * Returns 0 on success, -EINVAL on oversized @addr_len, -EEXIST or -ENOMEM
 * on failure.
 */
static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync,
			    int sync_count, bool exclusive)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(addr, ha->addr, addr_len);
		if (diff == 0)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		parent = *ins_point;
		if (diff < 0) {
			ins_point = &parent->rb_left;
		} else if (diff > 0) {
			ins_point = &parent->rb_right;
		} else {
			if (exclusive)
				return -EEXIST;
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced && sync_count)
					return -EEXIST;
				else
					ha->synced++;
			}
			ha->refcount++;
			return 0;
		}
	}

	ha = __hw_addr_create(addr, addr_len, addr_type, global, sync);
	if (!ha)
		return -ENOMEM;

	/* The tree walk above already found the insertion point, so link
	 * the new node directly rather than re-walking via __hw_addr_insert().
	 */
	rb_link_node(&ha->node, parent, ins_point);
	rb_insert_color(&ha->node, &list->tree);

	list_add_tail_rcu(&ha->list, &list->list);
	list->count++;

	return 0;
}

/* Plain (non-global, non-sync, non-exclusive) address add. */
static int __hw_addr_add(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
				0, false);
}

/* Drop a reference on @ha, clearing global/sync accounting as requested.
 * When the refcount hits zero the entry is unlinked from both the rbtree
 * and the RCU list and freed after a grace period.
 * Returns -ENOENT if @global/@sync state does not match the entry.
 */
static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
			       struct netdev_hw_addr *ha, bool global,
			       bool sync)
{
	if (global && !ha->global_use)
		return -ENOENT;

	if (sync && !ha->synced)
		return -ENOENT;

	if (global)
		ha->global_use = false;

	if (sync)
		ha->synced--;

	if (--ha->refcount)
		return 0;

	rb_erase(&ha->node, &list->tree);

	list_del_rcu(&ha->list);
	kfree_rcu(ha, rcu_head);
	list->count--;
	return 0;
}

/* Look up an entry by address (and, when @addr_type is non-zero, by type)
 * in the list's rbtree. Returns the entry or NULL if not found.
 */
static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
					       const unsigned char *addr, int addr_len,
					       unsigned char addr_type)
{
	struct rb_node *node;

	node = list->tree.rb_node;

	while (node) {
		struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node);
		int diff = memcmp(addr, ha->addr, addr_len);

		/* addr_type == 0 means "match any type" */
		if (diff == 0 && addr_type)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		if (diff < 0)
			node = node->rb_left;
		else if (diff > 0)
			node = node->rb_right;
		else
			return ha;
	}

	return NULL;
}

/* Look up an address and drop a reference on it.
 * Returns -ENOENT if no matching entry exists.
 */
static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync)
{
	struct netdev_hw_addr *ha = __hw_addr_lookup(list, addr, addr_len, addr_type);

	if (!ha)
		return -ENOENT;
	return __hw_addr_del_entry(list, ha, global, sync);
}

/* Plain (non-global, non-sync) address delete. */
static int __hw_addr_del(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
}

/* Sync a single address entry to @to_list, bumping the source entry's
 * sync_cnt and refcount on success. An -EEXIST from the add (address
 * already synced there) is treated as success without accounting.
 */
static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true, ha->sync_cnt, false);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}

/* Undo a previous sync of @ha: remove the synced copy from @to_list and
 * drop the sync reference held on the @from_list entry.
 */
static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;
	/* address on from list is not marked synced */
	__hw_addr_del_entry(from_list, ha, false, false);
}

/* Sync @from_list to @to_list where the source may be synced to several
 * destinations. An entry whose references are all sync references
 * (sync_cnt == refcount) has been deleted from the source and is unsynced;
 * anything else is (re)synced. Stops and returns the error of the first
 * failed sync, 0 otherwise.
 */
static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
}

/* This function only works where there is a strict 1-1 relationship
 * between source and destination of the sync. If you ever need to
 * sync addresses to more than 1 destination, you need to use
 * __hw_addr_sync_multiple().
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list,
		   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->sync_cnt) {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		} else if (ha->refcount == 1)
			/* sync reference is the only one left: the address
			 * was deleted from the source, so unsync it
			 */
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync);

/* Remove from @to_list all addresses previously synced from @from_list. */
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list,
		      int addr_len)
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync);

/**
 * __hw_addr_sync_dev - Synchronize device's multicast list
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address add/remove
 * notifications. The unsync function may be NULL in which case
 * the addresses requiring removal will simply be removed without
 * any notification to the device.
 **/
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;
	int err;

	/* first go through and flush out any stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt || ha->refcount != 1)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (ha->sync_cnt)
			continue;

		err = sync(dev, ha->addr);
		if (err)
			return err;

		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);

/**
 * __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
 * into account references
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if address or reference on it should be added
 * @unsync: function to call if address or some reference on it should be
 *	removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address or references on it
 * add/remove notifications. The unsync function may be NULL in which case
 * the addresses or references on it requiring removal will simply be
 * removed without any notification to the device. That is responsibility of
 * the driver to identify and distribute address or references on it between
 * internal address tables.
 **/
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;
	int err, ref_cnt;

	/* first go through and flush out any unsynced/stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address is not used */
		if ((ha->sync_cnt << 1) <= ha->refcount)
			continue;

		/* if fails defer unsyncing address */
		ref_cnt = ha->refcount - ha->sync_cnt;
		if (unsync && unsync(dev, ha->addr, ref_cnt))
			continue;

		/* re-derive refcount/sync_cnt from the surviving references;
		 * the +1 is the reference dropped by __hw_addr_del_entry()
		 * just below
		 */
		ha->refcount = (ref_cnt << 1) + 1;
		ha->sync_cnt = ref_cnt;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync updated/new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address added or reused */
		if ((ha->sync_cnt << 1) >= ha->refcount)
			continue;

		ref_cnt = ha->refcount - ha->sync_cnt;
		err = sync(dev, ha->addr, ref_cnt);
		if (err)
			return err;

		ha->refcount = ref_cnt << 1;
		ha->sync_cnt = ref_cnt;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_ref_sync_dev);

/**
 * __hw_addr_ref_unsync_dev - Remove synchronized addresses and references on
 * it from device
 * @list: address list to remove synchronized addresses (references on it) from
 * @dev: device to sync
 * @unsync: function to call if address and references on it should be removed
 *
 * Remove all addresses that were added to the device by
 * __hw_addr_ref_sync_dev(). This function is intended to be called from the
 * ndo_stop or ndo_open functions on devices that require explicit address (or
 * references on it) add/remove notifications. If the unsync function pointer
 * is NULL then this function can be used to just reset the sync_cnt for the
 * addresses in the list.
 **/
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
			continue;

		/* drop all sync references but one; the remaining one is
		 * dropped by __hw_addr_del_entry() below
		 */
		ha->refcount -= ha->sync_cnt - 1;
		ha->sync_cnt = 0;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);

/**
 * __hw_addr_unsync_dev - Remove synchronized addresses from device
 * @list: address list to remove synchronized addresses from
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by __hw_addr_sync_dev().
 * This function is intended to be called from the ndo_stop or ndo_open
 * functions on devices that require explicit address add/remove
 * notifications. If the unsync function pointer is NULL then this function
 * can be used to just reset the sync_cnt for the addresses in the list.
 **/
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);

/* Free every entry on @list and reset it to empty. Entries are freed via
 * kfree_rcu(), so concurrent RCU readers of the linked list stay safe;
 * the rbtree is simply reset wholesale.
 */
static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha, *tmp;

	list->tree = RB_ROOT;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		list_del_rcu(&ha->list);
		kfree_rcu(ha, rcu_head);
	}
	list->count = 0;
}

/* Initialize an empty address list (linked list, count and rbtree). */
void __hw_addr_init(struct netdev_hw_addr_list *list)
{
	INIT_LIST_HEAD(&list->list);
	list->count = 0;
	list->tree = RB_ROOT;
}
EXPORT_SYMBOL(__hw_addr_init);

/*
 * Device addresses handling functions
 */

/* Check that netdev->dev_addr is not written to directly as this would
 * break the rbtree layout. All changes should go thru dev_addr_set() and co.
 * Remove this check in mid-2024.
 */
void dev_addr_check(struct net_device *dev)
{
	if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
		return;

	netdev_warn(dev, "Current addr:  %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
	netdev_warn(dev, "Expected addr: %*ph\n",
		    MAX_ADDR_LEN, dev->dev_addr_shadow);
	netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
}

/**
 * dev_addr_flush - Flush device address list
 * @dev: device
 *
 * Flush device address list and reset ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
void dev_addr_flush(struct net_device *dev)
{
	/* rtnl_mutex must be held here */
	dev_addr_check(dev);

	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}

/**
 * dev_addr_init - Init device address list
 * @dev: device
 *
 * Init device address list and create the first element,
 * used by ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	/* rtnl_mutex must be held here */

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/*
		 * Get the first (previously created) address from the list
		 * and set dev_addr pointer to this location.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}

/* Modify @len bytes of the device's hardware address at @offset.
 * The entry must be removed from and re-inserted into the rbtree because
 * the address bytes are the tree key; the shadow copy used by
 * dev_addr_check() is updated in lockstep.
 */
void dev_addr_mod(struct net_device *dev, unsigned int offset,
		  const void *addr, size_t len)
{
	struct netdev_hw_addr *ha;

	dev_addr_check(dev);

	ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
	rb_erase(&ha->node, &dev->dev_addrs.tree);
	memcpy(&ha->addr[offset], addr, len);
	memcpy(&dev->dev_addr_shadow[offset], addr, len);
	WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
}
EXPORT_SYMBOL(dev_addr_mod);

/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count if
 * it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = dev_pre_changeaddr_notify(dev, addr, NULL);
	if (err)
		return err;
	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);

/**
 * dev_addr_del - Release a device address.
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release reference to a device address and remove it from the device
 * if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/*
	 * We can not remove the first address from the list because
	 * dev->dev_addr points to that.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (!memcmp(ha->addr, addr, dev->addr_len) &&
	    ha->type == addr_type && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);

/*
 * Unicast list handling functions
 */

/**
 * dev_uc_add_excl - Add a global secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Returns -EEXIST if the address is already on the list.
 */
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->uc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_UNICAST, true, false,
			       0, true);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add_excl);

/**
 * dev_uc_add - Add a secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a secondary unicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_uc_add(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add);

/**
 * dev_uc_del - Release secondary unicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a secondary unicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_uc_del(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_del);

/**
 * dev_uc_sync - Synchronize device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices. This function assumes that
 * addresses will only ever be synced to the @to devices and no other.
 */
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync);

/**
 * dev_uc_sync_multiple - Synchronize device's unicast list to another
 * device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have been deleted from the source. The source device
 * must be locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices. It allows for a single source
 * device to be synced to multiple destination devices.
 */
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);

/**
 * dev_uc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_uc_sync(). This function is intended to be called from the
 * dev->stop function of layered software devices.
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
	 * reasons:
	 * 1) This is always called without any addr_list_lock, so as the
	 *    outermost one here, it must be 0.
	 * 2) This is called by some callers after unlinking the upper device,
	 *    so the dev->lower_level becomes 1 again.
	 * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
	 * larger.
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);

/**
 * dev_uc_flush - Flush unicast addresses
 * @dev: device
 *
 * Flush unicast addresses.
 */
void dev_uc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->uc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);

/**
 * dev_uc_init - Init unicast address list
 * @dev: device
 *
 * Init unicast address list.
 */
void dev_uc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);

/*
 * Multicast list handling functions
 */

/**
 * dev_mc_add_excl - Add a global secondary multicast address
 * @dev: device
 * @addr: address to add
 *
 * Returns -EEXIST if the address is already on the list.
 */
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, true, false,
			       0, true);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_mc_add_excl);

/* Common helper for dev_mc_add()/dev_mc_add_global(): add a multicast
 * address under the addr list lock and refresh the RX mode on success.
 */
static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false,
			       0, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}

/**
 * dev_mc_add - Add a multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a multicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_add);

/**
 * dev_mc_add_global - Add a global multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a global multicast address to the device.
 */
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_add_global);

/* Common helper for dev_mc_del()/dev_mc_del_global(): delete a multicast
 * address under the addr list lock and refresh the RX mode on success.
 */
static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}

/**
 * dev_mc_del - Delete a multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_del);

/**
 * dev_mc_del_global - Delete a global multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_del_global);

/**
 * dev_mc_sync - Synchronize device's multicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices.
 */
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync);

/**
 * dev_mc_sync_multiple - Synchronize device's multicast list to another
 * device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices. It allows for a single
 * source device to be synced to multiple destination devices.
 */
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);

/**
 * dev_mc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_mc_sync(). This function is intended to be called from the
 * dev->stop function of layered software devices.
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* See the above comments inside dev_uc_unsync(). */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);

/**
 * dev_mc_flush - Flush multicast addresses
 * @dev: device
 *
 * Flush multicast addresses.
 */
void dev_mc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->mc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);

/**
 * dev_mc_init - Init multicast address list
 * @dev: device
 *
 * Init multicast address list.
 */
void dev_mc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);