/*
 * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
	GID_DEL = 0,
	GID_ADD
};

struct update_gid_event_work {
	struct work_struct work;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ		3
struct netdev_event_work_cmd {
	roce_netdev_callback	cb;
	roce_netdev_filter	filter;
	struct net_device	*ndev;
	struct net_device	*filter_ndev;
};

struct netdev_event_work {
	struct work_struct		work;
	struct netdev_event_work_cmd	cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
	bool (*is_supported)(const struct ib_device *device, u8 port_num);
	enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
	{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
	{rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE	ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
	int i;
	unsigned int ret_flags = 0;

	if (!rdma_protocol_roce(ib_dev, port))
		return 1UL << IB_GID_TYPE_IB;

	for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
		if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
			ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

	return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
		       u8 port, union ib_gid *gid,
		       struct ib_gid_attr *gid_attr)
{
	int i;
	unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
		if ((1UL << i) & gid_type_mask) {
			gid_attr->gid_type = i;
			switch (gid_op) {
			case GID_ADD:
				ib_cache_gid_add(ib_dev, port,
						 gid, gid_attr);
				break;
			case GID_DEL:
				ib_cache_gid_del(ib_dev, port,
						 gid, gid_attr);
				break;
			}
		}
	}
}
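
/*
 * Bonding support: when the RoCE netdev is a slave of a bond, only the
 * currently active slave should carry the bond's GIDs; inactive slaves
 * keep their GID tables clear of them so that RoCE traffic is not
 * resolved onto a port the bond is not transmitting on. The tri-state
 * below distinguishes the active slave, an inactive slave, and a
 * device that is not enslaved at all (or whose bond has no active
 * slave).
 */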
enum bonding_slave_state {
	BONDING_SLAVE_STATE_ACTIVE	= 1UL << 0,
	BONDING_SLAVE_STATE_INACTIVE	= 1UL << 1,
	/* No primary slave or the device isn't a slave in bonding */
	BONDING_SLAVE_STATE_NA		= 1UL << 2,
};

static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
								   struct net_device *upper)
{
	if (upper && netif_is_bond_master(upper)) {
		struct net_device *pdev =
			bond_option_active_slave_get_rcu(netdev_priv(upper));

		if (pdev)
			return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
				BONDING_SLAVE_STATE_INACTIVE;
	}

	return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES	(BONDING_SLAVE_STATE_ACTIVE |	\
				 BONDING_SLAVE_STATE_NA)
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *real_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	real_dev = rdma_vlan_dev_real_dev(cookie);
	if (!real_dev)
		real_dev = cookie;

	res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

	rcu_read_unlock();
	return res;
}

static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
				      struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *master_dev;
	int res;

	if (!rdma_ndev)
		return 0;

	rcu_read_lock();
	master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
		BONDING_SLAVE_STATE_INACTIVE;
	rcu_read_unlock();

	return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	return 1;
}

static int upper_device_filter(struct ib_device *ib_dev, u8 port,
			       struct net_device *rdma_ndev, void *cookie)
{
	int res;

	if (!rdma_ndev)
		return 0;

	if (rdma_ndev == cookie)
		return 1;

	rcu_read_lock();
	res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
	rcu_read_unlock();

	return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
			  struct ib_device *ib_dev,
			  u8 port, struct net_device *ndev,
			  struct sockaddr *addr)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;

	rdma_ip2gid(addr, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

static void enum_netdev_default_gids(struct ib_device *ib_dev,
				     u8 port, struct net_device *event_ndev,
				     struct net_device *rdma_ndev)
{
	unsigned long gid_type_mask;

	rcu_read_lock();
	if (!rdma_ndev ||
	    ((rdma_ndev != event_ndev &&
	      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	     is_eth_active_slave_of_bonding_rcu(rdma_ndev,
						netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
	     BONDING_SLAVE_STATE_INACTIVE)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_SET);
}
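
/*
 * Besides the per-IP GIDs enumerated elsewhere in this file, every
 * RoCE port carries "default" GIDs that exist independently of any
 * assigned IP address and are managed through
 * ib_cache_gid_set_default_gid(). The helper below removes those
 * default entries from a slave that is not the bond's active slave,
 * mirroring what enum_netdev_default_gids() above sets up.
 */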
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
					    u8 port,
					    struct net_device *event_ndev,
					    struct net_device *rdma_ndev)
{
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

	if (!rdma_ndev)
		return;

	if (!real_dev)
		real_dev = event_ndev;

	rcu_read_lock();

	if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
	    is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
	    BONDING_SLAVE_STATE_INACTIVE) {
		unsigned long gid_type_mask;

		rcu_read_unlock();

		gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

		ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
					     gid_type_mask,
					     IB_CACHE_GID_DEFAULT_MODE_DELETE);
	} else {
		rcu_read_unlock();
	}
}

static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct in_device *in_dev;
	struct sin_list {
		struct list_head	list;
		struct sockaddr_in	ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	for_ifa(in_dev) {
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}
	endfor_ifa(in_dev);
	rcu_read_unlock();

	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}

static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
				 u8 port, struct net_device *ndev)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *in6_dev;
	struct sin6_list {
		struct list_head	list;
		struct sockaddr_in6	sin6;
	};
	struct sin6_list *sin6_iter;
	struct sin6_list *sin6_temp;
	struct ib_gid_attr gid_attr = {.ndev = ndev};
	LIST_HEAD(sin6_list);

	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in6_dev = in6_dev_get(ndev);
	if (!in6_dev)
		return;

	read_lock_bh(&in6_dev->lock);
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->sin6.sin6_family = AF_INET6;
		entry->sin6.sin6_addr = ifp->addr;
		list_add_tail(&entry->list, &sin6_list);
	}
	read_unlock_bh(&in6_dev->lock);

	in6_dev_put(in6_dev);

	list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
		union ib_gid gid;

		rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
		update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
		list_del(&sin6_iter->list);
		kfree(sin6_iter);
	}
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
			    struct net_device *ndev)
{
	enum_netdev_ipv4_ips(ib_dev, port, ndev);
	if (IS_ENABLED(CONFIG_IPV6))
		enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
	_add_netdev_ips(ib_dev, port, cookie);
}
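
/*
 * The roce_netdev_callback helpers in the remainder of this file follow
 * one convention: @rdma_ndev is the netdevice bound to the RoCE port
 * being updated, while @cookie carries the netdevice that triggered the
 * event. add_netdev_ips() above is the additive half; the helpers below
 * implement the deletion paths and the full rescan. As a purely
 * illustrative (hypothetical) example, a driver could repopulate a
 * port's GID tables after a reset with:
 *
 *	rdma_roce_rescan_device(ib_dev);
 */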
static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
				    u8 port,
				    struct net_device *rdma_ndev,
				    void *cookie)
{
	struct net *net;
	struct net_device *ndev;

	/* Lock the rtnl to make sure the netdevs do not move under
	 * our feet
	 */
	rtnl_lock();
	for_each_net(net)
		for_each_netdev(net, ndev)
			if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
				add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
	rtnl_unlock();
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
	ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
			    enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

static void callback_for_addr_gid_device_scan(struct ib_device *device,
					      u8 port,
					      struct net_device *rdma_ndev,
					      void *cookie)
{
	struct update_gid_event_work *parsed = cookie;

	return update_gid(parsed->gid_op, device,
			  port, &parsed->gid,
			  &parsed->gid_attr);
}

struct upper_list {
	struct list_head list;
	struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	struct list_head *upper_list = data;

	if (!entry)
		return 0;

	list_add_tail(&entry->list, upper_list);
	dev_hold(upper);
	entry->upper = upper;

	return 0;
}

static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
				void *cookie,
				void (*handle_netdev)(struct ib_device *ib_dev,
						      u8 port,
						      struct net_device *ndev))
{
	struct net_device *ndev = cookie;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
	rcu_read_unlock();

	handle_netdev(ib_dev, port, ndev);
	list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
				 list) {
		handle_netdev(ib_dev, port, upper_iter->upper);
		dev_put(upper_iter->upper);
		list_del(&upper_iter->list);
		kfree(upper_iter);
	}
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				      struct net_device *event_ndev)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
					struct net_device *rdma_ndev,
					void *cookie)
{
	struct net_device *master_ndev;

	rcu_read_lock();
	master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	if (master_ndev)
		dev_hold(master_ndev);
	rcu_read_unlock();

	if (master_ndev) {
		bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
						rdma_ndev);
		dev_put(master_ndev);
	}
}
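
/*
 * Unlike del_netdev_default_ips_join() above, which looks up the bond
 * master of @rdma_ndev on its own (useful when the event carries the
 * slave rather than the bond), the variant below expects the relevant
 * upper device to arrive in @cookie.
 */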
static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
				   struct net_device *rdma_ndev, void *cookie)
{
	bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
	struct netdev_event_work *work =
		container_of(_work, struct netdev_event_work, work);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
		ib_enum_all_roce_netdevs(work->cmds[i].filter,
					 work->cmds[i].filter_ndev,
					 work->cmds[i].cb,
					 work->cmds[i].ndev);
		dev_put(work->cmds[i].ndev);
		dev_put(work->cmds[i].filter_ndev);
	}

	kfree(work);
}

static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
				struct net_device *ndev)
{
	unsigned int i;
	struct netdev_event_work *ndev_work =
		kmalloc(sizeof(*ndev_work), GFP_KERNEL);

	if (!ndev_work)
		return NOTIFY_DONE;

	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
		if (!ndev_work->cmds[i].ndev)
			ndev_work->cmds[i].ndev = ndev;
		if (!ndev_work->cmds[i].filter_ndev)
			ndev_work->cmds[i].filter_ndev = ndev;
		dev_hold(ndev_work->cmds[i].ndev);
		dev_hold(ndev_work->cmds[i].filter_ndev);
	}
	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

	queue_work(gid_cache_wq, &ndev_work->work);

	return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
	.cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
	.cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
					struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd upper_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

	if (changeupper_info->linking == false) {
		cmds[0] = upper_ips_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd;
	} else {
		cmds[0] = bonding_default_del_cmd;
		cmds[0].ndev = changeupper_info->upper_dev;
		cmds[1] = add_cmd_upper_ips;
		cmds[1].ndev = changeupper_info->upper_dev;
		cmds[1].filter_ndev = changeupper_info->upper_dev;
	}
}
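
/*
 * netdevice_event() translates netdev notifier events into a short
 * command list (at most ROCE_NETDEV_CALLBACK_SZ entries, executed in
 * order from a workqueue):
 *
 *   NETDEV_REGISTER / NETDEV_UP -> drop stale bond default GIDs, add GIDs
 *   NETDEV_UNREGISTER           -> drop every GID of the netdev
 *   NETDEV_CHANGEADDR           -> drop default GIDs, then re-add
 *   NETDEV_CHANGEUPPER          -> handled by netdevice_event_changeupper()
 *   NETDEV_BONDING_FAILOVER     -> drop upper-dev GIDs and bond defaults,
 *                                  then re-add via the new active slave
 */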
static int netdevice_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	static const struct netdev_event_work_cmd del_cmd = {
		.cb = del_netdev_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
		.cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
	static const struct netdev_event_work_cmd default_del_cmd = {
		.cb = del_netdev_default_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		cmds[0] = bonding_default_del_cmd_join;
		cmds[1] = add_cmd;
		break;

	case NETDEV_UNREGISTER:
		if (ndev->reg_state < NETREG_UNREGISTERED)
			cmds[0] = del_cmd;
		else
			return NOTIFY_DONE;
		break;

	case NETDEV_CHANGEADDR:
		cmds[0] = default_del_cmd;
		cmds[1] = add_cmd;
		break;

	case NETDEV_CHANGEUPPER:
		netdevice_event_changeupper(
			container_of(ptr, struct netdev_notifier_changeupper_info, info),
			cmds);
		break;

	case NETDEV_BONDING_FAILOVER:
		cmds[0] = bonding_event_ips_del_cmd;
		cmds[1] = bonding_default_del_cmd_join;
		cmds[2] = add_cmd_upper_ips;
		break;

	default:
		return NOTIFY_DONE;
	}

	return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
	struct update_gid_event_work *work =
		container_of(_work, struct update_gid_event_work, work);

	ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
				 callback_for_addr_gid_device_scan, work);

	dev_put(work->gid_attr.ndev);
	kfree(work);
}

static int addr_event(struct notifier_block *this, unsigned long event,
		      struct sockaddr *sa, struct net_device *ndev)
{
	struct update_gid_event_work *work;
	enum gid_op_type gid_op;

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		gid_op = GID_ADD;
		break;

	case NETDEV_DOWN:
		gid_op = GID_DEL;
		break;

	default:
		return NOTIFY_DONE;
	}

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, update_gid_event_work_handler);

	rdma_ip2gid(sa, &work->gid);
	work->gid_op = gid_op;

	memset(&work->gid_attr, 0, sizeof(work->gid_attr));
	dev_hold(ndev);
	work->gid_attr.ndev = ndev;

	queue_work(gid_cache_wq, &work->work);

	return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
			  void *ptr)
{
	struct sockaddr_in	in;
	struct net_device	*ndev;
	struct in_ifaddr	*ifa = ptr;

	in.sin_family = AF_INET;
	in.sin_addr.s_addr = ifa->ifa_address;
	ndev = ifa->ifa_dev->dev;

	return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct sockaddr_in6	in6;
	struct net_device	*ndev;
	struct inet6_ifaddr	*ifa6 = ptr;

	in6.sin6_family = AF_INET6;
	in6.sin6_addr = ifa6->addr;
	ndev = ifa6->idev->dev;

	return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
	.notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
	.notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
	.notifier_call = inet6addr_event
};
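
/*
 * Module wiring: a single ordered workqueue serializes every GID cache
 * update queued by the notifiers above, so for a given sequence of
 * events the corresponding adds and deletes cannot overtake one
 * another.
 */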
int __init roce_gid_mgmt_init(void)
{
	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
	if (!gid_cache_wq)
		return -ENOMEM;

	register_inetaddr_notifier(&nb_inetaddr);
	if (IS_ENABLED(CONFIG_IPV6))
		register_inet6addr_notifier(&nb_inet6addr);
	/* We rely on the netdevice notifier to enumerate all
	 * existing devices in the system. Register to this notifier
	 * last to make sure we will not miss any IP add/del
	 * callbacks.
	 */
	register_netdevice_notifier(&nb_netdevice);

	return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
	if (IS_ENABLED(CONFIG_IPV6))
		unregister_inet6addr_notifier(&nb_inet6addr);
	unregister_inetaddr_notifier(&nb_inetaddr);
	unregister_netdevice_notifier(&nb_netdevice);
	/* Ensure all gid deletion tasks complete before we go down,
	 * to avoid any reference to free'd memory. By the time
	 * ib-core is removed, all physical devices have been removed,
	 * so no issue with remaining hardware contexts.
	 */
	destroy_workqueue(gid_cache_wq);
}