/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u32 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int ipoib_netdev_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct netdev_notifier_info *ni = ptr;
	struct net_device *dev = ni->dev;

	if (dev->netdev_ops->ndo_open != ipoib_open)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_CHANGENAME:
		ipoib_delete_debug_files(dev);
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_UNREGISTER:
		ipoib_delete_debug_files(dev);
		break;
	}

	return NOTIFY_DONE;
}
#endif

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	ipoib_ib_dev_up(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
		}
		up_read(&priv->vlan_rwsem);
	} else if (priv->parent) {
		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
			ipoib_dbg(priv, "parent device %s is not up, so child device may not be functioning.\n",
				  ppriv->dev->name);
	}
	netif_start_queue(dev);

	return 0;

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = 0;

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu >
		    priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
	    new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	if (priv->mcast_mtu < priv->admin_mtu)
		ipoib_dbg(priv, "MTU must be smaller than the underlying "
				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);

	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	if (priv->rn_ops->ndo_change_mtu) {
		bool carrier_status = netif_carrier_ok(dev);

		netif_carrier_off(dev);

		/* notify lower level on the real mtu */
		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);

		if (carrier_status)
			netif_carrier_on(dev);
	} else {
		dev->mtu = new_mtu;
	}

	return ret;
}

static void ipoib_get_stats(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (priv->rn_ops->ndo_get_stats64)
		priv->rn_ops->ndo_get_stats64(dev, stats);
	else
		netdev_stats_to_stats64(stats, &dev->stats);
}

/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}

/*
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}

struct ipoib_walk_data {
	const struct sockaddr *addr;
	struct net_device *result;
};

static int ipoib_upper_walk(struct net_device *upper,
			    struct netdev_nested_priv *priv)
{
	struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
	int ret = 0;

	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
		dev_hold(upper);
		data->result = upper;
		ret = 1;
	}

	return ret;
}

/**
 * ipoib_get_net_dev_match_addr - Find a net_device matching
 * the given address, which is an upper device of the given net_device.
 *
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct netdev_nested_priv priv;
	struct ipoib_walk_data data = {
		.addr = addr,
	};

	priv.data = (void *)&data;
	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		data.result = dev;
		goto out;
	}

	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
out:
	rcu_read_unlock();
	return data.result;
}

/* returns the number of IPoIB netdevs on top of a given ipoib device
 * matching a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}

/* Returns the number of matching net_devs found (between 0 and 2). Also
 * return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}

static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u32 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		fallthrough;
	case 1:
		return net_dev;
	}
}

int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "connected\n")) ||
	    (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "datagram\n"))) {
		return 0;
	}

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		netif_set_real_num_tx_queues(dev, 1);
		rtnl_unlock();
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
		rtnl_unlock();
		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	return -EINVAL;
}

struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof(union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;
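	/*
	 * Seed the cursor with an all-zero DGID so that the first
	 * ipoib_path_iter_next() call below returns the path with the
	 * smallest DGID; each later call advances in increasing GID order.
	 */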

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof(union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
			  path->pathrec.dgid.raw);
		if (path->ah)
			path->ah->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
{
	struct ipoib_pseudo_header *phdr;

	phdr = skb_push(skb, sizeof(*phdr));
	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void path_rec_completion(int status,
				struct sa_path_rec *pathrec,
				unsigned int num_prs, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct rdma_ah_attr av;

		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
					       pathrec, &av, NULL)) {
			ah = ipoib_create_ah(dev, priv->pd, &av);
			rdma_destroy_ah_attr(&av);
		}
	}

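	/*
	 * Everything below that touches path->ah and the waiting neighs runs
	 * under priv->lock; skbs released by the new AH are only collected on
	 * skqueue here and are retransmitted after the lock is dropped.
	 */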
	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		/*
		 * pathrec.dgid is used as the database key from the LLADDR,
		 * it must remain unchanged even if the SA returns a different
		 * GID to use in the AH.
		 */
		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
			   sizeof(union ib_gid))) {
			ipoib_dbg(
				priv,
				"%s got PathRec for gid %pI6 while asked for %pI6\n",
				dev->name, pathrec->dgid.raw,
				path->pathrec.dgid.raw);
			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
			       sizeof(union ib_gid));
		}

		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->ah->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		int ret;
		skb->dev = dev;
		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
	}
}

static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
			  void *gid)
{
	path->dev = priv->dev;

	if (rdma_cap_opa_ah(priv->ca, priv->port))
		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;

	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof(*path), GFP_ATOMIC);
	if (!path)
		return NULL;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	init_path_rec(priv, path, gid);

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID |
				   IB_SA_PATH_REC_SGID |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
			       struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, daddr + 4);
	if (!path)
		goto out;
	if (!path->query)
		path_rec_start(dev, path);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
					  struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* To avoid a race condition, make sure that the
	 * neigh is added only once.
	 */
	if (unlikely(!list_empty(&neigh->list))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return neigh;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah && path->ah->valid) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) <
			    IPOIB_MAX_PATH_REC_QUEUE) {
				push_pseudo_header(skb, neigh->daddr);
				__skb_queue_tail(&neigh->queue, skb);
			} else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
						       IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return NULL;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, neigh->daddr);
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			goto err_drop;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return NULL;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);

	return NULL;
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudo_header *phdr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* no broadcast means that all paths are (going to be) not valid */
	if (!priv->broadcast)
		goto drop_and_unlock;

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->ah || !path->ah->valid) {
		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			if (!path)
				goto drop_and_unlock;
			__path_add(dev, path);
		} else {
			/*
			 * make sure there are no changes in the existing
			 * path record
			 */
			init_path_rec(priv, path, phdr->hwaddr + 4);
		}
		if (!path->query && path_rec_start(dev, path)) {
			goto drop_and_unlock;
		}

		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, phdr->hwaddr);
			__skb_queue_tail(&path->queue, skb);
			goto unlock;
		} else {
			goto drop_and_unlock;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_dbg(priv, "Send unicast ARP to %08x\n",
		  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
				       IPOIB_QPN(phdr->hwaddr));
	return;

drop_and_unlock:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);
unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;
	unsigned long flags;

	phdr = (struct ipoib_pseudo_header *)skb->data;
	skb_pull(skb, sizeof(*phdr));
	header = (struct ipoib_header *)skb->data;

	if (unlikely(phdr->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		phdr->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, phdr->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (unlikely(!neigh)) {
			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
			if (likely(!neigh))
				return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path lookup */
		unicast_arp_send(skb, dev, phdr);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah && neigh->ah->valid) {
		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
						IPOIB_QPN(phdr->hwaddr));
		goto unref;
	} else if (neigh->ah) {
		neigh_refresh_path(neigh, phdr->hwaddr, dev);
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		push_pseudo_header(skb, phdr->hwaddr);
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);

	if (rn->tx_timeout) {
		rn->tx_timeout(dev, txqueue);
		return;
	}
	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
	ipoib_warn(priv,
		   "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
		   netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
		   priv->global_tx_head, priv->global_tx_tail);

	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr,
			     const void *saddr,
			     unsigned int len)
{
	struct ipoib_header *header;

	header = skb_push(skb, sizeof(*header));

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on the dst_entry structure; always stuff the
	 * destination address into the skb hard header so we can figure out
	 * where to send the packet later.
	 */
	push_pseudo_header(skb, daddr);

	return IPOIB_HARD_LEN;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *)daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!refcount_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}

			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
				neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {

				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_mcast_remove_list(&remove_list);
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);
}


static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	refcount_set(&neigh->refcnt, 1);

	return neigh;
}

struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc the hash - maybe a hash resize took place, so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!refcount_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	refcount_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;

	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(ipoib_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kvfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof(union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "%s\n", __func__);
	init_completion(&priv->ntbl.deleted);

	cancel_delayed_work_sync(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}

static void ipoib_napi_add(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
			      IPOIB_NUM_WC);
	netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
			      MAX_SEND_CQE);
}

static void ipoib_napi_del(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_del(&priv->recv_napi);
	netif_napi_del(&priv->send_napi);
}

static void ipoib_dev_uninit_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_transport_dev_cleanup(dev);

	ipoib_napi_del(dev);

	ipoib_cm_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static int ipoib_dev_init_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	u8 addr_mod[3];

	ipoib_napi_add(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kcalloc(ipoib_recvq_size,
				sizeof(*priv->rx_ring),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto out;

	priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
					   sizeof(*priv->tx_ring)));
	if (!priv->tx_ring) {
		pr_warn("%s: failed to allocate TX ring (%d entries)\n",
			priv->ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */

	if (ipoib_transport_dev_init(dev, priv->ca)) {
		pr_warn("%s: ipoib_transport_dev_init failed\n",
			priv->ca->name);
		goto out_tx_ring_cleanup;
	}

	/* after qp created set dev address */
	addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
	addr_mod[1] = (priv->qp->qp_num >>  8) & 0xff;
	addr_mod[2] = (priv->qp->qp_num) & 0xff;
	dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	ipoib_napi_del(dev);
	return -ENOMEM;
}

static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!priv->rn_ops->ndo_eth_ioctl)
		return -EOPNOTSUPP;

	return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
}

static int ipoib_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = -ENOMEM;

	priv->qp = NULL;

	/*
	 * the various IPoIB tasks assume they will never race against
	 * themselves, so always use a single thread workqueue
	 */
	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
	if (!priv->wq) {
		pr_warn("%s: failed to allocate device WQ\n", dev->name);
		goto out;
	}

	/* create the PD, which is used for both control and data path */
	priv->pd = ib_alloc_pd(priv->ca, 0);
	if (IS_ERR(priv->pd)) {
		pr_warn("%s: failed to allocate PD\n", priv->ca->name);
		goto clean_wq;
	}

	ret = priv->rn_ops->ndo_init(dev);
	if (ret) {
		pr_warn("%s failed to init HW resource\n", dev->name);
		goto out_free_pd;
	}

	ret = ipoib_neigh_hash_init(priv);
	if (ret) {
		pr_warn("%s failed to init neigh hash\n", dev->name);
		goto out_dev_uninit;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			pr_warn("%s failed to open device\n", dev->name);
			ret = -ENODEV;
			goto out_hash_uninit;
		}
	}

	return 0;

out_hash_uninit:
	ipoib_neigh_hash_uninit(dev);

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_free_pd:
	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}

clean_wq:
	if (priv->wq) {
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}

out:
	return ret;
}

/*
 * This must be called before doing an unregister_netdev on a parent device to
 * shutdown the IB event handler.
 */
static void ipoib_parent_unregister_pre(struct net_device *ndev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	/*
	 * ipoib_set_mac checks netif_running before pushing work, clearing
	 * running ensures it will not add more work.
	 */
	rtnl_lock();
	dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
	rtnl_unlock();

	/* ipoib_event() cannot be running once this returns */
	ib_unregister_event_handler(&priv->event_handler);

	/*
	 * Work on the queue grabs the rtnl lock, so this cannot be done while
	 * also holding it.
	 */
	flush_workqueue(ipoib_workqueue);
}

static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
{
	priv->hca_caps = priv->ca->attrs.device_cap_flags;
	priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->kernel_caps & IBK_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}
}

static int ipoib_parent_init(struct net_device *ndev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
	struct ib_port_attr attr;
	int result;

	result = ib_query_port(priv->ca, priv->port, &attr);
	if (result) {
		pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
			priv->port);
		return result;
	}
	priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);

	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
			priv->ca->name, priv->port, result);
		return result;
	}

	result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
	if (result) {
		pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
			priv->ca->name, priv->port, result);
		return result;
	}
	dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));

	SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
	priv->dev->dev_port = priv->port - 1;
	/* Let's set this one too for backwards compatibility. */
	priv->dev->dev_id = priv->port - 1;

	return 0;
}

static void ipoib_child_init(struct net_device *ndev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
	struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);

	priv->max_ib_mtu = ppriv->max_ib_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	if (memchr_inv(priv->dev->dev_addr, 0, INFINIBAND_ALEN))
		memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
		       sizeof(priv->local_gid));
	else {
		__dev_addr_set(priv->dev, ppriv->dev->dev_addr,
			       INFINIBAND_ALEN);
		memcpy(&priv->local_gid, &ppriv->local_gid,
		       sizeof(priv->local_gid));
	}
}

static int ipoib_ndo_init(struct net_device *ndev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
	int rc;
	struct rdma_netdev *rn = netdev_priv(ndev);

	if (priv->parent) {
		ipoib_child_init(ndev);
	} else {
		rc = ipoib_parent_init(ndev);
		if (rc)
			return rc;
	}

	/* MTU will be reset when mcast join happens */
	ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
	rn->mtu = priv->mcast_mtu;
	ndev->max_mtu = IPOIB_CM_MTU;

	ndev->neigh_priv_len = sizeof(struct ipoib_neigh);

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	ndev->broadcast[8] = priv->pkey >> 8;
	ndev->broadcast[9] = priv->pkey & 0xff;
	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	ipoib_set_dev_features(priv);

	rc = ipoib_dev_init(ndev);
	if (rc) {
		pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
			priv->ca->name, priv->dev->name, priv->port, rc);
		return rc;
	}

	if (priv->parent) {
		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);

		dev_hold(priv->parent);

		down_write(&ppriv->vlan_rwsem);
		list_add_tail(&priv->list, &ppriv->child_intfs);
		up_write(&ppriv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_ndo_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ASSERT_RTNL();

	/*
	 * ipoib_remove_one guarantees the children are removed before the
	 * parent, and that is the only place where a parent can be removed.
	 */
	WARN_ON(!list_empty(&priv->child_intfs));

	if (priv->parent) {
		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);

		down_write(&ppriv->vlan_rwsem);
		list_del(&priv->list);
		up_write(&ppriv->vlan_rwsem);
	}

	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	/* no more works over the priv->wq */
	if (priv->wq) {
		/* See ipoib_mcast_carrier_on_task() */
		WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}

	if (priv->parent)
		dev_put(priv->parent);
}

static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;
	memcpy(ivf->mac, dev->dev_addr, dev->addr_len);

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_guid(struct net_device *dev, int vf,
			     struct ifla_vf_guid *node_guid,
			     struct ifla_vf_guid *port_guid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}

static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_init = ipoib_ndo_init,
	.ndo_uninit = ipoib_ndo_uninit,
	.ndo_open = ipoib_open,
	.ndo_stop = ipoib_stop,
	.ndo_change_mtu = ipoib_change_mtu,
	.ndo_fix_features = ipoib_fix_features,
	.ndo_start_xmit = ipoib_start_xmit,
static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_init = ipoib_ndo_init,
	.ndo_uninit = ipoib_ndo_uninit,
	.ndo_open = ipoib_open,
	.ndo_stop = ipoib_stop,
	.ndo_change_mtu = ipoib_change_mtu,
	.ndo_fix_features = ipoib_fix_features,
	.ndo_start_xmit = ipoib_start_xmit,
	.ndo_tx_timeout = ipoib_timeout,
	.ndo_set_rx_mode = ipoib_set_mcast_list,
	.ndo_get_iflink = ipoib_get_iflink,
	.ndo_set_vf_link_state = ipoib_set_vf_link_state,
	.ndo_get_vf_config = ipoib_get_vf_config,
	.ndo_get_vf_stats = ipoib_get_vf_stats,
	.ndo_get_vf_guid = ipoib_get_vf_guid,
	.ndo_set_vf_guid = ipoib_set_vf_guid,
	.ndo_set_mac_address = ipoib_set_mac,
	.ndo_get_stats64 = ipoib_get_stats,
	.ndo_eth_ioctl = ipoib_ioctl,
};

static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_init = ipoib_ndo_init,
	.ndo_uninit = ipoib_ndo_uninit,
	.ndo_open = ipoib_open,
	.ndo_stop = ipoib_stop,
	.ndo_change_mtu = ipoib_change_mtu,
	.ndo_fix_features = ipoib_fix_features,
	.ndo_start_xmit = ipoib_start_xmit,
	.ndo_tx_timeout = ipoib_timeout,
	.ndo_set_rx_mode = ipoib_set_mcast_list,
	.ndo_get_iflink = ipoib_get_iflink,
	.ndo_get_stats64 = ipoib_get_stats,
	.ndo_eth_ioctl = ipoib_ioctl,
};

static const struct net_device_ops ipoib_netdev_default_pf = {
	.ndo_init = ipoib_dev_init_default,
	.ndo_uninit = ipoib_dev_uninit_default,
	.ndo_open = ipoib_ib_dev_open_default,
	.ndo_stop = ipoib_ib_dev_stop_default,
};

void ipoib_setup_common(struct net_device *dev)
{
	dev->header_ops = &ipoib_header_ops;
	dev->netdev_ops = &ipoib_netdev_default_pf;

	ipoib_set_ethtool_ops(dev);

	dev->watchdog_timeo = HZ;

	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len = IPOIB_HARD_LEN;
	dev->addr_len = INFINIBAND_ALEN;
	dev->type = ARPHRD_INFINIBAND;
	dev->tx_queue_len = ipoib_sendq_size * 2;
	dev->features = (NETIF_F_VLAN_CHALLENGED |
			 NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

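	/*
	 * Layout reminder (added commentary, not in the original source):
	 * the 20-byte (INFINIBAND_ALEN) IPoIB link-layer address is a
	 * 4-byte field carrying flags plus the queue pair number, followed
	 * by the 16-byte GID, which is why the GID is read from
	 * dev_addr + 4 elsewhere in this file. In the broadcast address
	 * copied above, bytes 8 and 9 carry the P_Key and are patched in
	 * ipoib_ndo_init().
	 */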
	/*
	 * unregister_netdev always frees the netdev; we use this mode
	 * consistently to unify all the various unregister paths, including
	 * those connected to rtnl_link_ops which require it.
	 */
	dev->needs_free_netdev = true;
}

static void ipoib_build_priv(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->dev = dev;
	spin_lock_init(&priv->lock);
	init_rwsem(&priv->vlan_rwsem);
	mutex_init(&priv->mcast_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
					     const char *name)
{
	struct net_device *dev;

	dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
				NET_NAME_UNKNOWN, ipoib_setup_common);
	if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
		return dev;

	dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
			   ipoib_setup_common);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	return dev;
}

int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
		    struct net_device *dev)
{
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_dev_priv *priv;
	int rc;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ca = hca;
	priv->port = port;

	rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
			      NET_NAME_UNKNOWN, ipoib_setup_common, dev);
	if (rc) {
		if (rc != -EOPNOTSUPP)
			goto out;

		rn->send = ipoib_send;
		rn->attach_mcast = ipoib_mcast_attach;
		rn->detach_mcast = ipoib_mcast_detach;
		rn->hca = hca;

		rc = netif_set_real_num_tx_queues(dev, 1);
		if (rc)
			goto out;

		rc = netif_set_real_num_rx_queues(dev, 1);
		if (rc)
			goto out;
	}

	priv->rn_ops = dev->netdev_ops;

	if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
		dev->netdev_ops = &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops = &ipoib_netdev_ops_pf;

	rn->clnt_priv = priv;
	/*
	 * Only the child register_netdev flows can handle priv_destructor
	 * being set, so we force it to NULL here and handle manually until it
	 * is safe to turn on.
	 */
	priv->next_priv_destructor = dev->priv_destructor;
	dev->priv_destructor = NULL;

	ipoib_build_priv(dev);

	return 0;

out:
	kfree(priv);
	return rc;
}

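/*
 * Added summary (not part of the original source): ipoib_intf_alloc() below
 * pairs ipoib_alloc_netdev() with ipoib_intf_init(). On failure the netdev is
 * freed right here; on success the caller either registers the device and
 * lets priv_destructor (set to ipoib_intf_free in ipoib_add_port()) clean up,
 * or calls ipoib_intf_free() and free_netdev() itself, as the error paths in
 * ipoib_add_port() do.
 */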
struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
				    const char *name)
{
	struct net_device *dev;
	int rc;

	dev = ipoib_alloc_netdev(hca, port, name);
	if (IS_ERR(dev))
		return dev;

	rc = ipoib_intf_init(hca, port, name, dev);
	if (rc) {
		free_netdev(dev);
		return ERR_PTR(rc);
	}

	/*
	 * Upon success the caller must ensure ipoib_intf_free is called or
	 * register_netdevice succeeded and priv_destructor is set to
	 * ipoib_intf_free.
	 */
	return dev;
}

void ipoib_intf_free(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);

	dev->priv_destructor = priv->next_priv_destructor;
	if (dev->priv_destructor)
		dev->priv_destructor(dev);

	/*
	 * There are some error flows around register_netdev failing that may
	 * attempt to call priv_destructor twice; prevent that from happening.
	 */
	dev->priv_destructor = NULL;

	/* unregister/destroy is very complicated. Make bugs more obvious. */
	rn->clnt_priv = NULL;

	kfree(priv);
}

static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sysfs_emit(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR_RO(pkey);

static ssize_t umcast_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sysfs_emit(buf, "%d\n",
			  test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t umcast_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR_RW(umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *netdev = priv->dev;

	netif_addr_lock_bh(netdev);

	memcpy(&priv->local_gid.global.interface_id,
	       &gid->global.interface_id,
	       sizeof(gid->global.interface_id));
	dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	netif_addr_unlock_bh(netdev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(child_priv, &priv->child_intfs, list)
			set_base_guid(child_priv, gid);
		up_read(&priv->vlan_rwsem);
	}
}

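/*
 * Added commentary (not in the original source): when userspace changes the
 * hardware address (e.g. "ip link set dev ib0 address ...", interface name
 * only an example), only the GUID part of the 20-byte lladdr may differ.
 * ipoib_check_lladdr() below rejects any address whose QPN, reserved bytes or
 * subnet prefix differ from the current one, or whose interface ID is zero.
 */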
static int ipoib_check_lladdr(struct net_device *dev,
			      struct sockaddr_storage *ss)
{
	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
	int ret = 0;

	netif_addr_lock_bh(dev);

	/* Make sure the QPN, reserved field and subnet prefix match the
	 * current lladdr; this also ensures the lladdr is unicast.
	 */
	if (memcmp(dev->dev_addr, ss->__data,
		   4 + sizeof(gid->global.subnet_prefix)) ||
	    gid->global.interface_id == 0)
		ret = -EINVAL;

	netif_addr_unlock_bh(dev);

	return ret;
}

static int ipoib_set_mac(struct net_device *dev, void *addr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sockaddr_storage *ss = addr;
	int ret;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;

	ret = ipoib_check_lladdr(dev, ss);
	if (ret)
		return ret;

	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}

static ssize_t create_child_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(create_child);

static ssize_t delete_child_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

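/*
 * Usage sketch (added, not from the original source; the interface name is
 * only an example): the create_child/delete_child attributes above take a
 * P_Key, so a child interface on P_Key 0x8001 can be created and removed
 * with:
 *
 *	echo 0x8001 > /sys/class/net/ib0/create_child
 *	echo 0x8001 > /sys/class/net/ib0/delete_child
 *
 * The parent's own P_Key can be read back from the read-only "pkey"
 * attribute created by ipoib_add_pkey_attr().
 */
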
/*
 * We erroneously exposed the iface's port number in the dev_id
 * sysfs field long after dev_port was introduced for that purpose[1],
 * and we need to stop everyone from relying on that.
 * Let's overload the show routine for the dev_id file here
 * to gently bring the issue up.
 *
 * [1] https://www.spinics.net/lists/netdev/msg272123.html
 */
static ssize_t dev_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	/*
	 * ndev->dev_port will be equal to 0 in old kernels prior to commit
	 * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
	 * port numbers"). Zero was chosen as a special case for user space
	 * applications to fall back and query dev_id to check if it has a
	 * different value or not.
	 *
	 * Don't print a warning in that scenario.
	 *
	 * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
	 */
	if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
		netdev_info_once(ndev,
			"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
			current->comm);

	return sysfs_emit(buf, "%#x\n", ndev->dev_id);
}
static DEVICE_ATTR_RO(dev_id);

static int ipoib_intercept_dev_id_attr(struct net_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_dev_id);
	return device_create_file(&dev->dev, &dev_attr_dev_id);
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u32 port)
{
	struct rtnl_link_ops *ops = ipoib_get_link_ops();
	struct rdma_netdev_alloc_params params;
	struct ipoib_dev_priv *priv;
	struct net_device *ndev;
	int result;

	ndev = ipoib_intf_alloc(hca, port, format);
	if (IS_ERR(ndev)) {
		pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
			PTR_ERR(ndev));
		return ndev;
	}
	priv = ipoib_priv(ndev);

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	ib_register_event_handler(&priv->event_handler);

	/* call event handler to ensure pkey in sync */
	queue_work(ipoib_workqueue, &priv->flush_heavy);

	ndev->rtnl_link_ops = ipoib_get_link_ops();

	result = register_netdev(ndev);
	if (result) {
		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
			hca->name, port, result);

		ipoib_parent_unregister_pre(ndev);
		ipoib_intf_free(ndev);
		free_netdev(ndev);

		return ERR_PTR(result);
	}

	if (hca->ops.rdma_netdev_get_params) {
		int rc = hca->ops.rdma_netdev_get_params(hca, port,
							 RDMA_NETDEV_IPOIB,
							 &params);

		if (!rc && ops->priv_size < params.sizeof_priv)
			ops->priv_size = params.sizeof_priv;
	}

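	/*
	 * Added note (an interpretation, not an original comment): growing
	 * ops->priv_size to the HCA's sizeof_priv means that IPoIB devices
	 * later created through this rtnl_link_ops will reserve enough
	 * private space for the device driver's rdma_netdev as well.
	 */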
	/*
	 * We cannot set priv_destructor before register_netdev because we
	 * need priv to be always valid during the error flow to execute
	 * ipoib_parent_unregister_pre(). Instead handle it manually and only
	 * enter priv_destructor mode once we are completely registered.
	 */
	ndev->priv_destructor = ipoib_intf_free;

	if (ipoib_intercept_dev_id_attr(ndev))
		goto sysfs_failed;
	if (ipoib_cm_add_mode_attr(ndev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(ndev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(ndev))
		goto sysfs_failed;
	if (device_create_file(&ndev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&ndev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return ndev;

sysfs_failed:
	ipoib_parent_unregister_pre(ndev);
	unregister_netdev(ndev);
	return ERR_PTR(-ENOMEM);
}

static int ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	unsigned int p;
	int count = 0;

	dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
	if (!dev_list)
		return -ENOMEM;

	INIT_LIST_HEAD(dev_list);

	rdma_for_each_port (device, p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = ipoib_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return -EOPNOTSUPP;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
	return 0;
}

static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
	struct list_head *dev_list = client_data;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		LIST_HEAD(head);
		ipoib_parent_unregister_pre(priv->dev);

		rtnl_lock();

		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
					 list)
			unregister_netdevice_queue(cpriv->dev, &head);
		unregister_netdevice_queue(priv->dev, &head);
		unregister_netdevice_many(&head);

		rtnl_unlock();
	}

	kfree(dev_list);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static struct notifier_block ipoib_netdev_notifier = {
	.notifier_call = ipoib_netdev_event,
};
#endif

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE,
				IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ipoib_register_debugfs();

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations. However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.
	 * We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue. This avoids the deadlocks.
	 */
	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);