1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include "ipoib.h" 36 37 #include <linux/module.h> 38 39 #include <linux/init.h> 40 #include <linux/slab.h> 41 #include <linux/kernel.h> 42 #include <linux/vmalloc.h> 43 44 #include <linux/if_arp.h> /* For ARPHRD_xxx */ 45 46 #include <linux/ip.h> 47 #include <linux/in.h> 48 49 #include <linux/jhash.h> 50 #include <net/arp.h> 51 #include <net/addrconf.h> 52 #include <linux/inetdevice.h> 53 #include <rdma/ib_cache.h> 54 55 #define DRV_VERSION "1.0.0" 56 57 const char ipoib_driver_version[] = DRV_VERSION; 58 59 MODULE_AUTHOR("Roland Dreier"); 60 MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); 61 MODULE_LICENSE("Dual BSD/GPL"); 62 MODULE_VERSION(DRV_VERSION); 63 64 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; 65 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; 66 67 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444); 68 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); 69 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); 70 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); 71 72 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 73 int ipoib_debug_level; 74 75 module_param_named(debug_level, ipoib_debug_level, int, 0644); 76 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 77 #endif 78 79 struct ipoib_path_iter { 80 struct net_device *dev; 81 struct ipoib_path path; 82 }; 83 84 static const u8 ipv4_bcast_addr[] = { 85 0x00, 0xff, 0xff, 0xff, 86 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, 87 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff 88 }; 89 90 struct workqueue_struct *ipoib_workqueue; 91 92 struct ib_sa_client ipoib_sa_client; 93 94 static void ipoib_add_one(struct ib_device *device); 95 static void ipoib_remove_one(struct ib_device *device, void *client_data); 96 static void ipoib_neigh_reclaim(struct rcu_head *rp); 97 static struct net_device *ipoib_get_net_dev_by_params( 98 struct ib_device *dev, u8 port, u16 pkey, 99 const union ib_gid *gid, 
const struct sockaddr *addr, 100 void *client_data); 101 102 static struct ib_client ipoib_client = { 103 .name = "ipoib", 104 .add = ipoib_add_one, 105 .remove = ipoib_remove_one, 106 .get_net_dev_by_params = ipoib_get_net_dev_by_params, 107 }; 108 109 int ipoib_open(struct net_device *dev) 110 { 111 struct ipoib_dev_priv *priv = netdev_priv(dev); 112 113 ipoib_dbg(priv, "bringing up interface\n"); 114 115 netif_carrier_off(dev); 116 117 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 118 119 if (ipoib_ib_dev_open(dev)) { 120 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 121 return 0; 122 goto err_disable; 123 } 124 125 if (ipoib_ib_dev_up(dev)) 126 goto err_stop; 127 128 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 129 struct ipoib_dev_priv *cpriv; 130 131 /* Bring up any child interfaces too */ 132 down_read(&priv->vlan_rwsem); 133 list_for_each_entry(cpriv, &priv->child_intfs, list) { 134 int flags; 135 136 flags = cpriv->dev->flags; 137 if (flags & IFF_UP) 138 continue; 139 140 dev_change_flags(cpriv->dev, flags | IFF_UP); 141 } 142 up_read(&priv->vlan_rwsem); 143 } 144 145 netif_start_queue(dev); 146 147 return 0; 148 149 err_stop: 150 ipoib_ib_dev_stop(dev); 151 152 err_disable: 153 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 154 155 return -EINVAL; 156 } 157 158 static int ipoib_stop(struct net_device *dev) 159 { 160 struct ipoib_dev_priv *priv = netdev_priv(dev); 161 162 ipoib_dbg(priv, "stopping interface\n"); 163 164 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 165 166 netif_stop_queue(dev); 167 168 ipoib_ib_dev_down(dev); 169 ipoib_ib_dev_stop(dev); 170 171 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 172 struct ipoib_dev_priv *cpriv; 173 174 /* Bring down any child interfaces too */ 175 down_read(&priv->vlan_rwsem); 176 list_for_each_entry(cpriv, &priv->child_intfs, list) { 177 int flags; 178 179 flags = cpriv->dev->flags; 180 if (!(flags & IFF_UP)) 181 continue; 182 183 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 184 } 185 up_read(&priv->vlan_rwsem); 186 } 187 188 return 0; 189 } 190 191 static void ipoib_uninit(struct net_device *dev) 192 { 193 ipoib_dev_cleanup(dev); 194 } 195 196 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) 197 { 198 struct ipoib_dev_priv *priv = netdev_priv(dev); 199 200 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) 201 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 202 203 return features; 204 } 205 206 static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 207 { 208 struct ipoib_dev_priv *priv = netdev_priv(dev); 209 210 /* dev->mtu > 2K ==> connected mode */ 211 if (ipoib_cm_admin_enabled(dev)) { 212 if (new_mtu > ipoib_cm_max_mtu(dev)) 213 return -EINVAL; 214 215 if (new_mtu > priv->mcast_mtu) 216 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", 217 priv->mcast_mtu); 218 219 dev->mtu = new_mtu; 220 return 0; 221 } 222 223 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) 224 return -EINVAL; 225 226 priv->admin_mtu = new_mtu; 227 228 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 229 230 return 0; 231 } 232 233 /* Called with an RCU read lock taken */ 234 static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr, 235 struct net_device *dev) 236 { 237 struct net *net = dev_net(dev); 238 struct in_device *in_dev; 239 struct sockaddr_in *addr_in = (struct sockaddr_in *)addr; 240 struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr; 241 __be32 ret_addr; 242 243 switch (addr->sa_family) { 244 case AF_INET: 245 in_dev = in_dev_get(dev); 
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}

/**
 * ipoib_get_master_net_dev - Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}

/**
 * ipoib_get_net_dev_match_addr - Find a net_device matching the given address, which is an upper device of the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct net_device *upper,
			  *result = NULL;
	struct list_head *iter;

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		result = dev;
		goto out;
	}

	netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
		if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
			dev_hold(upper);
			result = upper;
			break;
		}
	}
out:
	rcu_read_unlock();
	return result;
}

/* returns the number of IPoIB netdevs on top of a given ipoib device matching a
 * pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}

/* Returns the number of matching net_devs found (between 0 and 2).
Also 376 * return the matching net_device in the @net_dev parameter, holding a 377 * reference to the net_device, if the number of matches >= 1 */ 378 static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port, 379 u16 pkey_index, 380 const union ib_gid *gid, 381 const struct sockaddr *addr, 382 struct net_device **net_dev) 383 { 384 struct ipoib_dev_priv *priv; 385 int matches = 0; 386 387 *net_dev = NULL; 388 389 list_for_each_entry(priv, dev_list, list) { 390 if (priv->port != port) 391 continue; 392 393 matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index, 394 addr, 0, net_dev); 395 if (matches > 1) 396 break; 397 } 398 399 return matches; 400 } 401 402 static struct net_device *ipoib_get_net_dev_by_params( 403 struct ib_device *dev, u8 port, u16 pkey, 404 const union ib_gid *gid, const struct sockaddr *addr, 405 void *client_data) 406 { 407 struct net_device *net_dev; 408 struct list_head *dev_list = client_data; 409 u16 pkey_index; 410 int matches; 411 int ret; 412 413 if (!rdma_protocol_ib(dev, port)) 414 return NULL; 415 416 ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index); 417 if (ret) 418 return NULL; 419 420 if (!dev_list) 421 return NULL; 422 423 /* See if we can find a unique device matching the L2 parameters */ 424 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, 425 gid, NULL, &net_dev); 426 427 switch (matches) { 428 case 0: 429 return NULL; 430 case 1: 431 return net_dev; 432 } 433 434 dev_put(net_dev); 435 436 /* Couldn't find a unique device with L2 parameters only. Use L3 437 * address to uniquely match the net device */ 438 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, 439 gid, addr, &net_dev); 440 switch (matches) { 441 case 0: 442 return NULL; 443 default: 444 dev_warn_ratelimited(&dev->dev, 445 "duplicate IP address detected\n"); 446 /* Fall through */ 447 case 1: 448 return net_dev; 449 } 450 } 451 452 int ipoib_set_mode(struct net_device *dev, const char *buf) 453 { 454 struct ipoib_dev_priv *priv = netdev_priv(dev); 455 456 /* flush paths if we switch modes so that connections are restarted */ 457 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 458 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 459 ipoib_warn(priv, "enabling connected mode " 460 "will cause multicast packet drops\n"); 461 netdev_update_features(dev); 462 dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); 463 rtnl_unlock(); 464 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; 465 466 ipoib_flush_paths(dev); 467 rtnl_lock(); 468 return 0; 469 } 470 471 if (!strcmp(buf, "datagram\n")) { 472 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 473 netdev_update_features(dev); 474 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 475 rtnl_unlock(); 476 ipoib_flush_paths(dev); 477 rtnl_lock(); 478 return 0; 479 } 480 481 return -EINVAL; 482 } 483 484 static struct ipoib_path *__path_find(struct net_device *dev, void *gid) 485 { 486 struct ipoib_dev_priv *priv = netdev_priv(dev); 487 struct rb_node *n = priv->path_tree.rb_node; 488 struct ipoib_path *path; 489 int ret; 490 491 while (n) { 492 path = rb_entry(n, struct ipoib_path, rb_node); 493 494 ret = memcmp(gid, path->pathrec.dgid.raw, 495 sizeof (union ib_gid)); 496 497 if (ret < 0) 498 n = n->rb_left; 499 else if (ret > 0) 500 n = n->rb_right; 501 else 502 return path; 503 } 504 505 return NULL; 506 } 507 508 static int __path_add(struct net_device *dev, struct ipoib_path *path) 509 { 510 struct ipoib_dev_priv *priv = netdev_priv(dev); 511 struct rb_node **n = 
&priv->path_tree.rb_node; 512 struct rb_node *pn = NULL; 513 struct ipoib_path *tpath; 514 int ret; 515 516 while (*n) { 517 pn = *n; 518 tpath = rb_entry(pn, struct ipoib_path, rb_node); 519 520 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, 521 sizeof (union ib_gid)); 522 if (ret < 0) 523 n = &pn->rb_left; 524 else if (ret > 0) 525 n = &pn->rb_right; 526 else 527 return -EEXIST; 528 } 529 530 rb_link_node(&path->rb_node, pn, n); 531 rb_insert_color(&path->rb_node, &priv->path_tree); 532 533 list_add_tail(&path->list, &priv->path_list); 534 535 return 0; 536 } 537 538 static void path_free(struct net_device *dev, struct ipoib_path *path) 539 { 540 struct sk_buff *skb; 541 542 while ((skb = __skb_dequeue(&path->queue))) 543 dev_kfree_skb_irq(skb); 544 545 ipoib_dbg(netdev_priv(dev), "path_free\n"); 546 547 /* remove all neigh connected to this path */ 548 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 549 550 if (path->ah) 551 ipoib_put_ah(path->ah); 552 553 kfree(path); 554 } 555 556 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 557 558 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) 559 { 560 struct ipoib_path_iter *iter; 561 562 iter = kmalloc(sizeof *iter, GFP_KERNEL); 563 if (!iter) 564 return NULL; 565 566 iter->dev = dev; 567 memset(iter->path.pathrec.dgid.raw, 0, 16); 568 569 if (ipoib_path_iter_next(iter)) { 570 kfree(iter); 571 return NULL; 572 } 573 574 return iter; 575 } 576 577 int ipoib_path_iter_next(struct ipoib_path_iter *iter) 578 { 579 struct ipoib_dev_priv *priv = netdev_priv(iter->dev); 580 struct rb_node *n; 581 struct ipoib_path *path; 582 int ret = 1; 583 584 spin_lock_irq(&priv->lock); 585 586 n = rb_first(&priv->path_tree); 587 588 while (n) { 589 path = rb_entry(n, struct ipoib_path, rb_node); 590 591 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, 592 sizeof (union ib_gid)) < 0) { 593 iter->path = *path; 594 ret = 0; 595 break; 596 } 597 598 n = rb_next(n); 599 } 600 601 spin_unlock_irq(&priv->lock); 602 603 return ret; 604 } 605 606 void ipoib_path_iter_read(struct ipoib_path_iter *iter, 607 struct ipoib_path *path) 608 { 609 *path = iter->path; 610 } 611 612 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ 613 614 void ipoib_mark_paths_invalid(struct net_device *dev) 615 { 616 struct ipoib_dev_priv *priv = netdev_priv(dev); 617 struct ipoib_path *path, *tp; 618 619 spin_lock_irq(&priv->lock); 620 621 list_for_each_entry_safe(path, tp, &priv->path_list, list) { 622 ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n", 623 be16_to_cpu(path->pathrec.dlid), 624 path->pathrec.dgid.raw); 625 path->valid = 0; 626 } 627 628 spin_unlock_irq(&priv->lock); 629 } 630 631 void ipoib_flush_paths(struct net_device *dev) 632 { 633 struct ipoib_dev_priv *priv = netdev_priv(dev); 634 struct ipoib_path *path, *tp; 635 LIST_HEAD(remove_list); 636 unsigned long flags; 637 638 netif_tx_lock_bh(dev); 639 spin_lock_irqsave(&priv->lock, flags); 640 641 list_splice_init(&priv->path_list, &remove_list); 642 643 list_for_each_entry(path, &remove_list, list) 644 rb_erase(&path->rb_node, &priv->path_tree); 645 646 list_for_each_entry_safe(path, tp, &remove_list, list) { 647 if (path->query) 648 ib_sa_cancel_query(path->query_id, path->query); 649 spin_unlock_irqrestore(&priv->lock, flags); 650 netif_tx_unlock_bh(dev); 651 wait_for_completion(&path->done); 652 path_free(dev, path); 653 netif_tx_lock_bh(dev); 654 spin_lock_irqsave(&priv->lock, flags); 655 } 656 657 spin_unlock_irqrestore(&priv->lock, flags); 658 netif_tx_unlock_bh(dev); 659 } 
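/*
 * path_rec_completion() - callback run when the SA path record query issued
 * by path_rec_start() finishes.  On success it builds a new address handle
 * from the returned record, points the neighbours waiting on this path at it,
 * and re-transmits the skbs that were queued while the path was unresolved;
 * on failure the neighbours for this GID are dropped instead.
 */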
660 661 static void path_rec_completion(int status, 662 struct ib_sa_path_rec *pathrec, 663 void *path_ptr) 664 { 665 struct ipoib_path *path = path_ptr; 666 struct net_device *dev = path->dev; 667 struct ipoib_dev_priv *priv = netdev_priv(dev); 668 struct ipoib_ah *ah = NULL; 669 struct ipoib_ah *old_ah = NULL; 670 struct ipoib_neigh *neigh, *tn; 671 struct sk_buff_head skqueue; 672 struct sk_buff *skb; 673 unsigned long flags; 674 675 if (!status) 676 ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n", 677 be16_to_cpu(pathrec->dlid), pathrec->dgid.raw); 678 else 679 ipoib_dbg(priv, "PathRec status %d for GID %pI6\n", 680 status, path->pathrec.dgid.raw); 681 682 skb_queue_head_init(&skqueue); 683 684 if (!status) { 685 struct ib_ah_attr av; 686 687 if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av)) 688 ah = ipoib_create_ah(dev, priv->pd, &av); 689 } 690 691 spin_lock_irqsave(&priv->lock, flags); 692 693 if (!IS_ERR_OR_NULL(ah)) { 694 path->pathrec = *pathrec; 695 696 old_ah = path->ah; 697 path->ah = ah; 698 699 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", 700 ah, be16_to_cpu(pathrec->dlid), pathrec->sl); 701 702 while ((skb = __skb_dequeue(&path->queue))) 703 __skb_queue_tail(&skqueue, skb); 704 705 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { 706 if (neigh->ah) { 707 WARN_ON(neigh->ah != old_ah); 708 /* 709 * Dropping the ah reference inside 710 * priv->lock is safe here, because we 711 * will hold one more reference from 712 * the original value of path->ah (ie 713 * old_ah). 714 */ 715 ipoib_put_ah(neigh->ah); 716 } 717 kref_get(&path->ah->ref); 718 neigh->ah = path->ah; 719 720 if (ipoib_cm_enabled(dev, neigh->daddr)) { 721 if (!ipoib_cm_get(neigh)) 722 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, 723 path, 724 neigh)); 725 if (!ipoib_cm_get(neigh)) { 726 ipoib_neigh_free(neigh); 727 continue; 728 } 729 } 730 731 while ((skb = __skb_dequeue(&neigh->queue))) 732 __skb_queue_tail(&skqueue, skb); 733 } 734 path->valid = 1; 735 } 736 737 path->query = NULL; 738 complete(&path->done); 739 740 spin_unlock_irqrestore(&priv->lock, flags); 741 742 if (IS_ERR_OR_NULL(ah)) 743 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 744 745 if (old_ah) 746 ipoib_put_ah(old_ah); 747 748 while ((skb = __skb_dequeue(&skqueue))) { 749 skb->dev = dev; 750 if (dev_queue_xmit(skb)) 751 ipoib_warn(priv, "dev_queue_xmit failed " 752 "to requeue packet\n"); 753 } 754 } 755 756 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid) 757 { 758 struct ipoib_dev_priv *priv = netdev_priv(dev); 759 struct ipoib_path *path; 760 761 if (!priv->broadcast) 762 return NULL; 763 764 path = kzalloc(sizeof *path, GFP_ATOMIC); 765 if (!path) 766 return NULL; 767 768 path->dev = dev; 769 770 skb_queue_head_init(&path->queue); 771 772 INIT_LIST_HEAD(&path->neigh_list); 773 774 memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid)); 775 path->pathrec.sgid = priv->local_gid; 776 path->pathrec.pkey = cpu_to_be16(priv->pkey); 777 path->pathrec.numb_path = 1; 778 path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class; 779 780 return path; 781 } 782 783 static int path_rec_start(struct net_device *dev, 784 struct ipoib_path *path) 785 { 786 struct ipoib_dev_priv *priv = netdev_priv(dev); 787 788 ipoib_dbg(priv, "Start path record lookup for %pI6\n", 789 path->pathrec.dgid.raw); 790 791 init_completion(&path->done); 792 793 path->query_id = 794 ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, 795 &path->pathrec, 796 
IB_SA_PATH_REC_DGID | 797 IB_SA_PATH_REC_SGID | 798 IB_SA_PATH_REC_NUMB_PATH | 799 IB_SA_PATH_REC_TRAFFIC_CLASS | 800 IB_SA_PATH_REC_PKEY, 801 1000, GFP_ATOMIC, 802 path_rec_completion, 803 path, &path->query); 804 if (path->query_id < 0) { 805 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); 806 path->query = NULL; 807 complete(&path->done); 808 return path->query_id; 809 } 810 811 return 0; 812 } 813 814 static void neigh_add_path(struct sk_buff *skb, u8 *daddr, 815 struct net_device *dev) 816 { 817 struct ipoib_dev_priv *priv = netdev_priv(dev); 818 struct ipoib_path *path; 819 struct ipoib_neigh *neigh; 820 unsigned long flags; 821 822 spin_lock_irqsave(&priv->lock, flags); 823 neigh = ipoib_neigh_alloc(daddr, dev); 824 if (!neigh) { 825 spin_unlock_irqrestore(&priv->lock, flags); 826 ++dev->stats.tx_dropped; 827 dev_kfree_skb_any(skb); 828 return; 829 } 830 831 path = __path_find(dev, daddr + 4); 832 if (!path) { 833 path = path_rec_create(dev, daddr + 4); 834 if (!path) 835 goto err_path; 836 837 __path_add(dev, path); 838 } 839 840 list_add_tail(&neigh->list, &path->neigh_list); 841 842 if (path->ah) { 843 kref_get(&path->ah->ref); 844 neigh->ah = path->ah; 845 846 if (ipoib_cm_enabled(dev, neigh->daddr)) { 847 if (!ipoib_cm_get(neigh)) 848 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh)); 849 if (!ipoib_cm_get(neigh)) { 850 ipoib_neigh_free(neigh); 851 goto err_drop; 852 } 853 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) 854 __skb_queue_tail(&neigh->queue, skb); 855 else { 856 ipoib_warn(priv, "queue length limit %d. Packet drop.\n", 857 skb_queue_len(&neigh->queue)); 858 goto err_drop; 859 } 860 } else { 861 spin_unlock_irqrestore(&priv->lock, flags); 862 ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr)); 863 ipoib_neigh_put(neigh); 864 return; 865 } 866 } else { 867 neigh->ah = NULL; 868 869 if (!path->query && path_rec_start(dev, path)) 870 goto err_path; 871 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) 872 __skb_queue_tail(&neigh->queue, skb); 873 else 874 goto err_drop; 875 } 876 877 spin_unlock_irqrestore(&priv->lock, flags); 878 ipoib_neigh_put(neigh); 879 return; 880 881 err_path: 882 ipoib_neigh_free(neigh); 883 err_drop: 884 ++dev->stats.tx_dropped; 885 dev_kfree_skb_any(skb); 886 887 spin_unlock_irqrestore(&priv->lock, flags); 888 ipoib_neigh_put(neigh); 889 } 890 891 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 892 struct ipoib_cb *cb) 893 { 894 struct ipoib_dev_priv *priv = netdev_priv(dev); 895 struct ipoib_path *path; 896 unsigned long flags; 897 898 spin_lock_irqsave(&priv->lock, flags); 899 900 path = __path_find(dev, cb->hwaddr + 4); 901 if (!path || !path->valid) { 902 int new_path = 0; 903 904 if (!path) { 905 path = path_rec_create(dev, cb->hwaddr + 4); 906 new_path = 1; 907 } 908 if (path) { 909 if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 910 __skb_queue_tail(&path->queue, skb); 911 } else { 912 ++dev->stats.tx_dropped; 913 dev_kfree_skb_any(skb); 914 } 915 916 if (!path->query && path_rec_start(dev, path)) { 917 spin_unlock_irqrestore(&priv->lock, flags); 918 if (new_path) 919 path_free(dev, path); 920 return; 921 } else 922 __path_add(dev, path); 923 } else { 924 ++dev->stats.tx_dropped; 925 dev_kfree_skb_any(skb); 926 } 927 928 spin_unlock_irqrestore(&priv->lock, flags); 929 return; 930 } 931 932 if (path->ah) { 933 ipoib_dbg(priv, "Send unicast ARP to %04x\n", 934 be16_to_cpu(path->pathrec.dlid)); 935 936 spin_unlock_irqrestore(&priv->lock, flags); 
937 ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); 938 return; 939 } else if ((path->query || !path_rec_start(dev, path)) && 940 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 941 __skb_queue_tail(&path->queue, skb); 942 } else { 943 ++dev->stats.tx_dropped; 944 dev_kfree_skb_any(skb); 945 } 946 947 spin_unlock_irqrestore(&priv->lock, flags); 948 } 949 950 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 951 { 952 struct ipoib_dev_priv *priv = netdev_priv(dev); 953 struct ipoib_neigh *neigh; 954 struct ipoib_cb *cb = ipoib_skb_cb(skb); 955 struct ipoib_header *header; 956 unsigned long flags; 957 958 header = (struct ipoib_header *) skb->data; 959 960 if (unlikely(cb->hwaddr[4] == 0xff)) { 961 /* multicast, arrange "if" according to probability */ 962 if ((header->proto != htons(ETH_P_IP)) && 963 (header->proto != htons(ETH_P_IPV6)) && 964 (header->proto != htons(ETH_P_ARP)) && 965 (header->proto != htons(ETH_P_RARP)) && 966 (header->proto != htons(ETH_P_TIPC))) { 967 /* ethertype not supported by IPoIB */ 968 ++dev->stats.tx_dropped; 969 dev_kfree_skb_any(skb); 970 return NETDEV_TX_OK; 971 } 972 /* Add in the P_Key for multicast*/ 973 cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; 974 cb->hwaddr[9] = priv->pkey & 0xff; 975 976 neigh = ipoib_neigh_get(dev, cb->hwaddr); 977 if (likely(neigh)) 978 goto send_using_neigh; 979 ipoib_mcast_send(dev, cb->hwaddr, skb); 980 return NETDEV_TX_OK; 981 } 982 983 /* unicast, arrange "switch" according to probability */ 984 switch (header->proto) { 985 case htons(ETH_P_IP): 986 case htons(ETH_P_IPV6): 987 case htons(ETH_P_TIPC): 988 neigh = ipoib_neigh_get(dev, cb->hwaddr); 989 if (unlikely(!neigh)) { 990 neigh_add_path(skb, cb->hwaddr, dev); 991 return NETDEV_TX_OK; 992 } 993 break; 994 case htons(ETH_P_ARP): 995 case htons(ETH_P_RARP): 996 /* for unicast ARP and RARP should always perform path find */ 997 unicast_arp_send(skb, dev, cb); 998 return NETDEV_TX_OK; 999 default: 1000 /* ethertype not supported by IPoIB */ 1001 ++dev->stats.tx_dropped; 1002 dev_kfree_skb_any(skb); 1003 return NETDEV_TX_OK; 1004 } 1005 1006 send_using_neigh: 1007 /* note we now hold a ref to neigh */ 1008 if (ipoib_cm_get(neigh)) { 1009 if (ipoib_cm_up(neigh)) { 1010 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 1011 goto unref; 1012 } 1013 } else if (neigh->ah) { 1014 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr)); 1015 goto unref; 1016 } 1017 1018 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 1019 spin_lock_irqsave(&priv->lock, flags); 1020 __skb_queue_tail(&neigh->queue, skb); 1021 spin_unlock_irqrestore(&priv->lock, flags); 1022 } else { 1023 ++dev->stats.tx_dropped; 1024 dev_kfree_skb_any(skb); 1025 } 1026 1027 unref: 1028 ipoib_neigh_put(neigh); 1029 1030 return NETDEV_TX_OK; 1031 } 1032 1033 static void ipoib_timeout(struct net_device *dev) 1034 { 1035 struct ipoib_dev_priv *priv = netdev_priv(dev); 1036 1037 ipoib_warn(priv, "transmit timeout: latency %d msecs\n", 1038 jiffies_to_msecs(jiffies - dev->trans_start)); 1039 ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", 1040 netif_queue_stopped(dev), 1041 priv->tx_head, priv->tx_tail); 1042 /* XXX reset QP, etc. 
 */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * We don't rely on the dst_entry structure; always stuff the
	 * destination address into skb->cb so we can figure out where
	 * to send the packet later.
	 */
	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

	return sizeof *header;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);
	struct net_device *dev = priv->dev;

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle
possible race condition */ 1170 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1171 goto out_unlock; 1172 1173 for (i = 0; i < htbl->size; i++) { 1174 struct ipoib_neigh *neigh; 1175 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1176 1177 while ((neigh = rcu_dereference_protected(*np, 1178 lockdep_is_held(&priv->lock))) != NULL) { 1179 /* was the neigh idle for two GC periods */ 1180 if (time_after(neigh_obsolete, neigh->alive)) { 1181 1182 ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list); 1183 1184 rcu_assign_pointer(*np, 1185 rcu_dereference_protected(neigh->hnext, 1186 lockdep_is_held(&priv->lock))); 1187 /* remove from path/mc list */ 1188 list_del(&neigh->list); 1189 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1190 } else { 1191 np = &neigh->hnext; 1192 } 1193 1194 } 1195 } 1196 1197 out_unlock: 1198 spin_unlock_irqrestore(&priv->lock, flags); 1199 ipoib_mcast_remove_list(dev, &remove_list); 1200 } 1201 1202 static void ipoib_reap_neigh(struct work_struct *work) 1203 { 1204 struct ipoib_dev_priv *priv = 1205 container_of(work, struct ipoib_dev_priv, neigh_reap_task.work); 1206 1207 __ipoib_reap_neigh(priv); 1208 1209 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1210 queue_delayed_work(priv->wq, &priv->neigh_reap_task, 1211 arp_tbl.gc_interval); 1212 } 1213 1214 1215 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr, 1216 struct net_device *dev) 1217 { 1218 struct ipoib_neigh *neigh; 1219 1220 neigh = kzalloc(sizeof *neigh, GFP_ATOMIC); 1221 if (!neigh) 1222 return NULL; 1223 1224 neigh->dev = dev; 1225 memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr)); 1226 skb_queue_head_init(&neigh->queue); 1227 INIT_LIST_HEAD(&neigh->list); 1228 ipoib_cm_set(neigh, NULL); 1229 /* one ref on behalf of the caller */ 1230 atomic_set(&neigh->refcnt, 1); 1231 1232 return neigh; 1233 } 1234 1235 struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, 1236 struct net_device *dev) 1237 { 1238 struct ipoib_dev_priv *priv = netdev_priv(dev); 1239 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1240 struct ipoib_neigh_hash *htbl; 1241 struct ipoib_neigh *neigh; 1242 u32 hash_val; 1243 1244 htbl = rcu_dereference_protected(ntbl->htbl, 1245 lockdep_is_held(&priv->lock)); 1246 if (!htbl) { 1247 neigh = NULL; 1248 goto out_unlock; 1249 } 1250 1251 /* need to add a new neigh, but maybe some other thread succeeded? 
	 * recalc the hash, since a hash resize may have taken place, and search again
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neighs connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs that belong to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC if it is still running; when called from an init failure
	 * path the queued work must still be cancelled */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}


int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	/*
	 * Must be after ipoib_ib_dev_init so we can allocate a per-device
	 * wq there and use it here
	 */
	if (ipoib_neigh_hash_init(priv) < 0)
		goto out_dev_uninit;

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	/*
	 * Must be before ipoib_ib_dev_cleanup or we delete an in-use
	 * work queue
	 */
	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_uninit = ipoib_uninit,
	.ndo_open = ipoib_open,
	.ndo_stop = ipoib_stop,
	.ndo_change_mtu = ipoib_change_mtu,
	.ndo_fix_features = ipoib_fix_features,
	.ndo_start_xmit = ipoib_start_xmit,
	.ndo_tx_timeout = ipoib_timeout,
	.ndo_set_rx_mode = ipoib_set_mcast_list,
	.ndo_get_iflink = ipoib_get_iflink,
};

void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops = &ipoib_netdev_ops;
	dev->header_ops = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev,
&priv->napi, ipoib_poll, NAPI_POLL_WEIGHT); 1620 1621 dev->watchdog_timeo = HZ; 1622 1623 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1624 1625 dev->hard_header_len = IPOIB_ENCAP_LEN; 1626 dev->addr_len = INFINIBAND_ALEN; 1627 dev->type = ARPHRD_INFINIBAND; 1628 dev->tx_queue_len = ipoib_sendq_size * 2; 1629 dev->features = (NETIF_F_VLAN_CHALLENGED | 1630 NETIF_F_HIGHDMA); 1631 netif_keep_dst(dev); 1632 1633 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 1634 1635 priv->dev = dev; 1636 1637 spin_lock_init(&priv->lock); 1638 1639 init_rwsem(&priv->vlan_rwsem); 1640 1641 INIT_LIST_HEAD(&priv->path_list); 1642 INIT_LIST_HEAD(&priv->child_intfs); 1643 INIT_LIST_HEAD(&priv->dead_ahs); 1644 INIT_LIST_HEAD(&priv->multicast_list); 1645 1646 INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); 1647 INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); 1648 INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); 1649 INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal); 1650 INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy); 1651 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); 1652 INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); 1653 INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh); 1654 } 1655 1656 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) 1657 { 1658 struct net_device *dev; 1659 1660 dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name, 1661 NET_NAME_UNKNOWN, ipoib_setup); 1662 if (!dev) 1663 return NULL; 1664 1665 return netdev_priv(dev); 1666 } 1667 1668 static ssize_t show_pkey(struct device *dev, 1669 struct device_attribute *attr, char *buf) 1670 { 1671 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev)); 1672 1673 return sprintf(buf, "0x%04x\n", priv->pkey); 1674 } 1675 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 1676 1677 static ssize_t show_umcast(struct device *dev, 1678 struct device_attribute *attr, char *buf) 1679 { 1680 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev)); 1681 1682 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags)); 1683 } 1684 1685 void ipoib_set_umcast(struct net_device *ndev, int umcast_val) 1686 { 1687 struct ipoib_dev_priv *priv = netdev_priv(ndev); 1688 1689 if (umcast_val > 0) { 1690 set_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1691 ipoib_warn(priv, "ignoring multicast groups joined directly " 1692 "by userspace\n"); 1693 } else 1694 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1695 } 1696 1697 static ssize_t set_umcast(struct device *dev, 1698 struct device_attribute *attr, 1699 const char *buf, size_t count) 1700 { 1701 unsigned long umcast_val = simple_strtoul(buf, NULL, 0); 1702 1703 ipoib_set_umcast(to_net_dev(dev), umcast_val); 1704 1705 return count; 1706 } 1707 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast); 1708 1709 int ipoib_add_umcast_attr(struct net_device *dev) 1710 { 1711 return device_create_file(&dev->dev, &dev_attr_umcast); 1712 } 1713 1714 static ssize_t create_child(struct device *dev, 1715 struct device_attribute *attr, 1716 const char *buf, size_t count) 1717 { 1718 int pkey; 1719 int ret; 1720 1721 if (sscanf(buf, "%i", &pkey) != 1) 1722 return -EINVAL; 1723 1724 if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000) 1725 return -EINVAL; 1726 1727 /* 1728 * Set the full membership bit, so that we join the right 1729 * broadcast group, etc. 1730 */ 1731 pkey |= 0x8000; 1732 1733 ret = ipoib_vlan_add(to_net_dev(dev), pkey); 1734 1735 return ret ? 
ret : count; 1736 } 1737 static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child); 1738 1739 static ssize_t delete_child(struct device *dev, 1740 struct device_attribute *attr, 1741 const char *buf, size_t count) 1742 { 1743 int pkey; 1744 int ret; 1745 1746 if (sscanf(buf, "%i", &pkey) != 1) 1747 return -EINVAL; 1748 1749 if (pkey < 0 || pkey > 0xffff) 1750 return -EINVAL; 1751 1752 ret = ipoib_vlan_delete(to_net_dev(dev), pkey); 1753 1754 return ret ? ret : count; 1755 1756 } 1757 static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child); 1758 1759 int ipoib_add_pkey_attr(struct net_device *dev) 1760 { 1761 return device_create_file(&dev->dev, &dev_attr_pkey); 1762 } 1763 1764 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) 1765 { 1766 priv->hca_caps = hca->attrs.device_cap_flags; 1767 1768 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { 1769 priv->dev->hw_features = NETIF_F_SG | 1770 NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 1771 1772 if (priv->hca_caps & IB_DEVICE_UD_TSO) 1773 priv->dev->hw_features |= NETIF_F_TSO; 1774 1775 priv->dev->features |= priv->dev->hw_features; 1776 } 1777 1778 return 0; 1779 } 1780 1781 static struct net_device *ipoib_add_port(const char *format, 1782 struct ib_device *hca, u8 port) 1783 { 1784 struct ipoib_dev_priv *priv; 1785 struct ib_port_attr attr; 1786 int result = -ENOMEM; 1787 1788 priv = ipoib_intf_alloc(format); 1789 if (!priv) 1790 goto alloc_mem_failed; 1791 1792 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1793 priv->dev->dev_id = port - 1; 1794 1795 result = ib_query_port(hca, port, &attr); 1796 if (!result) 1797 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 1798 else { 1799 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 1800 hca->name, port); 1801 goto device_init_failed; 1802 } 1803 1804 /* MTU will be reset when mcast join happens */ 1805 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 1806 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 1807 1808 priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh); 1809 1810 result = ib_query_pkey(hca, port, 0, &priv->pkey); 1811 if (result) { 1812 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", 1813 hca->name, port, result); 1814 goto device_init_failed; 1815 } 1816 1817 result = ipoib_set_dev_features(priv, hca); 1818 if (result) 1819 goto device_init_failed; 1820 1821 /* 1822 * Set the full membership bit, so that we join the right 1823 * broadcast group, etc. 
1824 */ 1825 priv->pkey |= 0x8000; 1826 1827 priv->dev->broadcast[8] = priv->pkey >> 8; 1828 priv->dev->broadcast[9] = priv->pkey & 0xff; 1829 1830 result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL); 1831 if (result) { 1832 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 1833 hca->name, port, result); 1834 goto device_init_failed; 1835 } else 1836 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 1837 1838 result = ipoib_dev_init(priv->dev, hca, port); 1839 if (result < 0) { 1840 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 1841 hca->name, port, result); 1842 goto device_init_failed; 1843 } 1844 1845 INIT_IB_EVENT_HANDLER(&priv->event_handler, 1846 priv->ca, ipoib_event); 1847 result = ib_register_event_handler(&priv->event_handler); 1848 if (result < 0) { 1849 printk(KERN_WARNING "%s: ib_register_event_handler failed for " 1850 "port %d (ret = %d)\n", 1851 hca->name, port, result); 1852 goto event_failed; 1853 } 1854 1855 result = register_netdev(priv->dev); 1856 if (result) { 1857 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1858 hca->name, port, result); 1859 goto register_failed; 1860 } 1861 1862 ipoib_create_debug_files(priv->dev); 1863 1864 if (ipoib_cm_add_mode_attr(priv->dev)) 1865 goto sysfs_failed; 1866 if (ipoib_add_pkey_attr(priv->dev)) 1867 goto sysfs_failed; 1868 if (ipoib_add_umcast_attr(priv->dev)) 1869 goto sysfs_failed; 1870 if (device_create_file(&priv->dev->dev, &dev_attr_create_child)) 1871 goto sysfs_failed; 1872 if (device_create_file(&priv->dev->dev, &dev_attr_delete_child)) 1873 goto sysfs_failed; 1874 1875 return priv->dev; 1876 1877 sysfs_failed: 1878 ipoib_delete_debug_files(priv->dev); 1879 unregister_netdev(priv->dev); 1880 1881 register_failed: 1882 ib_unregister_event_handler(&priv->event_handler); 1883 flush_workqueue(ipoib_workqueue); 1884 /* Stop GC if started before flush */ 1885 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1886 cancel_delayed_work(&priv->neigh_reap_task); 1887 flush_workqueue(priv->wq); 1888 1889 event_failed: 1890 ipoib_dev_cleanup(priv->dev); 1891 1892 device_init_failed: 1893 free_netdev(priv->dev); 1894 1895 alloc_mem_failed: 1896 return ERR_PTR(result); 1897 } 1898 1899 static void ipoib_add_one(struct ib_device *device) 1900 { 1901 struct list_head *dev_list; 1902 struct net_device *dev; 1903 struct ipoib_dev_priv *priv; 1904 int p; 1905 int count = 0; 1906 1907 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1908 if (!dev_list) 1909 return; 1910 1911 INIT_LIST_HEAD(dev_list); 1912 1913 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { 1914 if (!rdma_protocol_ib(device, p)) 1915 continue; 1916 dev = ipoib_add_port("ib%d", device, p); 1917 if (!IS_ERR(dev)) { 1918 priv = netdev_priv(dev); 1919 list_add_tail(&priv->list, dev_list); 1920 count++; 1921 } 1922 } 1923 1924 if (!count) { 1925 kfree(dev_list); 1926 return; 1927 } 1928 1929 ib_set_client_data(device, &ipoib_client, dev_list); 1930 } 1931 1932 static void ipoib_remove_one(struct ib_device *device, void *client_data) 1933 { 1934 struct ipoib_dev_priv *priv, *tmp; 1935 struct list_head *dev_list = client_data; 1936 1937 if (!dev_list) 1938 return; 1939 1940 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1941 ib_unregister_event_handler(&priv->event_handler); 1942 flush_workqueue(ipoib_workqueue); 1943 1944 rtnl_lock(); 1945 dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); 1946 rtnl_unlock(); 1947 1948 /* Stop GC */ 1949 
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1950 cancel_delayed_work(&priv->neigh_reap_task); 1951 flush_workqueue(priv->wq); 1952 1953 unregister_netdev(priv->dev); 1954 free_netdev(priv->dev); 1955 } 1956 1957 kfree(dev_list); 1958 } 1959 1960 static int __init ipoib_init_module(void) 1961 { 1962 int ret; 1963 1964 ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size); 1965 ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE); 1966 ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE); 1967 1968 ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size); 1969 ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE); 1970 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); 1971 #ifdef CONFIG_INFINIBAND_IPOIB_CM 1972 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 1973 #endif 1974 1975 /* 1976 * When copying small received packets, we only copy from the 1977 * linear data part of the SKB, so we rely on this condition. 1978 */ 1979 BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE); 1980 1981 ret = ipoib_register_debugfs(); 1982 if (ret) 1983 return ret; 1984 1985 /* 1986 * We create a global workqueue here that is used for all flush 1987 * operations. However, if you attempt to flush a workqueue 1988 * from a task on that same workqueue, it deadlocks the system. 1989 * We want to be able to flush the tasks associated with a 1990 * specific net device, so we also create a workqueue for each 1991 * netdevice. We queue up the tasks for that device only on 1992 * its private workqueue, and we only queue up flush events 1993 * on our global flush workqueue. This avoids the deadlocks. 1994 */ 1995 ipoib_workqueue = create_singlethread_workqueue("ipoib_flush"); 1996 if (!ipoib_workqueue) { 1997 ret = -ENOMEM; 1998 goto err_fs; 1999 } 2000 2001 ib_sa_register_client(&ipoib_sa_client); 2002 2003 ret = ib_register_client(&ipoib_client); 2004 if (ret) 2005 goto err_sa; 2006 2007 ret = ipoib_netlink_init(); 2008 if (ret) 2009 goto err_client; 2010 2011 return 0; 2012 2013 err_client: 2014 ib_unregister_client(&ipoib_client); 2015 2016 err_sa: 2017 ib_sa_unregister_client(&ipoib_sa_client); 2018 destroy_workqueue(ipoib_workqueue); 2019 2020 err_fs: 2021 ipoib_unregister_debugfs(); 2022 2023 return ret; 2024 } 2025 2026 static void __exit ipoib_cleanup_module(void) 2027 { 2028 ipoib_netlink_fini(); 2029 ib_unregister_client(&ipoib_client); 2030 ib_sa_unregister_client(&ipoib_sa_client); 2031 ipoib_unregister_debugfs(); 2032 destroy_workqueue(ipoib_workqueue); 2033 } 2034 2035 module_init(ipoib_init_module); 2036 module_exit(ipoib_cleanup_module); 2037