/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid,
		const struct sockaddr *addr,
		void *client_data);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
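		/* Note: in_dev_get() takes a reference on the device's
		 * IPv4 configuration; it is balanced by the in_dev_put()
		 * below. */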
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}

/**
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}

/**
 * Find a net_device matching the given address, which is an upper device of
 * the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct net_device *upper,
			  *result = NULL;
	struct list_head *iter;

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		result = dev;
		goto out;
	}

	netdev_for_each_all_upper_dev_rcu(dev, upper, iter) {
		if (ipoib_is_dev_match_addr_rcu(addr, upper)) {
			dev_hold(upper);
			result = upper;
			break;
		}
	}
out:
	rcu_read_unlock();
	return result;
}

/* Returns the number of IPoIB netdevs on top of a given ipoib device
 * matching a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}

/* Returns the number of matching net_devs found (between 0 and 2).
 * Also return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}

static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}

int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		rtnl_unlock();
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		rtnl_lock();
		return 0;
	}

	return -EINVAL;
}

static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
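
/*
 * Path resolution flow (summary of the code below): path_rec_start() issues
 * an asynchronous SA path record query, and path_rec_completion() then runs
 * in the ib_sa callback context, builds an address handle from the answer,
 * and retransmits any skbs that were queued while the path was unresolved.
 */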

static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
			__skb_queue_tail(&neigh->queue, skb);
		else
			goto err_drop;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_cb *cb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, cb->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, cb->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
				__skb_queue_tail(&path->queue, skb);
			} else {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
			}

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
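		/* priv->lock is dropped before the skb is handed to
		 * ipoib_send(), matching the pattern used for ipoib_send()
		 * elsewhere in this file. */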
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);
	struct ipoib_header *header;
	unsigned long flags;

	header = (struct ipoib_header *) skb->data;

	if (unlikely(cb->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		cb->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, cb->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, cb->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path lookup */
		unicast_arp_send(skb, dev, cb);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;
	struct ipoib_cb *cb = ipoib_skb_cb(skb);

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on the dst_entry structure; always stuff the
	 * destination address into skb->cb so we can figure out where
	 * to send the packet later.
	 */
	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

	return sizeof *header;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used as one can not connect to
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}


static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}

struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc the hash: a hash resize may have taken place, so search again
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
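	/* Size the table from the kernel neighbour GC threshold; a
	 * power-of-two size lets the bucket mask (size - 1) stand in
	 * for a modulo in ipoib_addr_hash(). */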
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC; if this is called on an init failure we need to
	 * cancel the work ourselves */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}


int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	/*
	 * Must be after ipoib_ib_dev_init so we can allocate a per
	 * device wq there and use it here
	 */
	if (ipoib_neigh_hash_init(priv) < 0)
		goto out_dev_uninit;

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	/*
	 * Must be before ipoib_ib_dev_cleanup or we delete an in use
	 * work queue
	 */
	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
};

void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops	 = &ipoib_netdev_ops;
	dev->header_ops	 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_ENCAP_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	init_rwsem(&priv->vlan_rwsem);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
			   NET_NAME_UNKNOWN, ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;

}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_SG |
			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (!result)
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	result = ipoib_set_dev_features(priv, hca);
	if (result)
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
1837 */ 1838 priv->pkey |= 0x8000; 1839 1840 priv->dev->broadcast[8] = priv->pkey >> 8; 1841 priv->dev->broadcast[9] = priv->pkey & 0xff; 1842 1843 result = ib_query_gid(hca, port, 0, &priv->local_gid); 1844 if (result) { 1845 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 1846 hca->name, port, result); 1847 goto device_init_failed; 1848 } else 1849 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 1850 1851 result = ipoib_dev_init(priv->dev, hca, port); 1852 if (result < 0) { 1853 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 1854 hca->name, port, result); 1855 goto device_init_failed; 1856 } 1857 1858 INIT_IB_EVENT_HANDLER(&priv->event_handler, 1859 priv->ca, ipoib_event); 1860 result = ib_register_event_handler(&priv->event_handler); 1861 if (result < 0) { 1862 printk(KERN_WARNING "%s: ib_register_event_handler failed for " 1863 "port %d (ret = %d)\n", 1864 hca->name, port, result); 1865 goto event_failed; 1866 } 1867 1868 result = register_netdev(priv->dev); 1869 if (result) { 1870 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1871 hca->name, port, result); 1872 goto register_failed; 1873 } 1874 1875 ipoib_create_debug_files(priv->dev); 1876 1877 if (ipoib_cm_add_mode_attr(priv->dev)) 1878 goto sysfs_failed; 1879 if (ipoib_add_pkey_attr(priv->dev)) 1880 goto sysfs_failed; 1881 if (ipoib_add_umcast_attr(priv->dev)) 1882 goto sysfs_failed; 1883 if (device_create_file(&priv->dev->dev, &dev_attr_create_child)) 1884 goto sysfs_failed; 1885 if (device_create_file(&priv->dev->dev, &dev_attr_delete_child)) 1886 goto sysfs_failed; 1887 1888 return priv->dev; 1889 1890 sysfs_failed: 1891 ipoib_delete_debug_files(priv->dev); 1892 unregister_netdev(priv->dev); 1893 1894 register_failed: 1895 ib_unregister_event_handler(&priv->event_handler); 1896 flush_workqueue(ipoib_workqueue); 1897 /* Stop GC if started before flush */ 1898 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1899 cancel_delayed_work(&priv->neigh_reap_task); 1900 flush_workqueue(priv->wq); 1901 1902 event_failed: 1903 ipoib_dev_cleanup(priv->dev); 1904 1905 device_init_failed: 1906 free_netdev(priv->dev); 1907 1908 alloc_mem_failed: 1909 return ERR_PTR(result); 1910 } 1911 1912 static void ipoib_add_one(struct ib_device *device) 1913 { 1914 struct list_head *dev_list; 1915 struct net_device *dev; 1916 struct ipoib_dev_priv *priv; 1917 int p; 1918 int count = 0; 1919 1920 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1921 if (!dev_list) 1922 return; 1923 1924 INIT_LIST_HEAD(dev_list); 1925 1926 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { 1927 if (!rdma_protocol_ib(device, p)) 1928 continue; 1929 dev = ipoib_add_port("ib%d", device, p); 1930 if (!IS_ERR(dev)) { 1931 priv = netdev_priv(dev); 1932 list_add_tail(&priv->list, dev_list); 1933 count++; 1934 } 1935 } 1936 1937 if (!count) { 1938 kfree(dev_list); 1939 return; 1940 } 1941 1942 ib_set_client_data(device, &ipoib_client, dev_list); 1943 } 1944 1945 static void ipoib_remove_one(struct ib_device *device, void *client_data) 1946 { 1947 struct ipoib_dev_priv *priv, *tmp; 1948 struct list_head *dev_list = client_data; 1949 1950 if (!dev_list) 1951 return; 1952 1953 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1954 ib_unregister_event_handler(&priv->event_handler); 1955 flush_workqueue(ipoib_workqueue); 1956 1957 rtnl_lock(); 1958 dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); 1959 rtnl_unlock(); 1960 1961 /* Stop GC */ 1962 
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);
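
/*
 * Usage note (illustrative, not part of the driver): the ring sizes above
 * can be tuned at load time. ipoib_init_module() rounds both values up to a
 * power of two and clamps them to the supported range, so for example:
 *
 *   modprobe ib_ipoib send_queue_size=256 recv_queue_size=512
 */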