1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include "ipoib.h" 36 37 #include <linux/module.h> 38 39 #include <linux/init.h> 40 #include <linux/slab.h> 41 #include <linux/kernel.h> 42 #include <linux/vmalloc.h> 43 44 #include <linux/if_arp.h> /* For ARPHRD_xxx */ 45 46 #include <linux/ip.h> 47 #include <linux/in.h> 48 49 #include <linux/jhash.h> 50 #include <net/arp.h> 51 #include <net/addrconf.h> 52 #include <linux/inetdevice.h> 53 #include <rdma/ib_cache.h> 54 55 #define DRV_VERSION "1.0.0" 56 57 const char ipoib_driver_version[] = DRV_VERSION; 58 59 MODULE_AUTHOR("Roland Dreier"); 60 MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); 61 MODULE_LICENSE("Dual BSD/GPL"); 62 63 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; 64 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; 65 66 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444); 67 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); 68 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); 69 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); 70 71 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 72 int ipoib_debug_level; 73 74 module_param_named(debug_level, ipoib_debug_level, int, 0644); 75 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 76 #endif 77 78 struct ipoib_path_iter { 79 struct net_device *dev; 80 struct ipoib_path path; 81 }; 82 83 static const u8 ipv4_bcast_addr[] = { 84 0x00, 0xff, 0xff, 0xff, 85 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, 86 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff 87 }; 88 89 struct workqueue_struct *ipoib_workqueue; 90 91 struct ib_sa_client ipoib_sa_client; 92 93 static void ipoib_add_one(struct ib_device *device); 94 static void ipoib_remove_one(struct ib_device *device, void *client_data); 95 static void ipoib_neigh_reclaim(struct rcu_head *rp); 96 static struct net_device *ipoib_get_net_dev_by_params( 97 struct ib_device *dev, u8 port, u16 pkey, 98 const union ib_gid *gid, const struct sockaddr *addr, 99 void *client_data); 100 static int ipoib_set_mac(struct net_device *dev, void *addr); 101 static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr, 102 int cmd); 103 104 static struct ib_client ipoib_client = { 105 .name = "ipoib", 106 .add = ipoib_add_one, 107 .remove = ipoib_remove_one, 108 .get_net_dev_by_params = ipoib_get_net_dev_by_params, 109 }; 110 111 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 112 static int ipoib_netdev_event(struct notifier_block *this, 113 unsigned long event, void *ptr) 114 { 115 struct netdev_notifier_info *ni = ptr; 116 struct net_device *dev = ni->dev; 117 118 if (dev->netdev_ops->ndo_open != ipoib_open) 119 return NOTIFY_DONE; 120 121 switch (event) { 122 case NETDEV_REGISTER: 123 ipoib_create_debug_files(dev); 124 break; 125 case NETDEV_CHANGENAME: 126 ipoib_delete_debug_files(dev); 127 ipoib_create_debug_files(dev); 128 break; 129 case NETDEV_UNREGISTER: 130 ipoib_delete_debug_files(dev); 131 break; 132 } 133 134 return NOTIFY_DONE; 135 } 136 #endif 137 138 int ipoib_open(struct net_device *dev) 139 { 140 struct ipoib_dev_priv *priv = ipoib_priv(dev); 141 142 ipoib_dbg(priv, "bringing up interface\n"); 143 144 netif_carrier_off(dev); 145 146 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 147 148 priv->sm_fullmember_sendonly_support = false; 149 150 if (ipoib_ib_dev_open(dev)) { 151 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 152 return 0; 153 goto err_disable; 154 } 155 156 ipoib_ib_dev_up(dev); 157 158 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 159 struct ipoib_dev_priv *cpriv; 160 161 /* Bring up any child interfaces too */ 162 down_read(&priv->vlan_rwsem); 163 list_for_each_entry(cpriv, &priv->child_intfs, list) { 164 int flags; 165 166 flags = cpriv->dev->flags; 167 if (flags & IFF_UP) 168 continue; 169 170 dev_change_flags(cpriv->dev, flags | IFF_UP); 171 } 172 up_read(&priv->vlan_rwsem); 173 } 174 175 netif_start_queue(dev); 176 177 return 0; 178 179 err_disable: 180 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 181 182 return -EINVAL; 183 } 184 185 static int ipoib_stop(struct net_device *dev) 186 { 187 struct ipoib_dev_priv *priv = ipoib_priv(dev); 188 189 ipoib_dbg(priv, "stopping interface\n"); 190 191 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 192 193 netif_stop_queue(dev); 194 195 ipoib_ib_dev_down(dev); 196 ipoib_ib_dev_stop(dev); 197 198 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 199 struct ipoib_dev_priv *cpriv; 200 201 /* Bring down any child interfaces too */ 202 down_read(&priv->vlan_rwsem); 203 list_for_each_entry(cpriv, &priv->child_intfs, list) { 204 int flags; 205 206 flags = cpriv->dev->flags; 207 if (!(flags & IFF_UP)) 208 continue; 209 210 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 211 } 212 up_read(&priv->vlan_rwsem); 213 } 214 215 return 0; 216 } 217 218 static void ipoib_uninit(struct net_device *dev) 219 { 220 ipoib_dev_cleanup(dev); 221 } 222 223 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) 224 { 225 struct ipoib_dev_priv *priv = ipoib_priv(dev); 226 227 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) 228 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 229 230 return features; 231 } 232 233 static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 234 { 235 struct ipoib_dev_priv *priv = ipoib_priv(dev); 236 int ret = 0; 237 238 /* dev->mtu > 2K ==> connected mode */ 239 if (ipoib_cm_admin_enabled(dev)) { 240 if (new_mtu > ipoib_cm_max_mtu(dev)) 241 return -EINVAL; 242 243 if (new_mtu > priv->mcast_mtu) 244 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", 245 priv->mcast_mtu); 246 247 dev->mtu = new_mtu; 248 return 0; 249 } 250 251 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) 252 return -EINVAL; 253 254 priv->admin_mtu = new_mtu; 255 256 if (priv->mcast_mtu < priv->admin_mtu) 257 ipoib_dbg(priv, "MTU must be smaller than the underlying " 258 "link layer MTU - 4 (%u)\n", priv->mcast_mtu); 259 260 new_mtu = min(priv->mcast_mtu, priv->admin_mtu); 261 262 if (priv->rn_ops->ndo_change_mtu) { 263 bool carrier_status = netif_carrier_ok(dev); 264 265 netif_carrier_off(dev); 266 267 /* notify lower level on the real mtu */ 268 ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu); 269 270 if (carrier_status) 271 netif_carrier_on(dev); 272 } else { 273 dev->mtu = new_mtu; 274 } 275 276 return ret; 277 } 278 279 static void ipoib_get_stats(struct net_device *dev, 280 struct rtnl_link_stats64 *stats) 281 { 282 struct ipoib_dev_priv *priv = ipoib_priv(dev); 283 284 if (priv->rn_ops->ndo_get_stats64) 285 priv->rn_ops->ndo_get_stats64(dev, stats); 286 else 287 netdev_stats_to_stats64(stats, &dev->stats); 288 } 289 290 /* Called with an RCU read lock taken */ 291 static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr, 292 struct net_device *dev) 293 { 294 struct net *net = dev_net(dev); 295 struct in_device *in_dev; 296 struct sockaddr_in *addr_in = (struct sockaddr_in *)addr; 297 struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr; 298 __be32 ret_addr; 299 300 switch (addr->sa_family) { 301 case AF_INET: 302 in_dev = in_dev_get(dev); 303 if (!in_dev) 304 return false; 305 306 ret_addr = inet_confirm_addr(net, in_dev, 0, 307 addr_in->sin_addr.s_addr, 308 RT_SCOPE_HOST); 309 in_dev_put(in_dev); 310 if (ret_addr) 311 return true; 312 313 break; 314 case AF_INET6: 315 if (IS_ENABLED(CONFIG_IPV6) && 316 ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1)) 317 return true; 318 319 break; 320 } 321 return false; 322 } 323 324 /** 325 * Find the master net_device on top of the given net_device. 326 * @dev: base IPoIB net_device 327 * 328 * Returns the master net_device with a reference held, or the same net_device 329 * if no master exists. 330 */ 331 static struct net_device *ipoib_get_master_net_dev(struct net_device *dev) 332 { 333 struct net_device *master; 334 335 rcu_read_lock(); 336 master = netdev_master_upper_dev_get_rcu(dev); 337 if (master) 338 dev_hold(master); 339 rcu_read_unlock(); 340 341 if (master) 342 return master; 343 344 dev_hold(dev); 345 return dev; 346 } 347 348 struct ipoib_walk_data { 349 const struct sockaddr *addr; 350 struct net_device *result; 351 }; 352 353 static int ipoib_upper_walk(struct net_device *upper, void *_data) 354 { 355 struct ipoib_walk_data *data = _data; 356 int ret = 0; 357 358 if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) { 359 dev_hold(upper); 360 data->result = upper; 361 ret = 1; 362 } 363 364 return ret; 365 } 366 367 /** 368 * Find a net_device matching the given address, which is an upper device of 369 * the given net_device. 370 * @addr: IP address to look for. 371 * @dev: base IPoIB net_device 372 * 373 * If found, returns the net_device with a reference held. Otherwise return 374 * NULL. 375 */ 376 static struct net_device *ipoib_get_net_dev_match_addr( 377 const struct sockaddr *addr, struct net_device *dev) 378 { 379 struct ipoib_walk_data data = { 380 .addr = addr, 381 }; 382 383 rcu_read_lock(); 384 if (ipoib_is_dev_match_addr_rcu(addr, dev)) { 385 dev_hold(dev); 386 data.result = dev; 387 goto out; 388 } 389 390 netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data); 391 out: 392 rcu_read_unlock(); 393 return data.result; 394 } 395 396 /* returns the number of IPoIB netdevs on top a given ipoib device matching a 397 * pkey_index and address, if one exists. 398 * 399 * @found_net_dev: contains a matching net_device if the return value >= 1, 400 * with a reference held. */ 401 static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv, 402 const union ib_gid *gid, 403 u16 pkey_index, 404 const struct sockaddr *addr, 405 int nesting, 406 struct net_device **found_net_dev) 407 { 408 struct ipoib_dev_priv *child_priv; 409 struct net_device *net_dev = NULL; 410 int matches = 0; 411 412 if (priv->pkey_index == pkey_index && 413 (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) { 414 if (!addr) { 415 net_dev = ipoib_get_master_net_dev(priv->dev); 416 } else { 417 /* Verify the net_device matches the IP address, as 418 * IPoIB child devices currently share a GID. */ 419 net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev); 420 } 421 if (net_dev) { 422 if (!*found_net_dev) 423 *found_net_dev = net_dev; 424 else 425 dev_put(net_dev); 426 ++matches; 427 } 428 } 429 430 /* Check child interfaces */ 431 down_read_nested(&priv->vlan_rwsem, nesting); 432 list_for_each_entry(child_priv, &priv->child_intfs, list) { 433 matches += ipoib_match_gid_pkey_addr(child_priv, gid, 434 pkey_index, addr, 435 nesting + 1, 436 found_net_dev); 437 if (matches > 1) 438 break; 439 } 440 up_read(&priv->vlan_rwsem); 441 442 return matches; 443 } 444 445 /* Returns the number of matching net_devs found (between 0 and 2). Also 446 * return the matching net_device in the @net_dev parameter, holding a 447 * reference to the net_device, if the number of matches >= 1 */ 448 static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port, 449 u16 pkey_index, 450 const union ib_gid *gid, 451 const struct sockaddr *addr, 452 struct net_device **net_dev) 453 { 454 struct ipoib_dev_priv *priv; 455 int matches = 0; 456 457 *net_dev = NULL; 458 459 list_for_each_entry(priv, dev_list, list) { 460 if (priv->port != port) 461 continue; 462 463 matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index, 464 addr, 0, net_dev); 465 if (matches > 1) 466 break; 467 } 468 469 return matches; 470 } 471 472 static struct net_device *ipoib_get_net_dev_by_params( 473 struct ib_device *dev, u8 port, u16 pkey, 474 const union ib_gid *gid, const struct sockaddr *addr, 475 void *client_data) 476 { 477 struct net_device *net_dev; 478 struct list_head *dev_list = client_data; 479 u16 pkey_index; 480 int matches; 481 int ret; 482 483 if (!rdma_protocol_ib(dev, port)) 484 return NULL; 485 486 ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index); 487 if (ret) 488 return NULL; 489 490 if (!dev_list) 491 return NULL; 492 493 /* See if we can find a unique device matching the L2 parameters */ 494 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, 495 gid, NULL, &net_dev); 496 497 switch (matches) { 498 case 0: 499 return NULL; 500 case 1: 501 return net_dev; 502 } 503 504 dev_put(net_dev); 505 506 /* Couldn't find a unique device with L2 parameters only. Use L3 507 * address to uniquely match the net device */ 508 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, 509 gid, addr, &net_dev); 510 switch (matches) { 511 case 0: 512 return NULL; 513 default: 514 dev_warn_ratelimited(&dev->dev, 515 "duplicate IP address detected\n"); 516 /* Fall through */ 517 case 1: 518 return net_dev; 519 } 520 } 521 522 int ipoib_set_mode(struct net_device *dev, const char *buf) 523 { 524 struct ipoib_dev_priv *priv = ipoib_priv(dev); 525 526 if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && 527 !strcmp(buf, "connected\n")) || 528 (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && 529 !strcmp(buf, "datagram\n"))) { 530 return 0; 531 } 532 533 /* flush paths if we switch modes so that connections are restarted */ 534 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 535 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 536 ipoib_warn(priv, "enabling connected mode " 537 "will cause multicast packet drops\n"); 538 netdev_update_features(dev); 539 dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); 540 rtnl_unlock(); 541 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; 542 543 ipoib_flush_paths(dev); 544 return (!rtnl_trylock()) ? -EBUSY : 0; 545 } 546 547 if (!strcmp(buf, "datagram\n")) { 548 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 549 netdev_update_features(dev); 550 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 551 rtnl_unlock(); 552 ipoib_flush_paths(dev); 553 return (!rtnl_trylock()) ? -EBUSY : 0; 554 } 555 556 return -EINVAL; 557 } 558 559 struct ipoib_path *__path_find(struct net_device *dev, void *gid) 560 { 561 struct ipoib_dev_priv *priv = ipoib_priv(dev); 562 struct rb_node *n = priv->path_tree.rb_node; 563 struct ipoib_path *path; 564 int ret; 565 566 while (n) { 567 path = rb_entry(n, struct ipoib_path, rb_node); 568 569 ret = memcmp(gid, path->pathrec.dgid.raw, 570 sizeof (union ib_gid)); 571 572 if (ret < 0) 573 n = n->rb_left; 574 else if (ret > 0) 575 n = n->rb_right; 576 else 577 return path; 578 } 579 580 return NULL; 581 } 582 583 static int __path_add(struct net_device *dev, struct ipoib_path *path) 584 { 585 struct ipoib_dev_priv *priv = ipoib_priv(dev); 586 struct rb_node **n = &priv->path_tree.rb_node; 587 struct rb_node *pn = NULL; 588 struct ipoib_path *tpath; 589 int ret; 590 591 while (*n) { 592 pn = *n; 593 tpath = rb_entry(pn, struct ipoib_path, rb_node); 594 595 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, 596 sizeof (union ib_gid)); 597 if (ret < 0) 598 n = &pn->rb_left; 599 else if (ret > 0) 600 n = &pn->rb_right; 601 else 602 return -EEXIST; 603 } 604 605 rb_link_node(&path->rb_node, pn, n); 606 rb_insert_color(&path->rb_node, &priv->path_tree); 607 608 list_add_tail(&path->list, &priv->path_list); 609 610 return 0; 611 } 612 613 static void path_free(struct net_device *dev, struct ipoib_path *path) 614 { 615 struct sk_buff *skb; 616 617 while ((skb = __skb_dequeue(&path->queue))) 618 dev_kfree_skb_irq(skb); 619 620 ipoib_dbg(ipoib_priv(dev), "path_free\n"); 621 622 /* remove all neigh connected to this path */ 623 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 624 625 if (path->ah) 626 ipoib_put_ah(path->ah); 627 628 kfree(path); 629 } 630 631 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 632 633 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) 634 { 635 struct ipoib_path_iter *iter; 636 637 iter = kmalloc(sizeof *iter, GFP_KERNEL); 638 if (!iter) 639 return NULL; 640 641 iter->dev = dev; 642 memset(iter->path.pathrec.dgid.raw, 0, 16); 643 644 if (ipoib_path_iter_next(iter)) { 645 kfree(iter); 646 return NULL; 647 } 648 649 return iter; 650 } 651 652 int ipoib_path_iter_next(struct ipoib_path_iter *iter) 653 { 654 struct ipoib_dev_priv *priv = ipoib_priv(iter->dev); 655 struct rb_node *n; 656 struct ipoib_path *path; 657 int ret = 1; 658 659 spin_lock_irq(&priv->lock); 660 661 n = rb_first(&priv->path_tree); 662 663 while (n) { 664 path = rb_entry(n, struct ipoib_path, rb_node); 665 666 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, 667 sizeof (union ib_gid)) < 0) { 668 iter->path = *path; 669 ret = 0; 670 break; 671 } 672 673 n = rb_next(n); 674 } 675 676 spin_unlock_irq(&priv->lock); 677 678 return ret; 679 } 680 681 void ipoib_path_iter_read(struct ipoib_path_iter *iter, 682 struct ipoib_path *path) 683 { 684 *path = iter->path; 685 } 686 687 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ 688 689 void ipoib_mark_paths_invalid(struct net_device *dev) 690 { 691 struct ipoib_dev_priv *priv = ipoib_priv(dev); 692 struct ipoib_path *path, *tp; 693 694 spin_lock_irq(&priv->lock); 695 696 list_for_each_entry_safe(path, tp, &priv->path_list, list) { 697 ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n", 698 be32_to_cpu(sa_path_get_dlid(&path->pathrec)), 699 path->pathrec.dgid.raw); 700 path->valid = 0; 701 } 702 703 spin_unlock_irq(&priv->lock); 704 } 705 706 static void push_pseudo_header(struct sk_buff *skb, const char *daddr) 707 { 708 struct ipoib_pseudo_header *phdr; 709 710 phdr = skb_push(skb, sizeof(*phdr)); 711 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); 712 } 713 714 void ipoib_flush_paths(struct net_device *dev) 715 { 716 struct ipoib_dev_priv *priv = ipoib_priv(dev); 717 struct ipoib_path *path, *tp; 718 LIST_HEAD(remove_list); 719 unsigned long flags; 720 721 netif_tx_lock_bh(dev); 722 spin_lock_irqsave(&priv->lock, flags); 723 724 list_splice_init(&priv->path_list, &remove_list); 725 726 list_for_each_entry(path, &remove_list, list) 727 rb_erase(&path->rb_node, &priv->path_tree); 728 729 list_for_each_entry_safe(path, tp, &remove_list, list) { 730 if (path->query) 731 ib_sa_cancel_query(path->query_id, path->query); 732 spin_unlock_irqrestore(&priv->lock, flags); 733 netif_tx_unlock_bh(dev); 734 wait_for_completion(&path->done); 735 path_free(dev, path); 736 netif_tx_lock_bh(dev); 737 spin_lock_irqsave(&priv->lock, flags); 738 } 739 740 spin_unlock_irqrestore(&priv->lock, flags); 741 netif_tx_unlock_bh(dev); 742 } 743 744 static void path_rec_completion(int status, 745 struct sa_path_rec *pathrec, 746 void *path_ptr) 747 { 748 struct ipoib_path *path = path_ptr; 749 struct net_device *dev = path->dev; 750 struct ipoib_dev_priv *priv = ipoib_priv(dev); 751 struct ipoib_ah *ah = NULL; 752 struct ipoib_ah *old_ah = NULL; 753 struct ipoib_neigh *neigh, *tn; 754 struct sk_buff_head skqueue; 755 struct sk_buff *skb; 756 unsigned long flags; 757 758 if (!status) 759 ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n", 760 be32_to_cpu(sa_path_get_dlid(pathrec)), 761 pathrec->dgid.raw); 762 else 763 ipoib_dbg(priv, "PathRec status %d for GID %pI6\n", 764 status, path->pathrec.dgid.raw); 765 766 skb_queue_head_init(&skqueue); 767 768 if (!status) { 769 struct rdma_ah_attr av; 770 771 if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av)) 772 ah = ipoib_create_ah(dev, priv->pd, &av); 773 } 774 775 spin_lock_irqsave(&priv->lock, flags); 776 777 if (!IS_ERR_OR_NULL(ah)) { 778 /* 779 * pathrec.dgid is used as the database key from the LLADDR, 780 * it must remain unchanged even if the SA returns a different 781 * GID to use in the AH. 782 */ 783 if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw, 784 sizeof(union ib_gid))) { 785 ipoib_dbg( 786 priv, 787 "%s got PathRec for gid %pI6 while asked for %pI6\n", 788 dev->name, pathrec->dgid.raw, 789 path->pathrec.dgid.raw); 790 memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw, 791 sizeof(union ib_gid)); 792 } 793 794 path->pathrec = *pathrec; 795 796 old_ah = path->ah; 797 path->ah = ah; 798 799 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", 800 ah, be32_to_cpu(sa_path_get_dlid(pathrec)), 801 pathrec->sl); 802 803 while ((skb = __skb_dequeue(&path->queue))) 804 __skb_queue_tail(&skqueue, skb); 805 806 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { 807 if (neigh->ah) { 808 WARN_ON(neigh->ah != old_ah); 809 /* 810 * Dropping the ah reference inside 811 * priv->lock is safe here, because we 812 * will hold one more reference from 813 * the original value of path->ah (ie 814 * old_ah). 815 */ 816 ipoib_put_ah(neigh->ah); 817 } 818 kref_get(&path->ah->ref); 819 neigh->ah = path->ah; 820 821 if (ipoib_cm_enabled(dev, neigh->daddr)) { 822 if (!ipoib_cm_get(neigh)) 823 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, 824 path, 825 neigh)); 826 if (!ipoib_cm_get(neigh)) { 827 ipoib_neigh_free(neigh); 828 continue; 829 } 830 } 831 832 while ((skb = __skb_dequeue(&neigh->queue))) 833 __skb_queue_tail(&skqueue, skb); 834 } 835 path->valid = 1; 836 } 837 838 path->query = NULL; 839 complete(&path->done); 840 841 spin_unlock_irqrestore(&priv->lock, flags); 842 843 if (IS_ERR_OR_NULL(ah)) 844 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 845 846 if (old_ah) 847 ipoib_put_ah(old_ah); 848 849 while ((skb = __skb_dequeue(&skqueue))) { 850 int ret; 851 skb->dev = dev; 852 ret = dev_queue_xmit(skb); 853 if (ret) 854 ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n", 855 __func__, ret); 856 } 857 } 858 859 static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path, 860 void *gid) 861 { 862 path->dev = priv->dev; 863 864 if (rdma_cap_opa_ah(priv->ca, priv->port)) 865 path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA; 866 else 867 path->pathrec.rec_type = SA_PATH_REC_TYPE_IB; 868 869 memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid)); 870 path->pathrec.sgid = priv->local_gid; 871 path->pathrec.pkey = cpu_to_be16(priv->pkey); 872 path->pathrec.numb_path = 1; 873 path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class; 874 } 875 876 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid) 877 { 878 struct ipoib_dev_priv *priv = ipoib_priv(dev); 879 struct ipoib_path *path; 880 881 if (!priv->broadcast) 882 return NULL; 883 884 path = kzalloc(sizeof *path, GFP_ATOMIC); 885 if (!path) 886 return NULL; 887 888 skb_queue_head_init(&path->queue); 889 890 INIT_LIST_HEAD(&path->neigh_list); 891 892 init_path_rec(priv, path, gid); 893 894 return path; 895 } 896 897 static int path_rec_start(struct net_device *dev, 898 struct ipoib_path *path) 899 { 900 struct ipoib_dev_priv *priv = ipoib_priv(dev); 901 902 ipoib_dbg(priv, "Start path record lookup for %pI6\n", 903 path->pathrec.dgid.raw); 904 905 init_completion(&path->done); 906 907 path->query_id = 908 ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, 909 &path->pathrec, 910 IB_SA_PATH_REC_DGID | 911 IB_SA_PATH_REC_SGID | 912 IB_SA_PATH_REC_NUMB_PATH | 913 IB_SA_PATH_REC_TRAFFIC_CLASS | 914 IB_SA_PATH_REC_PKEY, 915 1000, GFP_ATOMIC, 916 path_rec_completion, 917 path, &path->query); 918 if (path->query_id < 0) { 919 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); 920 path->query = NULL; 921 complete(&path->done); 922 return path->query_id; 923 } 924 925 return 0; 926 } 927 928 static void neigh_add_path(struct sk_buff *skb, u8 *daddr, 929 struct net_device *dev) 930 { 931 struct ipoib_dev_priv *priv = ipoib_priv(dev); 932 struct rdma_netdev *rn = netdev_priv(dev); 933 struct ipoib_path *path; 934 struct ipoib_neigh *neigh; 935 unsigned long flags; 936 937 spin_lock_irqsave(&priv->lock, flags); 938 neigh = ipoib_neigh_alloc(daddr, dev); 939 if (!neigh) { 940 spin_unlock_irqrestore(&priv->lock, flags); 941 ++dev->stats.tx_dropped; 942 dev_kfree_skb_any(skb); 943 return; 944 } 945 946 path = __path_find(dev, daddr + 4); 947 if (!path) { 948 path = path_rec_create(dev, daddr + 4); 949 if (!path) 950 goto err_path; 951 952 __path_add(dev, path); 953 } 954 955 list_add_tail(&neigh->list, &path->neigh_list); 956 957 if (path->ah) { 958 kref_get(&path->ah->ref); 959 neigh->ah = path->ah; 960 961 if (ipoib_cm_enabled(dev, neigh->daddr)) { 962 if (!ipoib_cm_get(neigh)) 963 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh)); 964 if (!ipoib_cm_get(neigh)) { 965 ipoib_neigh_free(neigh); 966 goto err_drop; 967 } 968 if (skb_queue_len(&neigh->queue) < 969 IPOIB_MAX_PATH_REC_QUEUE) { 970 push_pseudo_header(skb, neigh->daddr); 971 __skb_queue_tail(&neigh->queue, skb); 972 } else { 973 ipoib_warn(priv, "queue length limit %d. Packet drop.\n", 974 skb_queue_len(&neigh->queue)); 975 goto err_drop; 976 } 977 } else { 978 spin_unlock_irqrestore(&priv->lock, flags); 979 path->ah->last_send = rn->send(dev, skb, path->ah->ah, 980 IPOIB_QPN(daddr)); 981 ipoib_neigh_put(neigh); 982 return; 983 } 984 } else { 985 neigh->ah = NULL; 986 987 if (!path->query && path_rec_start(dev, path)) 988 goto err_path; 989 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 990 push_pseudo_header(skb, neigh->daddr); 991 __skb_queue_tail(&neigh->queue, skb); 992 } else { 993 goto err_drop; 994 } 995 } 996 997 spin_unlock_irqrestore(&priv->lock, flags); 998 ipoib_neigh_put(neigh); 999 return; 1000 1001 err_path: 1002 ipoib_neigh_free(neigh); 1003 err_drop: 1004 ++dev->stats.tx_dropped; 1005 dev_kfree_skb_any(skb); 1006 1007 spin_unlock_irqrestore(&priv->lock, flags); 1008 ipoib_neigh_put(neigh); 1009 } 1010 1011 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 1012 struct ipoib_pseudo_header *phdr) 1013 { 1014 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1015 struct rdma_netdev *rn = netdev_priv(dev); 1016 struct ipoib_path *path; 1017 unsigned long flags; 1018 1019 spin_lock_irqsave(&priv->lock, flags); 1020 1021 /* no broadcast means that all paths are (going to be) not valid */ 1022 if (!priv->broadcast) 1023 goto drop_and_unlock; 1024 1025 path = __path_find(dev, phdr->hwaddr + 4); 1026 if (!path || !path->valid) { 1027 int new_path = 0; 1028 1029 if (!path) { 1030 path = path_rec_create(dev, phdr->hwaddr + 4); 1031 new_path = 1; 1032 } 1033 if (path) { 1034 if (!new_path) 1035 /* make sure there is no changes in the existing path record */ 1036 init_path_rec(priv, path, phdr->hwaddr + 4); 1037 1038 if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 1039 push_pseudo_header(skb, phdr->hwaddr); 1040 __skb_queue_tail(&path->queue, skb); 1041 } else { 1042 ++dev->stats.tx_dropped; 1043 dev_kfree_skb_any(skb); 1044 } 1045 1046 if (!path->query && path_rec_start(dev, path)) { 1047 spin_unlock_irqrestore(&priv->lock, flags); 1048 if (new_path) 1049 path_free(dev, path); 1050 return; 1051 } else 1052 __path_add(dev, path); 1053 } else { 1054 goto drop_and_unlock; 1055 } 1056 1057 spin_unlock_irqrestore(&priv->lock, flags); 1058 return; 1059 } 1060 1061 if (path->ah) { 1062 ipoib_dbg(priv, "Send unicast ARP to %08x\n", 1063 be32_to_cpu(sa_path_get_dlid(&path->pathrec))); 1064 1065 spin_unlock_irqrestore(&priv->lock, flags); 1066 path->ah->last_send = rn->send(dev, skb, path->ah->ah, 1067 IPOIB_QPN(phdr->hwaddr)); 1068 return; 1069 } else if ((path->query || !path_rec_start(dev, path)) && 1070 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 1071 push_pseudo_header(skb, phdr->hwaddr); 1072 __skb_queue_tail(&path->queue, skb); 1073 } else { 1074 goto drop_and_unlock; 1075 } 1076 1077 spin_unlock_irqrestore(&priv->lock, flags); 1078 return; 1079 1080 drop_and_unlock: 1081 ++dev->stats.tx_dropped; 1082 dev_kfree_skb_any(skb); 1083 spin_unlock_irqrestore(&priv->lock, flags); 1084 } 1085 1086 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 1087 { 1088 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1089 struct rdma_netdev *rn = netdev_priv(dev); 1090 struct ipoib_neigh *neigh; 1091 struct ipoib_pseudo_header *phdr; 1092 struct ipoib_header *header; 1093 unsigned long flags; 1094 1095 phdr = (struct ipoib_pseudo_header *) skb->data; 1096 skb_pull(skb, sizeof(*phdr)); 1097 header = (struct ipoib_header *) skb->data; 1098 1099 if (unlikely(phdr->hwaddr[4] == 0xff)) { 1100 /* multicast, arrange "if" according to probability */ 1101 if ((header->proto != htons(ETH_P_IP)) && 1102 (header->proto != htons(ETH_P_IPV6)) && 1103 (header->proto != htons(ETH_P_ARP)) && 1104 (header->proto != htons(ETH_P_RARP)) && 1105 (header->proto != htons(ETH_P_TIPC))) { 1106 /* ethertype not supported by IPoIB */ 1107 ++dev->stats.tx_dropped; 1108 dev_kfree_skb_any(skb); 1109 return NETDEV_TX_OK; 1110 } 1111 /* Add in the P_Key for multicast*/ 1112 phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; 1113 phdr->hwaddr[9] = priv->pkey & 0xff; 1114 1115 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1116 if (likely(neigh)) 1117 goto send_using_neigh; 1118 ipoib_mcast_send(dev, phdr->hwaddr, skb); 1119 return NETDEV_TX_OK; 1120 } 1121 1122 /* unicast, arrange "switch" according to probability */ 1123 switch (header->proto) { 1124 case htons(ETH_P_IP): 1125 case htons(ETH_P_IPV6): 1126 case htons(ETH_P_TIPC): 1127 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1128 if (unlikely(!neigh)) { 1129 neigh_add_path(skb, phdr->hwaddr, dev); 1130 return NETDEV_TX_OK; 1131 } 1132 break; 1133 case htons(ETH_P_ARP): 1134 case htons(ETH_P_RARP): 1135 /* for unicast ARP and RARP should always perform path find */ 1136 unicast_arp_send(skb, dev, phdr); 1137 return NETDEV_TX_OK; 1138 default: 1139 /* ethertype not supported by IPoIB */ 1140 ++dev->stats.tx_dropped; 1141 dev_kfree_skb_any(skb); 1142 return NETDEV_TX_OK; 1143 } 1144 1145 send_using_neigh: 1146 /* note we now hold a ref to neigh */ 1147 if (ipoib_cm_get(neigh)) { 1148 if (ipoib_cm_up(neigh)) { 1149 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 1150 goto unref; 1151 } 1152 } else if (neigh->ah) { 1153 neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah, 1154 IPOIB_QPN(phdr->hwaddr)); 1155 goto unref; 1156 } 1157 1158 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 1159 push_pseudo_header(skb, phdr->hwaddr); 1160 spin_lock_irqsave(&priv->lock, flags); 1161 __skb_queue_tail(&neigh->queue, skb); 1162 spin_unlock_irqrestore(&priv->lock, flags); 1163 } else { 1164 ++dev->stats.tx_dropped; 1165 dev_kfree_skb_any(skb); 1166 } 1167 1168 unref: 1169 ipoib_neigh_put(neigh); 1170 1171 return NETDEV_TX_OK; 1172 } 1173 1174 static void ipoib_timeout(struct net_device *dev) 1175 { 1176 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1177 1178 ipoib_warn(priv, "transmit timeout: latency %d msecs\n", 1179 jiffies_to_msecs(jiffies - dev_trans_start(dev))); 1180 ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", 1181 netif_queue_stopped(dev), 1182 priv->tx_head, priv->tx_tail); 1183 /* XXX reset QP, etc. */ 1184 } 1185 1186 static int ipoib_hard_header(struct sk_buff *skb, 1187 struct net_device *dev, 1188 unsigned short type, 1189 const void *daddr, const void *saddr, unsigned len) 1190 { 1191 struct ipoib_header *header; 1192 1193 header = skb_push(skb, sizeof *header); 1194 1195 header->proto = htons(type); 1196 header->reserved = 0; 1197 1198 /* 1199 * we don't rely on dst_entry structure, always stuff the 1200 * destination address into skb hard header so we can figure out where 1201 * to send the packet later. 1202 */ 1203 push_pseudo_header(skb, daddr); 1204 1205 return IPOIB_HARD_LEN; 1206 } 1207 1208 static void ipoib_set_mcast_list(struct net_device *dev) 1209 { 1210 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1211 1212 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { 1213 ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set"); 1214 return; 1215 } 1216 1217 queue_work(priv->wq, &priv->restart_task); 1218 } 1219 1220 static int ipoib_get_iflink(const struct net_device *dev) 1221 { 1222 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1223 1224 /* parent interface */ 1225 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) 1226 return dev->ifindex; 1227 1228 /* child/vlan interface */ 1229 return priv->parent->ifindex; 1230 } 1231 1232 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) 1233 { 1234 /* 1235 * Use only the address parts that contributes to spreading 1236 * The subnet prefix is not used as one can not connect to 1237 * same remote port (GUID) using the same remote QPN via two 1238 * different subnets. 1239 */ 1240 /* qpn octets[1:4) & port GUID octets[12:20) */ 1241 u32 *d32 = (u32 *) daddr; 1242 u32 hv; 1243 1244 hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0); 1245 return hv & htbl->mask; 1246 } 1247 1248 struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) 1249 { 1250 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1251 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1252 struct ipoib_neigh_hash *htbl; 1253 struct ipoib_neigh *neigh = NULL; 1254 u32 hash_val; 1255 1256 rcu_read_lock_bh(); 1257 1258 htbl = rcu_dereference_bh(ntbl->htbl); 1259 1260 if (!htbl) 1261 goto out_unlock; 1262 1263 hash_val = ipoib_addr_hash(htbl, daddr); 1264 for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]); 1265 neigh != NULL; 1266 neigh = rcu_dereference_bh(neigh->hnext)) { 1267 if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { 1268 /* found, take one ref on behalf of the caller */ 1269 if (!atomic_inc_not_zero(&neigh->refcnt)) { 1270 /* deleted */ 1271 neigh = NULL; 1272 goto out_unlock; 1273 } 1274 1275 if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) 1276 neigh->alive = jiffies; 1277 goto out_unlock; 1278 } 1279 } 1280 1281 out_unlock: 1282 rcu_read_unlock_bh(); 1283 return neigh; 1284 } 1285 1286 static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) 1287 { 1288 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1289 struct ipoib_neigh_hash *htbl; 1290 unsigned long neigh_obsolete; 1291 unsigned long dt; 1292 unsigned long flags; 1293 int i; 1294 LIST_HEAD(remove_list); 1295 1296 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1297 return; 1298 1299 spin_lock_irqsave(&priv->lock, flags); 1300 1301 htbl = rcu_dereference_protected(ntbl->htbl, 1302 lockdep_is_held(&priv->lock)); 1303 1304 if (!htbl) 1305 goto out_unlock; 1306 1307 /* neigh is obsolete if it was idle for two GC periods */ 1308 dt = 2 * arp_tbl.gc_interval; 1309 neigh_obsolete = jiffies - dt; 1310 /* handle possible race condition */ 1311 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1312 goto out_unlock; 1313 1314 for (i = 0; i < htbl->size; i++) { 1315 struct ipoib_neigh *neigh; 1316 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1317 1318 while ((neigh = rcu_dereference_protected(*np, 1319 lockdep_is_held(&priv->lock))) != NULL) { 1320 /* was the neigh idle for two GC periods */ 1321 if (time_after(neigh_obsolete, neigh->alive)) { 1322 1323 ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list); 1324 1325 rcu_assign_pointer(*np, 1326 rcu_dereference_protected(neigh->hnext, 1327 lockdep_is_held(&priv->lock))); 1328 /* remove from path/mc list */ 1329 list_del_init(&neigh->list); 1330 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1331 } else { 1332 np = &neigh->hnext; 1333 } 1334 1335 } 1336 } 1337 1338 out_unlock: 1339 spin_unlock_irqrestore(&priv->lock, flags); 1340 ipoib_mcast_remove_list(&remove_list); 1341 } 1342 1343 static void ipoib_reap_neigh(struct work_struct *work) 1344 { 1345 struct ipoib_dev_priv *priv = 1346 container_of(work, struct ipoib_dev_priv, neigh_reap_task.work); 1347 1348 __ipoib_reap_neigh(priv); 1349 1350 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1351 queue_delayed_work(priv->wq, &priv->neigh_reap_task, 1352 arp_tbl.gc_interval); 1353 } 1354 1355 1356 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr, 1357 struct net_device *dev) 1358 { 1359 struct ipoib_neigh *neigh; 1360 1361 neigh = kzalloc(sizeof *neigh, GFP_ATOMIC); 1362 if (!neigh) 1363 return NULL; 1364 1365 neigh->dev = dev; 1366 memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr)); 1367 skb_queue_head_init(&neigh->queue); 1368 INIT_LIST_HEAD(&neigh->list); 1369 ipoib_cm_set(neigh, NULL); 1370 /* one ref on behalf of the caller */ 1371 atomic_set(&neigh->refcnt, 1); 1372 1373 return neigh; 1374 } 1375 1376 struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, 1377 struct net_device *dev) 1378 { 1379 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1380 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1381 struct ipoib_neigh_hash *htbl; 1382 struct ipoib_neigh *neigh; 1383 u32 hash_val; 1384 1385 htbl = rcu_dereference_protected(ntbl->htbl, 1386 lockdep_is_held(&priv->lock)); 1387 if (!htbl) { 1388 neigh = NULL; 1389 goto out_unlock; 1390 } 1391 1392 /* need to add a new neigh, but maybe some other thread succeeded? 1393 * recalc hash, maybe hash resize took place so we do a search 1394 */ 1395 hash_val = ipoib_addr_hash(htbl, daddr); 1396 for (neigh = rcu_dereference_protected(htbl->buckets[hash_val], 1397 lockdep_is_held(&priv->lock)); 1398 neigh != NULL; 1399 neigh = rcu_dereference_protected(neigh->hnext, 1400 lockdep_is_held(&priv->lock))) { 1401 if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { 1402 /* found, take one ref on behalf of the caller */ 1403 if (!atomic_inc_not_zero(&neigh->refcnt)) { 1404 /* deleted */ 1405 neigh = NULL; 1406 break; 1407 } 1408 neigh->alive = jiffies; 1409 goto out_unlock; 1410 } 1411 } 1412 1413 neigh = ipoib_neigh_ctor(daddr, dev); 1414 if (!neigh) 1415 goto out_unlock; 1416 1417 /* one ref on behalf of the hash table */ 1418 atomic_inc(&neigh->refcnt); 1419 neigh->alive = jiffies; 1420 /* put in hash */ 1421 rcu_assign_pointer(neigh->hnext, 1422 rcu_dereference_protected(htbl->buckets[hash_val], 1423 lockdep_is_held(&priv->lock))); 1424 rcu_assign_pointer(htbl->buckets[hash_val], neigh); 1425 atomic_inc(&ntbl->entries); 1426 1427 out_unlock: 1428 1429 return neigh; 1430 } 1431 1432 void ipoib_neigh_dtor(struct ipoib_neigh *neigh) 1433 { 1434 /* neigh reference count was dropprd to zero */ 1435 struct net_device *dev = neigh->dev; 1436 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1437 struct sk_buff *skb; 1438 if (neigh->ah) 1439 ipoib_put_ah(neigh->ah); 1440 while ((skb = __skb_dequeue(&neigh->queue))) { 1441 ++dev->stats.tx_dropped; 1442 dev_kfree_skb_any(skb); 1443 } 1444 if (ipoib_cm_get(neigh)) 1445 ipoib_cm_destroy_tx(ipoib_cm_get(neigh)); 1446 ipoib_dbg(ipoib_priv(dev), 1447 "neigh free for %06x %pI6\n", 1448 IPOIB_QPN(neigh->daddr), 1449 neigh->daddr + 4); 1450 kfree(neigh); 1451 if (atomic_dec_and_test(&priv->ntbl.entries)) { 1452 if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags)) 1453 complete(&priv->ntbl.flushed); 1454 } 1455 } 1456 1457 static void ipoib_neigh_reclaim(struct rcu_head *rp) 1458 { 1459 /* Called as a result of removal from hash table */ 1460 struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu); 1461 /* note TX context may hold another ref */ 1462 ipoib_neigh_put(neigh); 1463 } 1464 1465 void ipoib_neigh_free(struct ipoib_neigh *neigh) 1466 { 1467 struct net_device *dev = neigh->dev; 1468 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1469 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1470 struct ipoib_neigh_hash *htbl; 1471 struct ipoib_neigh __rcu **np; 1472 struct ipoib_neigh *n; 1473 u32 hash_val; 1474 1475 htbl = rcu_dereference_protected(ntbl->htbl, 1476 lockdep_is_held(&priv->lock)); 1477 if (!htbl) 1478 return; 1479 1480 hash_val = ipoib_addr_hash(htbl, neigh->daddr); 1481 np = &htbl->buckets[hash_val]; 1482 for (n = rcu_dereference_protected(*np, 1483 lockdep_is_held(&priv->lock)); 1484 n != NULL; 1485 n = rcu_dereference_protected(*np, 1486 lockdep_is_held(&priv->lock))) { 1487 if (n == neigh) { 1488 /* found */ 1489 rcu_assign_pointer(*np, 1490 rcu_dereference_protected(neigh->hnext, 1491 lockdep_is_held(&priv->lock))); 1492 /* remove from parent list */ 1493 list_del_init(&neigh->list); 1494 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1495 return; 1496 } else { 1497 np = &n->hnext; 1498 } 1499 } 1500 } 1501 1502 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) 1503 { 1504 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1505 struct ipoib_neigh_hash *htbl; 1506 struct ipoib_neigh __rcu **buckets; 1507 u32 size; 1508 1509 clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1510 ntbl->htbl = NULL; 1511 htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); 1512 if (!htbl) 1513 return -ENOMEM; 1514 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1515 size = roundup_pow_of_two(arp_tbl.gc_thresh3); 1516 buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL); 1517 if (!buckets) { 1518 kfree(htbl); 1519 return -ENOMEM; 1520 } 1521 htbl->size = size; 1522 htbl->mask = (size - 1); 1523 htbl->buckets = buckets; 1524 RCU_INIT_POINTER(ntbl->htbl, htbl); 1525 htbl->ntbl = ntbl; 1526 atomic_set(&ntbl->entries, 0); 1527 1528 /* start garbage collection */ 1529 clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1530 queue_delayed_work(priv->wq, &priv->neigh_reap_task, 1531 arp_tbl.gc_interval); 1532 1533 return 0; 1534 } 1535 1536 static void neigh_hash_free_rcu(struct rcu_head *head) 1537 { 1538 struct ipoib_neigh_hash *htbl = container_of(head, 1539 struct ipoib_neigh_hash, 1540 rcu); 1541 struct ipoib_neigh __rcu **buckets = htbl->buckets; 1542 struct ipoib_neigh_table *ntbl = htbl->ntbl; 1543 1544 kfree(buckets); 1545 kfree(htbl); 1546 complete(&ntbl->deleted); 1547 } 1548 1549 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) 1550 { 1551 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1552 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1553 struct ipoib_neigh_hash *htbl; 1554 unsigned long flags; 1555 int i; 1556 1557 /* remove all neigh connected to a given path or mcast */ 1558 spin_lock_irqsave(&priv->lock, flags); 1559 1560 htbl = rcu_dereference_protected(ntbl->htbl, 1561 lockdep_is_held(&priv->lock)); 1562 1563 if (!htbl) 1564 goto out_unlock; 1565 1566 for (i = 0; i < htbl->size; i++) { 1567 struct ipoib_neigh *neigh; 1568 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1569 1570 while ((neigh = rcu_dereference_protected(*np, 1571 lockdep_is_held(&priv->lock))) != NULL) { 1572 /* delete neighs belong to this parent */ 1573 if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) { 1574 rcu_assign_pointer(*np, 1575 rcu_dereference_protected(neigh->hnext, 1576 lockdep_is_held(&priv->lock))); 1577 /* remove from parent list */ 1578 list_del_init(&neigh->list); 1579 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1580 } else { 1581 np = &neigh->hnext; 1582 } 1583 1584 } 1585 } 1586 out_unlock: 1587 spin_unlock_irqrestore(&priv->lock, flags); 1588 } 1589 1590 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) 1591 { 1592 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1593 struct ipoib_neigh_hash *htbl; 1594 unsigned long flags; 1595 int i, wait_flushed = 0; 1596 1597 init_completion(&priv->ntbl.flushed); 1598 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1599 1600 spin_lock_irqsave(&priv->lock, flags); 1601 1602 htbl = rcu_dereference_protected(ntbl->htbl, 1603 lockdep_is_held(&priv->lock)); 1604 if (!htbl) 1605 goto out_unlock; 1606 1607 wait_flushed = atomic_read(&priv->ntbl.entries); 1608 if (!wait_flushed) 1609 goto free_htbl; 1610 1611 for (i = 0; i < htbl->size; i++) { 1612 struct ipoib_neigh *neigh; 1613 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1614 1615 while ((neigh = rcu_dereference_protected(*np, 1616 lockdep_is_held(&priv->lock))) != NULL) { 1617 rcu_assign_pointer(*np, 1618 rcu_dereference_protected(neigh->hnext, 1619 lockdep_is_held(&priv->lock))); 1620 /* remove from path/mc list */ 1621 list_del_init(&neigh->list); 1622 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1623 } 1624 } 1625 1626 free_htbl: 1627 rcu_assign_pointer(ntbl->htbl, NULL); 1628 call_rcu(&htbl->rcu, neigh_hash_free_rcu); 1629 1630 out_unlock: 1631 spin_unlock_irqrestore(&priv->lock, flags); 1632 if (wait_flushed) 1633 wait_for_completion(&priv->ntbl.flushed); 1634 } 1635 1636 static void ipoib_neigh_hash_uninit(struct net_device *dev) 1637 { 1638 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1639 int stopped; 1640 1641 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); 1642 init_completion(&priv->ntbl.deleted); 1643 1644 /* Stop GC if called at init fail need to cancel work */ 1645 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1646 if (!stopped) 1647 cancel_delayed_work(&priv->neigh_reap_task); 1648 1649 ipoib_flush_neighs(priv); 1650 1651 wait_for_completion(&priv->ntbl.deleted); 1652 } 1653 1654 static void ipoib_napi_add(struct net_device *dev) 1655 { 1656 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1657 1658 netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC); 1659 netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE); 1660 } 1661 1662 static void ipoib_napi_del(struct net_device *dev) 1663 { 1664 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1665 1666 netif_napi_del(&priv->recv_napi); 1667 netif_napi_del(&priv->send_napi); 1668 } 1669 1670 static void ipoib_dev_uninit_default(struct net_device *dev) 1671 { 1672 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1673 1674 ipoib_transport_dev_cleanup(dev); 1675 1676 ipoib_napi_del(dev); 1677 1678 ipoib_cm_dev_cleanup(dev); 1679 1680 kfree(priv->rx_ring); 1681 vfree(priv->tx_ring); 1682 1683 priv->rx_ring = NULL; 1684 priv->tx_ring = NULL; 1685 } 1686 1687 static int ipoib_dev_init_default(struct net_device *dev) 1688 { 1689 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1690 1691 ipoib_napi_add(dev); 1692 1693 /* Allocate RX/TX "rings" to hold queued skbs */ 1694 priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, 1695 GFP_KERNEL); 1696 if (!priv->rx_ring) 1697 goto out; 1698 1699 priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); 1700 if (!priv->tx_ring) { 1701 pr_warn("%s: failed to allocate TX ring (%d entries)\n", 1702 priv->ca->name, ipoib_sendq_size); 1703 goto out_rx_ring_cleanup; 1704 } 1705 1706 /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ 1707 1708 if (ipoib_transport_dev_init(dev, priv->ca)) { 1709 pr_warn("%s: ipoib_transport_dev_init failed\n", 1710 priv->ca->name); 1711 goto out_tx_ring_cleanup; 1712 } 1713 1714 /* after qp created set dev address */ 1715 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; 1716 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; 1717 priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff; 1718 1719 return 0; 1720 1721 out_tx_ring_cleanup: 1722 vfree(priv->tx_ring); 1723 1724 out_rx_ring_cleanup: 1725 kfree(priv->rx_ring); 1726 1727 out: 1728 ipoib_napi_del(dev); 1729 return -ENOMEM; 1730 } 1731 1732 static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr, 1733 int cmd) 1734 { 1735 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1736 1737 if (!priv->rn_ops->ndo_do_ioctl) 1738 return -EOPNOTSUPP; 1739 1740 return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd); 1741 } 1742 1743 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) 1744 { 1745 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1746 int ret = -ENOMEM; 1747 1748 priv->ca = ca; 1749 priv->port = port; 1750 priv->qp = NULL; 1751 1752 /* 1753 * the various IPoIB tasks assume they will never race against 1754 * themselves, so always use a single thread workqueue 1755 */ 1756 priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM); 1757 if (!priv->wq) { 1758 pr_warn("%s: failed to allocate device WQ\n", dev->name); 1759 goto out; 1760 } 1761 1762 /* create pd, which used both for control and datapath*/ 1763 priv->pd = ib_alloc_pd(priv->ca, 0); 1764 if (IS_ERR(priv->pd)) { 1765 pr_warn("%s: failed to allocate PD\n", ca->name); 1766 goto clean_wq; 1767 } 1768 1769 ret = priv->rn_ops->ndo_init(dev); 1770 if (ret) { 1771 pr_warn("%s failed to init HW resource\n", dev->name); 1772 goto out_free_pd; 1773 } 1774 1775 if (ipoib_neigh_hash_init(priv) < 0) { 1776 pr_warn("%s failed to init neigh hash\n", dev->name); 1777 goto out_dev_uninit; 1778 } 1779 1780 if (dev->flags & IFF_UP) { 1781 if (ipoib_ib_dev_open(dev)) { 1782 pr_warn("%s failed to open device\n", dev->name); 1783 ret = -ENODEV; 1784 goto out_dev_uninit; 1785 } 1786 } 1787 1788 return 0; 1789 1790 out_dev_uninit: 1791 ipoib_ib_dev_cleanup(dev); 1792 1793 out_free_pd: 1794 if (priv->pd) { 1795 ib_dealloc_pd(priv->pd); 1796 priv->pd = NULL; 1797 } 1798 1799 clean_wq: 1800 if (priv->wq) { 1801 destroy_workqueue(priv->wq); 1802 priv->wq = NULL; 1803 } 1804 1805 out: 1806 return ret; 1807 } 1808 1809 void ipoib_dev_cleanup(struct net_device *dev) 1810 { 1811 struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv; 1812 LIST_HEAD(head); 1813 1814 ASSERT_RTNL(); 1815 1816 /* Delete any child interfaces first */ 1817 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { 1818 /* Stop GC on child */ 1819 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags); 1820 cancel_delayed_work(&cpriv->neigh_reap_task); 1821 unregister_netdevice_queue(cpriv->dev, &head); 1822 } 1823 unregister_netdevice_many(&head); 1824 1825 ipoib_neigh_hash_uninit(dev); 1826 1827 ipoib_ib_dev_cleanup(dev); 1828 1829 /* no more works over the priv->wq */ 1830 if (priv->wq) { 1831 flush_workqueue(priv->wq); 1832 destroy_workqueue(priv->wq); 1833 priv->wq = NULL; 1834 } 1835 } 1836 1837 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state) 1838 { 1839 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1840 1841 return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state); 1842 } 1843 1844 static int ipoib_get_vf_config(struct net_device *dev, int vf, 1845 struct ifla_vf_info *ivf) 1846 { 1847 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1848 int err; 1849 1850 err = ib_get_vf_config(priv->ca, vf, priv->port, ivf); 1851 if (err) 1852 return err; 1853 1854 ivf->vf = vf; 1855 1856 return 0; 1857 } 1858 1859 static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type) 1860 { 1861 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1862 1863 if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID) 1864 return -EINVAL; 1865 1866 return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type); 1867 } 1868 1869 static int ipoib_get_vf_stats(struct net_device *dev, int vf, 1870 struct ifla_vf_stats *vf_stats) 1871 { 1872 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1873 1874 return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats); 1875 } 1876 1877 static const struct header_ops ipoib_header_ops = { 1878 .create = ipoib_hard_header, 1879 }; 1880 1881 static const struct net_device_ops ipoib_netdev_ops_pf = { 1882 .ndo_uninit = ipoib_uninit, 1883 .ndo_open = ipoib_open, 1884 .ndo_stop = ipoib_stop, 1885 .ndo_change_mtu = ipoib_change_mtu, 1886 .ndo_fix_features = ipoib_fix_features, 1887 .ndo_start_xmit = ipoib_start_xmit, 1888 .ndo_tx_timeout = ipoib_timeout, 1889 .ndo_set_rx_mode = ipoib_set_mcast_list, 1890 .ndo_get_iflink = ipoib_get_iflink, 1891 .ndo_set_vf_link_state = ipoib_set_vf_link_state, 1892 .ndo_get_vf_config = ipoib_get_vf_config, 1893 .ndo_get_vf_stats = ipoib_get_vf_stats, 1894 .ndo_set_vf_guid = ipoib_set_vf_guid, 1895 .ndo_set_mac_address = ipoib_set_mac, 1896 .ndo_get_stats64 = ipoib_get_stats, 1897 .ndo_do_ioctl = ipoib_ioctl, 1898 }; 1899 1900 static const struct net_device_ops ipoib_netdev_ops_vf = { 1901 .ndo_uninit = ipoib_uninit, 1902 .ndo_open = ipoib_open, 1903 .ndo_stop = ipoib_stop, 1904 .ndo_change_mtu = ipoib_change_mtu, 1905 .ndo_fix_features = ipoib_fix_features, 1906 .ndo_start_xmit = ipoib_start_xmit, 1907 .ndo_tx_timeout = ipoib_timeout, 1908 .ndo_set_rx_mode = ipoib_set_mcast_list, 1909 .ndo_get_iflink = ipoib_get_iflink, 1910 .ndo_get_stats64 = ipoib_get_stats, 1911 .ndo_do_ioctl = ipoib_ioctl, 1912 }; 1913 1914 void ipoib_setup_common(struct net_device *dev) 1915 { 1916 dev->header_ops = &ipoib_header_ops; 1917 1918 ipoib_set_ethtool_ops(dev); 1919 1920 dev->watchdog_timeo = HZ; 1921 1922 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1923 1924 dev->hard_header_len = IPOIB_HARD_LEN; 1925 dev->addr_len = INFINIBAND_ALEN; 1926 dev->type = ARPHRD_INFINIBAND; 1927 dev->tx_queue_len = ipoib_sendq_size * 2; 1928 dev->features = (NETIF_F_VLAN_CHALLENGED | 1929 NETIF_F_HIGHDMA); 1930 netif_keep_dst(dev); 1931 1932 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 1933 } 1934 1935 static void ipoib_build_priv(struct net_device *dev) 1936 { 1937 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1938 1939 priv->dev = dev; 1940 spin_lock_init(&priv->lock); 1941 init_rwsem(&priv->vlan_rwsem); 1942 mutex_init(&priv->mcast_mutex); 1943 mutex_init(&priv->sysfs_mutex); 1944 1945 INIT_LIST_HEAD(&priv->path_list); 1946 INIT_LIST_HEAD(&priv->child_intfs); 1947 INIT_LIST_HEAD(&priv->dead_ahs); 1948 INIT_LIST_HEAD(&priv->multicast_list); 1949 1950 INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); 1951 INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); 1952 INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); 1953 INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal); 1954 INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy); 1955 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); 1956 INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); 1957 INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh); 1958 } 1959 1960 static const struct net_device_ops ipoib_netdev_default_pf = { 1961 .ndo_init = ipoib_dev_init_default, 1962 .ndo_uninit = ipoib_dev_uninit_default, 1963 .ndo_open = ipoib_ib_dev_open_default, 1964 .ndo_stop = ipoib_ib_dev_stop_default, 1965 }; 1966 1967 static struct net_device 1968 *ipoib_create_netdev_default(struct ib_device *hca, 1969 const char *name, 1970 unsigned char name_assign_type, 1971 void (*setup)(struct net_device *)) 1972 { 1973 struct net_device *dev; 1974 struct rdma_netdev *rn; 1975 1976 dev = alloc_netdev((int)sizeof(struct rdma_netdev), 1977 name, 1978 name_assign_type, setup); 1979 if (!dev) 1980 return NULL; 1981 1982 rn = netdev_priv(dev); 1983 1984 rn->send = ipoib_send; 1985 rn->attach_mcast = ipoib_mcast_attach; 1986 rn->detach_mcast = ipoib_mcast_detach; 1987 rn->free_rdma_netdev = free_netdev; 1988 rn->hca = hca; 1989 1990 dev->netdev_ops = &ipoib_netdev_default_pf; 1991 1992 return dev; 1993 } 1994 1995 static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port, 1996 const char *name) 1997 { 1998 struct net_device *dev; 1999 2000 if (hca->alloc_rdma_netdev) { 2001 dev = hca->alloc_rdma_netdev(hca, port, 2002 RDMA_NETDEV_IPOIB, name, 2003 NET_NAME_UNKNOWN, 2004 ipoib_setup_common); 2005 if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP) 2006 return NULL; 2007 } 2008 2009 if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP) 2010 dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN, 2011 ipoib_setup_common); 2012 2013 return dev; 2014 } 2015 2016 struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port, 2017 const char *name) 2018 { 2019 struct net_device *dev; 2020 struct ipoib_dev_priv *priv; 2021 struct rdma_netdev *rn; 2022 2023 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2024 if (!priv) 2025 return NULL; 2026 2027 dev = ipoib_get_netdev(hca, port, name); 2028 if (!dev) 2029 goto free_priv; 2030 2031 priv->rn_ops = dev->netdev_ops; 2032 2033 /* fixme : should be after the query_cap */ 2034 if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION) 2035 dev->netdev_ops = &ipoib_netdev_ops_vf; 2036 else 2037 dev->netdev_ops = &ipoib_netdev_ops_pf; 2038 2039 rn = netdev_priv(dev); 2040 rn->clnt_priv = priv; 2041 ipoib_build_priv(dev); 2042 2043 return priv; 2044 free_priv: 2045 kfree(priv); 2046 return NULL; 2047 } 2048 2049 static ssize_t show_pkey(struct device *dev, 2050 struct device_attribute *attr, char *buf) 2051 { 2052 struct net_device *ndev = to_net_dev(dev); 2053 struct ipoib_dev_priv *priv = ipoib_priv(ndev); 2054 2055 return sprintf(buf, "0x%04x\n", priv->pkey); 2056 } 2057 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 2058 2059 static ssize_t show_umcast(struct device *dev, 2060 struct device_attribute *attr, char *buf) 2061 { 2062 struct net_device *ndev = to_net_dev(dev); 2063 struct ipoib_dev_priv *priv = ipoib_priv(ndev); 2064 2065 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags)); 2066 } 2067 2068 void ipoib_set_umcast(struct net_device *ndev, int umcast_val) 2069 { 2070 struct ipoib_dev_priv *priv = ipoib_priv(ndev); 2071 2072 if (umcast_val > 0) { 2073 set_bit(IPOIB_FLAG_UMCAST, &priv->flags); 2074 ipoib_warn(priv, "ignoring multicast groups joined directly " 2075 "by userspace\n"); 2076 } else 2077 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags); 2078 } 2079 2080 static ssize_t set_umcast(struct device *dev, 2081 struct device_attribute *attr, 2082 const char *buf, size_t count) 2083 { 2084 unsigned long umcast_val = simple_strtoul(buf, NULL, 0); 2085 2086 ipoib_set_umcast(to_net_dev(dev), umcast_val); 2087 2088 return count; 2089 } 2090 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast); 2091 2092 int ipoib_add_umcast_attr(struct net_device *dev) 2093 { 2094 return device_create_file(&dev->dev, &dev_attr_umcast); 2095 } 2096 2097 static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) 2098 { 2099 struct ipoib_dev_priv *child_priv; 2100 struct net_device *netdev = priv->dev; 2101 2102 netif_addr_lock_bh(netdev); 2103 2104 memcpy(&priv->local_gid.global.interface_id, 2105 &gid->global.interface_id, 2106 sizeof(gid->global.interface_id)); 2107 memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); 2108 clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); 2109 2110 netif_addr_unlock_bh(netdev); 2111 2112 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 2113 down_read(&priv->vlan_rwsem); 2114 list_for_each_entry(child_priv, &priv->child_intfs, list) 2115 set_base_guid(child_priv, gid); 2116 up_read(&priv->vlan_rwsem); 2117 } 2118 } 2119 2120 static int ipoib_check_lladdr(struct net_device *dev, 2121 struct sockaddr_storage *ss) 2122 { 2123 union ib_gid *gid = (union ib_gid *)(ss->__data + 4); 2124 int ret = 0; 2125 2126 netif_addr_lock_bh(dev); 2127 2128 /* Make sure the QPN, reserved and subnet prefix match the current 2129 * lladdr, it also makes sure the lladdr is unicast. 2130 */ 2131 if (memcmp(dev->dev_addr, ss->__data, 2132 4 + sizeof(gid->global.subnet_prefix)) || 2133 gid->global.interface_id == 0) 2134 ret = -EINVAL; 2135 2136 netif_addr_unlock_bh(dev); 2137 2138 return ret; 2139 } 2140 2141 static int ipoib_set_mac(struct net_device *dev, void *addr) 2142 { 2143 struct ipoib_dev_priv *priv = ipoib_priv(dev); 2144 struct sockaddr_storage *ss = addr; 2145 int ret; 2146 2147 if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev)) 2148 return -EBUSY; 2149 2150 ret = ipoib_check_lladdr(dev, ss); 2151 if (ret) 2152 return ret; 2153 2154 set_base_guid(priv, (union ib_gid *)(ss->__data + 4)); 2155 2156 queue_work(ipoib_workqueue, &priv->flush_light); 2157 2158 return 0; 2159 } 2160 2161 static ssize_t create_child(struct device *dev, 2162 struct device_attribute *attr, 2163 const char *buf, size_t count) 2164 { 2165 int pkey; 2166 int ret; 2167 2168 if (sscanf(buf, "%i", &pkey) != 1) 2169 return -EINVAL; 2170 2171 if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000) 2172 return -EINVAL; 2173 2174 /* 2175 * Set the full membership bit, so that we join the right 2176 * broadcast group, etc. 2177 */ 2178 pkey |= 0x8000; 2179 2180 ret = ipoib_vlan_add(to_net_dev(dev), pkey); 2181 2182 return ret ? ret : count; 2183 } 2184 static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child); 2185 2186 static ssize_t delete_child(struct device *dev, 2187 struct device_attribute *attr, 2188 const char *buf, size_t count) 2189 { 2190 int pkey; 2191 int ret; 2192 2193 if (sscanf(buf, "%i", &pkey) != 1) 2194 return -EINVAL; 2195 2196 if (pkey < 0 || pkey > 0xffff) 2197 return -EINVAL; 2198 2199 ret = ipoib_vlan_delete(to_net_dev(dev), pkey); 2200 2201 return ret ? ret : count; 2202 2203 } 2204 static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child); 2205 2206 int ipoib_add_pkey_attr(struct net_device *dev) 2207 { 2208 return device_create_file(&dev->dev, &dev_attr_pkey); 2209 } 2210 2211 void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) 2212 { 2213 priv->hca_caps = hca->attrs.device_cap_flags; 2214 2215 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { 2216 priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 2217 2218 if (priv->hca_caps & IB_DEVICE_UD_TSO) 2219 priv->dev->hw_features |= NETIF_F_TSO; 2220 2221 priv->dev->features |= priv->dev->hw_features; 2222 } 2223 } 2224 2225 static struct net_device *ipoib_add_port(const char *format, 2226 struct ib_device *hca, u8 port) 2227 { 2228 struct ipoib_dev_priv *priv; 2229 struct ib_port_attr attr; 2230 struct rdma_netdev *rn; 2231 int result = -ENOMEM; 2232 2233 priv = ipoib_intf_alloc(hca, port, format); 2234 if (!priv) { 2235 pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port); 2236 goto alloc_mem_failed; 2237 } 2238 2239 SET_NETDEV_DEV(priv->dev, hca->dev.parent); 2240 priv->dev->dev_id = port - 1; 2241 2242 result = ib_query_port(hca, port, &attr); 2243 if (result) { 2244 pr_warn("%s: ib_query_port %d failed\n", hca->name, port); 2245 goto device_init_failed; 2246 } 2247 2248 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 2249 2250 /* MTU will be reset when mcast join happens */ 2251 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 2252 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 2253 priv->dev->max_mtu = IPOIB_CM_MTU; 2254 2255 priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh); 2256 2257 result = ib_query_pkey(hca, port, 0, &priv->pkey); 2258 if (result) { 2259 pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n", 2260 hca->name, port, result); 2261 goto device_init_failed; 2262 } 2263 2264 ipoib_set_dev_features(priv, hca); 2265 2266 /* 2267 * Set the full membership bit, so that we join the right 2268 * broadcast group, etc. 2269 */ 2270 priv->pkey |= 0x8000; 2271 2272 priv->dev->broadcast[8] = priv->pkey >> 8; 2273 priv->dev->broadcast[9] = priv->pkey & 0xff; 2274 2275 result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL); 2276 if (result) { 2277 pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n", 2278 hca->name, port, result); 2279 goto device_init_failed; 2280 } 2281 2282 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, 2283 sizeof(union ib_gid)); 2284 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); 2285 2286 result = ipoib_dev_init(priv->dev, hca, port); 2287 if (result) { 2288 pr_warn("%s: failed to initialize port %d (ret = %d)\n", 2289 hca->name, port, result); 2290 goto device_init_failed; 2291 } 2292 2293 INIT_IB_EVENT_HANDLER(&priv->event_handler, 2294 priv->ca, ipoib_event); 2295 ib_register_event_handler(&priv->event_handler); 2296 2297 result = register_netdev(priv->dev); 2298 if (result) { 2299 pr_warn("%s: couldn't register ipoib port %d; error %d\n", 2300 hca->name, port, result); 2301 goto register_failed; 2302 } 2303 2304 result = -ENOMEM; 2305 if (ipoib_cm_add_mode_attr(priv->dev)) 2306 goto sysfs_failed; 2307 if (ipoib_add_pkey_attr(priv->dev)) 2308 goto sysfs_failed; 2309 if (ipoib_add_umcast_attr(priv->dev)) 2310 goto sysfs_failed; 2311 if (device_create_file(&priv->dev->dev, &dev_attr_create_child)) 2312 goto sysfs_failed; 2313 if (device_create_file(&priv->dev->dev, &dev_attr_delete_child)) 2314 goto sysfs_failed; 2315 2316 return priv->dev; 2317 2318 sysfs_failed: 2319 unregister_netdev(priv->dev); 2320 2321 register_failed: 2322 ib_unregister_event_handler(&priv->event_handler); 2323 flush_workqueue(ipoib_workqueue); 2324 /* Stop GC if started before flush */ 2325 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 2326 cancel_delayed_work(&priv->neigh_reap_task); 2327 flush_workqueue(priv->wq); 2328 ipoib_dev_cleanup(priv->dev); 2329 2330 device_init_failed: 2331 rn = netdev_priv(priv->dev); 2332 rn->free_rdma_netdev(priv->dev); 2333 kfree(priv); 2334 2335 alloc_mem_failed: 2336 return ERR_PTR(result); 2337 } 2338 2339 static void ipoib_add_one(struct ib_device *device) 2340 { 2341 struct list_head *dev_list; 2342 struct net_device *dev; 2343 struct ipoib_dev_priv *priv; 2344 int p; 2345 int count = 0; 2346 2347 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 2348 if (!dev_list) 2349 return; 2350 2351 INIT_LIST_HEAD(dev_list); 2352 2353 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { 2354 if (!rdma_protocol_ib(device, p)) 2355 continue; 2356 dev = ipoib_add_port("ib%d", device, p); 2357 if (!IS_ERR(dev)) { 2358 priv = ipoib_priv(dev); 2359 list_add_tail(&priv->list, dev_list); 2360 count++; 2361 } 2362 } 2363 2364 if (!count) { 2365 kfree(dev_list); 2366 return; 2367 } 2368 2369 ib_set_client_data(device, &ipoib_client, dev_list); 2370 } 2371 2372 static void ipoib_remove_one(struct ib_device *device, void *client_data) 2373 { 2374 struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; 2375 struct list_head *dev_list = client_data; 2376 2377 if (!dev_list) 2378 return; 2379 2380 list_for_each_entry_safe(priv, tmp, dev_list, list) { 2381 struct rdma_netdev *parent_rn = netdev_priv(priv->dev); 2382 2383 ib_unregister_event_handler(&priv->event_handler); 2384 flush_workqueue(ipoib_workqueue); 2385 2386 /* mark interface in the middle of destruction */ 2387 set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags); 2388 2389 rtnl_lock(); 2390 dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); 2391 rtnl_unlock(); 2392 2393 /* Stop GC */ 2394 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 2395 cancel_delayed_work(&priv->neigh_reap_task); 2396 flush_workqueue(priv->wq); 2397 2398 /* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */ 2399 mutex_lock(&priv->sysfs_mutex); 2400 unregister_netdev(priv->dev); 2401 mutex_unlock(&priv->sysfs_mutex); 2402 2403 parent_rn->free_rdma_netdev(priv->dev); 2404 2405 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { 2406 struct rdma_netdev *child_rn; 2407 2408 child_rn = netdev_priv(cpriv->dev); 2409 child_rn->free_rdma_netdev(cpriv->dev); 2410 kfree(cpriv); 2411 } 2412 2413 kfree(priv); 2414 } 2415 2416 kfree(dev_list); 2417 } 2418 2419 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 2420 static struct notifier_block ipoib_netdev_notifier = { 2421 .notifier_call = ipoib_netdev_event, 2422 }; 2423 #endif 2424 2425 static int __init ipoib_init_module(void) 2426 { 2427 int ret; 2428 2429 ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size); 2430 ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE); 2431 ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE); 2432 2433 ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size); 2434 ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE); 2435 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); 2436 #ifdef CONFIG_INFINIBAND_IPOIB_CM 2437 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 2438 ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0); 2439 #endif 2440 2441 /* 2442 * When copying small received packets, we only copy from the 2443 * linear data part of the SKB, so we rely on this condition. 2444 */ 2445 BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE); 2446 2447 ret = ipoib_register_debugfs(); 2448 if (ret) 2449 return ret; 2450 2451 /* 2452 * We create a global workqueue here that is used for all flush 2453 * operations. However, if you attempt to flush a workqueue 2454 * from a task on that same workqueue, it deadlocks the system. 2455 * We want to be able to flush the tasks associated with a 2456 * specific net device, so we also create a workqueue for each 2457 * netdevice. We queue up the tasks for that device only on 2458 * its private workqueue, and we only queue up flush events 2459 * on our global flush workqueue. This avoids the deadlocks. 2460 */ 2461 ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 2462 WQ_MEM_RECLAIM); 2463 if (!ipoib_workqueue) { 2464 ret = -ENOMEM; 2465 goto err_fs; 2466 } 2467 2468 ib_sa_register_client(&ipoib_sa_client); 2469 2470 ret = ib_register_client(&ipoib_client); 2471 if (ret) 2472 goto err_sa; 2473 2474 ret = ipoib_netlink_init(); 2475 if (ret) 2476 goto err_client; 2477 2478 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 2479 register_netdevice_notifier(&ipoib_netdev_notifier); 2480 #endif 2481 return 0; 2482 2483 err_client: 2484 ib_unregister_client(&ipoib_client); 2485 2486 err_sa: 2487 ib_sa_unregister_client(&ipoib_sa_client); 2488 destroy_workqueue(ipoib_workqueue); 2489 2490 err_fs: 2491 ipoib_unregister_debugfs(); 2492 2493 return ret; 2494 } 2495 2496 static void __exit ipoib_cleanup_module(void) 2497 { 2498 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 2499 unregister_netdevice_notifier(&ipoib_netdev_notifier); 2500 #endif 2501 ipoib_netlink_fini(); 2502 ib_unregister_client(&ipoib_client); 2503 ib_sa_unregister_client(&ipoib_sa_client); 2504 ipoib_unregister_debugfs(); 2505 destroy_workqueue(ipoib_workqueue); 2506 } 2507 2508 module_init(ipoib_init_module); 2509 module_exit(ipoib_cleanup_module); 2510