1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include "ipoib.h" 36 37 #include <linux/module.h> 38 39 #include <linux/init.h> 40 #include <linux/slab.h> 41 #include <linux/kernel.h> 42 #include <linux/vmalloc.h> 43 44 #include <linux/if_arp.h> /* For ARPHRD_xxx */ 45 46 #include <linux/ip.h> 47 #include <linux/in.h> 48 49 #include <linux/jhash.h> 50 #include <net/arp.h> 51 #include <net/addrconf.h> 52 #include <linux/inetdevice.h> 53 #include <rdma/ib_cache.h> 54 55 #define DRV_VERSION "1.0.0" 56 57 const char ipoib_driver_version[] = DRV_VERSION; 58 59 MODULE_AUTHOR("Roland Dreier"); 60 MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); 61 MODULE_LICENSE("Dual BSD/GPL"); 62 63 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; 64 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; 65 66 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444); 67 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); 68 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); 69 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); 70 71 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 72 int ipoib_debug_level; 73 74 module_param_named(debug_level, ipoib_debug_level, int, 0644); 75 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 76 #endif 77 78 struct ipoib_path_iter { 79 struct net_device *dev; 80 struct ipoib_path path; 81 }; 82 83 static const u8 ipv4_bcast_addr[] = { 84 0x00, 0xff, 0xff, 0xff, 85 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, 86 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff 87 }; 88 89 struct workqueue_struct *ipoib_workqueue; 90 91 struct ib_sa_client ipoib_sa_client; 92 93 static void ipoib_add_one(struct ib_device *device); 94 static void ipoib_remove_one(struct ib_device *device, void *client_data); 95 static void ipoib_neigh_reclaim(struct rcu_head *rp); 96 static struct net_device *ipoib_get_net_dev_by_params( 97 struct ib_device *dev, u8 port, u16 pkey, 98 const union ib_gid *gid, const struct sockaddr *addr, 99 
void *client_data); 100 static int ipoib_set_mac(struct net_device *dev, void *addr); 101 static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr, 102 int cmd); 103 104 static struct ib_client ipoib_client = { 105 .name = "ipoib", 106 .add = ipoib_add_one, 107 .remove = ipoib_remove_one, 108 .get_net_dev_by_params = ipoib_get_net_dev_by_params, 109 }; 110 111 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 112 static int ipoib_netdev_event(struct notifier_block *this, 113 unsigned long event, void *ptr) 114 { 115 struct netdev_notifier_info *ni = ptr; 116 struct net_device *dev = ni->dev; 117 118 if (dev->netdev_ops->ndo_open != ipoib_open) 119 return NOTIFY_DONE; 120 121 switch (event) { 122 case NETDEV_REGISTER: 123 ipoib_create_debug_files(dev); 124 break; 125 case NETDEV_CHANGENAME: 126 ipoib_delete_debug_files(dev); 127 ipoib_create_debug_files(dev); 128 break; 129 case NETDEV_UNREGISTER: 130 ipoib_delete_debug_files(dev); 131 break; 132 } 133 134 return NOTIFY_DONE; 135 } 136 #endif 137 138 int ipoib_open(struct net_device *dev) 139 { 140 struct ipoib_dev_priv *priv = ipoib_priv(dev); 141 142 ipoib_dbg(priv, "bringing up interface\n"); 143 144 netif_carrier_off(dev); 145 146 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 147 148 priv->sm_fullmember_sendonly_support = false; 149 150 if (ipoib_ib_dev_open(dev)) { 151 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) 152 return 0; 153 goto err_disable; 154 } 155 156 ipoib_ib_dev_up(dev); 157 158 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 159 struct ipoib_dev_priv *cpriv; 160 161 /* Bring up any child interfaces too */ 162 down_read(&priv->vlan_rwsem); 163 list_for_each_entry(cpriv, &priv->child_intfs, list) { 164 int flags; 165 166 flags = cpriv->dev->flags; 167 if (flags & IFF_UP) 168 continue; 169 170 dev_change_flags(cpriv->dev, flags | IFF_UP); 171 } 172 up_read(&priv->vlan_rwsem); 173 } 174 175 netif_start_queue(dev); 176 177 return 0; 178 179 err_disable: 180 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 181 182 return -EINVAL; 183 } 184 185 static int ipoib_stop(struct net_device *dev) 186 { 187 struct ipoib_dev_priv *priv = ipoib_priv(dev); 188 189 ipoib_dbg(priv, "stopping interface\n"); 190 191 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 192 193 netif_stop_queue(dev); 194 195 ipoib_ib_dev_down(dev); 196 ipoib_ib_dev_stop(dev); 197 198 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 199 struct ipoib_dev_priv *cpriv; 200 201 /* Bring down any child interfaces too */ 202 down_read(&priv->vlan_rwsem); 203 list_for_each_entry(cpriv, &priv->child_intfs, list) { 204 int flags; 205 206 flags = cpriv->dev->flags; 207 if (!(flags & IFF_UP)) 208 continue; 209 210 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 211 } 212 up_read(&priv->vlan_rwsem); 213 } 214 215 return 0; 216 } 217 218 static void ipoib_uninit(struct net_device *dev) 219 { 220 ipoib_dev_cleanup(dev); 221 } 222 223 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) 224 { 225 struct ipoib_dev_priv *priv = ipoib_priv(dev); 226 227 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) 228 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 229 230 return features; 231 } 232 233 static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 234 { 235 struct ipoib_dev_priv *priv = ipoib_priv(dev); 236 int ret = 0; 237 238 /* dev->mtu > 2K ==> connected mode */ 239 if (ipoib_cm_admin_enabled(dev)) { 240 if (new_mtu > ipoib_cm_max_mtu(dev)) 241 return -EINVAL; 242 243 if (new_mtu > priv->mcast_mtu) 244 ipoib_warn(priv, "mtu > 
%d will cause multicast packet drops.\n", 245 priv->mcast_mtu); 246 247 dev->mtu = new_mtu; 248 return 0; 249 } 250 251 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) 252 return -EINVAL; 253 254 priv->admin_mtu = new_mtu; 255 256 if (priv->mcast_mtu < priv->admin_mtu) 257 ipoib_dbg(priv, "MTU must be smaller than the underlying " 258 "link layer MTU - 4 (%u)\n", priv->mcast_mtu); 259 260 new_mtu = min(priv->mcast_mtu, priv->admin_mtu); 261 262 if (priv->rn_ops->ndo_change_mtu) { 263 bool carrier_status = netif_carrier_ok(dev); 264 265 netif_carrier_off(dev); 266 267 /* notify lower level on the real mtu */ 268 ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu); 269 270 if (carrier_status) 271 netif_carrier_on(dev); 272 } else { 273 dev->mtu = new_mtu; 274 } 275 276 return ret; 277 } 278 279 static void ipoib_get_stats(struct net_device *dev, 280 struct rtnl_link_stats64 *stats) 281 { 282 struct ipoib_dev_priv *priv = ipoib_priv(dev); 283 284 if (priv->rn_ops->ndo_get_stats64) 285 priv->rn_ops->ndo_get_stats64(dev, stats); 286 else 287 netdev_stats_to_stats64(stats, &dev->stats); 288 } 289 290 /* Called with an RCU read lock taken */ 291 static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr, 292 struct net_device *dev) 293 { 294 struct net *net = dev_net(dev); 295 struct in_device *in_dev; 296 struct sockaddr_in *addr_in = (struct sockaddr_in *)addr; 297 struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr; 298 __be32 ret_addr; 299 300 switch (addr->sa_family) { 301 case AF_INET: 302 in_dev = in_dev_get(dev); 303 if (!in_dev) 304 return false; 305 306 ret_addr = inet_confirm_addr(net, in_dev, 0, 307 addr_in->sin_addr.s_addr, 308 RT_SCOPE_HOST); 309 in_dev_put(in_dev); 310 if (ret_addr) 311 return true; 312 313 break; 314 case AF_INET6: 315 if (IS_ENABLED(CONFIG_IPV6) && 316 ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1)) 317 return true; 318 319 break; 320 } 321 return false; 322 } 323 324 /** 325 * Find the master net_device on top of the given net_device. 326 * @dev: base IPoIB net_device 327 * 328 * Returns the master net_device with a reference held, or the same net_device 329 * if no master exists. 330 */ 331 static struct net_device *ipoib_get_master_net_dev(struct net_device *dev) 332 { 333 struct net_device *master; 334 335 rcu_read_lock(); 336 master = netdev_master_upper_dev_get_rcu(dev); 337 if (master) 338 dev_hold(master); 339 rcu_read_unlock(); 340 341 if (master) 342 return master; 343 344 dev_hold(dev); 345 return dev; 346 } 347 348 struct ipoib_walk_data { 349 const struct sockaddr *addr; 350 struct net_device *result; 351 }; 352 353 static int ipoib_upper_walk(struct net_device *upper, void *_data) 354 { 355 struct ipoib_walk_data *data = _data; 356 int ret = 0; 357 358 if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) { 359 dev_hold(upper); 360 data->result = upper; 361 ret = 1; 362 } 363 364 return ret; 365 } 366 367 /** 368 * Find a net_device matching the given address, which is an upper device of 369 * the given net_device. 370 * @addr: IP address to look for. 371 * @dev: base IPoIB net_device 372 * 373 * If found, returns the net_device with a reference held. Otherwise return 374 * NULL. 
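 * The walk runs under rcu_read_lock(); the caller is responsible for
 * releasing the returned reference with dev_put().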
375 */ 376 static struct net_device *ipoib_get_net_dev_match_addr( 377 const struct sockaddr *addr, struct net_device *dev) 378 { 379 struct ipoib_walk_data data = { 380 .addr = addr, 381 }; 382 383 rcu_read_lock(); 384 if (ipoib_is_dev_match_addr_rcu(addr, dev)) { 385 dev_hold(dev); 386 data.result = dev; 387 goto out; 388 } 389 390 netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data); 391 out: 392 rcu_read_unlock(); 393 return data.result; 394 } 395 396 /* returns the number of IPoIB netdevs on top a given ipoib device matching a 397 * pkey_index and address, if one exists. 398 * 399 * @found_net_dev: contains a matching net_device if the return value >= 1, 400 * with a reference held. */ 401 static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv, 402 const union ib_gid *gid, 403 u16 pkey_index, 404 const struct sockaddr *addr, 405 int nesting, 406 struct net_device **found_net_dev) 407 { 408 struct ipoib_dev_priv *child_priv; 409 struct net_device *net_dev = NULL; 410 int matches = 0; 411 412 if (priv->pkey_index == pkey_index && 413 (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) { 414 if (!addr) { 415 net_dev = ipoib_get_master_net_dev(priv->dev); 416 } else { 417 /* Verify the net_device matches the IP address, as 418 * IPoIB child devices currently share a GID. */ 419 net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev); 420 } 421 if (net_dev) { 422 if (!*found_net_dev) 423 *found_net_dev = net_dev; 424 else 425 dev_put(net_dev); 426 ++matches; 427 } 428 } 429 430 /* Check child interfaces */ 431 down_read_nested(&priv->vlan_rwsem, nesting); 432 list_for_each_entry(child_priv, &priv->child_intfs, list) { 433 matches += ipoib_match_gid_pkey_addr(child_priv, gid, 434 pkey_index, addr, 435 nesting + 1, 436 found_net_dev); 437 if (matches > 1) 438 break; 439 } 440 up_read(&priv->vlan_rwsem); 441 442 return matches; 443 } 444 445 /* Returns the number of matching net_devs found (between 0 and 2). Also 446 * return the matching net_device in the @net_dev parameter, holding a 447 * reference to the net_device, if the number of matches >= 1 */ 448 static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port, 449 u16 pkey_index, 450 const union ib_gid *gid, 451 const struct sockaddr *addr, 452 struct net_device **net_dev) 453 { 454 struct ipoib_dev_priv *priv; 455 int matches = 0; 456 457 *net_dev = NULL; 458 459 list_for_each_entry(priv, dev_list, list) { 460 if (priv->port != port) 461 continue; 462 463 matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index, 464 addr, 0, net_dev); 465 if (matches > 1) 466 break; 467 } 468 469 return matches; 470 } 471 472 static struct net_device *ipoib_get_net_dev_by_params( 473 struct ib_device *dev, u8 port, u16 pkey, 474 const union ib_gid *gid, const struct sockaddr *addr, 475 void *client_data) 476 { 477 struct net_device *net_dev; 478 struct list_head *dev_list = client_data; 479 u16 pkey_index; 480 int matches; 481 int ret; 482 483 if (!rdma_protocol_ib(dev, port)) 484 return NULL; 485 486 ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index); 487 if (ret) 488 return NULL; 489 490 if (!dev_list) 491 return NULL; 492 493 /* See if we can find a unique device matching the L2 parameters */ 494 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, 495 gid, NULL, &net_dev); 496 497 switch (matches) { 498 case 0: 499 return NULL; 500 case 1: 501 return net_dev; 502 } 503 504 dev_put(net_dev); 505 506 /* Couldn't find a unique device with L2 parameters only. 
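(L2 here meaning the pkey and GID, L3 the IP address supplied by the caller.)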
Use L3 507 * address to uniquely match the net device */ 508 matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, 509 gid, addr, &net_dev); 510 switch (matches) { 511 case 0: 512 return NULL; 513 default: 514 dev_warn_ratelimited(&dev->dev, 515 "duplicate IP address detected\n"); 516 /* Fall through */ 517 case 1: 518 return net_dev; 519 } 520 } 521 522 int ipoib_set_mode(struct net_device *dev, const char *buf) 523 { 524 struct ipoib_dev_priv *priv = ipoib_priv(dev); 525 526 if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && 527 !strcmp(buf, "connected\n")) || 528 (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && 529 !strcmp(buf, "datagram\n"))) { 530 return 0; 531 } 532 533 /* flush paths if we switch modes so that connections are restarted */ 534 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 535 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 536 ipoib_warn(priv, "enabling connected mode " 537 "will cause multicast packet drops\n"); 538 netdev_update_features(dev); 539 dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); 540 rtnl_unlock(); 541 priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM; 542 543 ipoib_flush_paths(dev); 544 return (!rtnl_trylock()) ? -EBUSY : 0; 545 } 546 547 if (!strcmp(buf, "datagram\n")) { 548 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 549 netdev_update_features(dev); 550 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 551 rtnl_unlock(); 552 ipoib_flush_paths(dev); 553 return (!rtnl_trylock()) ? -EBUSY : 0; 554 } 555 556 return -EINVAL; 557 } 558 559 struct ipoib_path *__path_find(struct net_device *dev, void *gid) 560 { 561 struct ipoib_dev_priv *priv = ipoib_priv(dev); 562 struct rb_node *n = priv->path_tree.rb_node; 563 struct ipoib_path *path; 564 int ret; 565 566 while (n) { 567 path = rb_entry(n, struct ipoib_path, rb_node); 568 569 ret = memcmp(gid, path->pathrec.dgid.raw, 570 sizeof (union ib_gid)); 571 572 if (ret < 0) 573 n = n->rb_left; 574 else if (ret > 0) 575 n = n->rb_right; 576 else 577 return path; 578 } 579 580 return NULL; 581 } 582 583 static int __path_add(struct net_device *dev, struct ipoib_path *path) 584 { 585 struct ipoib_dev_priv *priv = ipoib_priv(dev); 586 struct rb_node **n = &priv->path_tree.rb_node; 587 struct rb_node *pn = NULL; 588 struct ipoib_path *tpath; 589 int ret; 590 591 while (*n) { 592 pn = *n; 593 tpath = rb_entry(pn, struct ipoib_path, rb_node); 594 595 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, 596 sizeof (union ib_gid)); 597 if (ret < 0) 598 n = &pn->rb_left; 599 else if (ret > 0) 600 n = &pn->rb_right; 601 else 602 return -EEXIST; 603 } 604 605 rb_link_node(&path->rb_node, pn, n); 606 rb_insert_color(&path->rb_node, &priv->path_tree); 607 608 list_add_tail(&path->list, &priv->path_list); 609 610 return 0; 611 } 612 613 static void path_free(struct net_device *dev, struct ipoib_path *path) 614 { 615 struct sk_buff *skb; 616 617 while ((skb = __skb_dequeue(&path->queue))) 618 dev_kfree_skb_irq(skb); 619 620 ipoib_dbg(ipoib_priv(dev), "path_free\n"); 621 622 /* remove all neigh connected to this path */ 623 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 624 625 if (path->ah) 626 ipoib_put_ah(path->ah); 627 628 kfree(path); 629 } 630 631 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 632 633 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) 634 { 635 struct ipoib_path_iter *iter; 636 637 iter = kmalloc(sizeof *iter, GFP_KERNEL); 638 if (!iter) 639 return NULL; 640 641 iter->dev = dev; 642 memset(iter->path.pathrec.dgid.raw, 0, 16); 643 644 if 
(ipoib_path_iter_next(iter)) { 645 kfree(iter); 646 return NULL; 647 } 648 649 return iter; 650 } 651 652 int ipoib_path_iter_next(struct ipoib_path_iter *iter) 653 { 654 struct ipoib_dev_priv *priv = ipoib_priv(iter->dev); 655 struct rb_node *n; 656 struct ipoib_path *path; 657 int ret = 1; 658 659 spin_lock_irq(&priv->lock); 660 661 n = rb_first(&priv->path_tree); 662 663 while (n) { 664 path = rb_entry(n, struct ipoib_path, rb_node); 665 666 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, 667 sizeof (union ib_gid)) < 0) { 668 iter->path = *path; 669 ret = 0; 670 break; 671 } 672 673 n = rb_next(n); 674 } 675 676 spin_unlock_irq(&priv->lock); 677 678 return ret; 679 } 680 681 void ipoib_path_iter_read(struct ipoib_path_iter *iter, 682 struct ipoib_path *path) 683 { 684 *path = iter->path; 685 } 686 687 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ 688 689 void ipoib_mark_paths_invalid(struct net_device *dev) 690 { 691 struct ipoib_dev_priv *priv = ipoib_priv(dev); 692 struct ipoib_path *path, *tp; 693 694 spin_lock_irq(&priv->lock); 695 696 list_for_each_entry_safe(path, tp, &priv->path_list, list) { 697 ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n", 698 be32_to_cpu(sa_path_get_dlid(&path->pathrec)), 699 path->pathrec.dgid.raw); 700 path->valid = 0; 701 } 702 703 spin_unlock_irq(&priv->lock); 704 } 705 706 static void push_pseudo_header(struct sk_buff *skb, const char *daddr) 707 { 708 struct ipoib_pseudo_header *phdr; 709 710 phdr = skb_push(skb, sizeof(*phdr)); 711 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); 712 } 713 714 void ipoib_flush_paths(struct net_device *dev) 715 { 716 struct ipoib_dev_priv *priv = ipoib_priv(dev); 717 struct ipoib_path *path, *tp; 718 LIST_HEAD(remove_list); 719 unsigned long flags; 720 721 netif_tx_lock_bh(dev); 722 spin_lock_irqsave(&priv->lock, flags); 723 724 list_splice_init(&priv->path_list, &remove_list); 725 726 list_for_each_entry(path, &remove_list, list) 727 rb_erase(&path->rb_node, &priv->path_tree); 728 729 list_for_each_entry_safe(path, tp, &remove_list, list) { 730 if (path->query) 731 ib_sa_cancel_query(path->query_id, path->query); 732 spin_unlock_irqrestore(&priv->lock, flags); 733 netif_tx_unlock_bh(dev); 734 wait_for_completion(&path->done); 735 path_free(dev, path); 736 netif_tx_lock_bh(dev); 737 spin_lock_irqsave(&priv->lock, flags); 738 } 739 740 spin_unlock_irqrestore(&priv->lock, flags); 741 netif_tx_unlock_bh(dev); 742 } 743 744 static void path_rec_completion(int status, 745 struct sa_path_rec *pathrec, 746 void *path_ptr) 747 { 748 struct ipoib_path *path = path_ptr; 749 struct net_device *dev = path->dev; 750 struct ipoib_dev_priv *priv = ipoib_priv(dev); 751 struct ipoib_ah *ah = NULL; 752 struct ipoib_ah *old_ah = NULL; 753 struct ipoib_neigh *neigh, *tn; 754 struct sk_buff_head skqueue; 755 struct sk_buff *skb; 756 unsigned long flags; 757 758 if (!status) 759 ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n", 760 be32_to_cpu(sa_path_get_dlid(pathrec)), 761 pathrec->dgid.raw); 762 else 763 ipoib_dbg(priv, "PathRec status %d for GID %pI6\n", 764 status, path->pathrec.dgid.raw); 765 766 skb_queue_head_init(&skqueue); 767 768 if (!status) { 769 struct rdma_ah_attr av; 770 771 if (!ib_init_ah_attr_from_path(priv->ca, priv->port, 772 pathrec, &av)) 773 ah = ipoib_create_ah(dev, priv->pd, &av); 774 } 775 776 spin_lock_irqsave(&priv->lock, flags); 777 778 if (!IS_ERR_OR_NULL(ah)) { 779 /* 780 * pathrec.dgid is used as the database key from the LLADDR, 781 * it must remain unchanged even if the SA returns a 
different 782 * GID to use in the AH. 783 */ 784 if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw, 785 sizeof(union ib_gid))) { 786 ipoib_dbg( 787 priv, 788 "%s got PathRec for gid %pI6 while asked for %pI6\n", 789 dev->name, pathrec->dgid.raw, 790 path->pathrec.dgid.raw); 791 memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw, 792 sizeof(union ib_gid)); 793 } 794 795 path->pathrec = *pathrec; 796 797 old_ah = path->ah; 798 path->ah = ah; 799 800 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", 801 ah, be32_to_cpu(sa_path_get_dlid(pathrec)), 802 pathrec->sl); 803 804 while ((skb = __skb_dequeue(&path->queue))) 805 __skb_queue_tail(&skqueue, skb); 806 807 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { 808 if (neigh->ah) { 809 WARN_ON(neigh->ah != old_ah); 810 /* 811 * Dropping the ah reference inside 812 * priv->lock is safe here, because we 813 * will hold one more reference from 814 * the original value of path->ah (ie 815 * old_ah). 816 */ 817 ipoib_put_ah(neigh->ah); 818 } 819 kref_get(&path->ah->ref); 820 neigh->ah = path->ah; 821 822 if (ipoib_cm_enabled(dev, neigh->daddr)) { 823 if (!ipoib_cm_get(neigh)) 824 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, 825 path, 826 neigh)); 827 if (!ipoib_cm_get(neigh)) { 828 ipoib_neigh_free(neigh); 829 continue; 830 } 831 } 832 833 while ((skb = __skb_dequeue(&neigh->queue))) 834 __skb_queue_tail(&skqueue, skb); 835 } 836 path->valid = 1; 837 } 838 839 path->query = NULL; 840 complete(&path->done); 841 842 spin_unlock_irqrestore(&priv->lock, flags); 843 844 if (IS_ERR_OR_NULL(ah)) 845 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 846 847 if (old_ah) 848 ipoib_put_ah(old_ah); 849 850 while ((skb = __skb_dequeue(&skqueue))) { 851 int ret; 852 skb->dev = dev; 853 ret = dev_queue_xmit(skb); 854 if (ret) 855 ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n", 856 __func__, ret); 857 } 858 } 859 860 static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path, 861 void *gid) 862 { 863 path->dev = priv->dev; 864 865 if (rdma_cap_opa_ah(priv->ca, priv->port)) 866 path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA; 867 else 868 path->pathrec.rec_type = SA_PATH_REC_TYPE_IB; 869 870 memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid)); 871 path->pathrec.sgid = priv->local_gid; 872 path->pathrec.pkey = cpu_to_be16(priv->pkey); 873 path->pathrec.numb_path = 1; 874 path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class; 875 } 876 877 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid) 878 { 879 struct ipoib_dev_priv *priv = ipoib_priv(dev); 880 struct ipoib_path *path; 881 882 if (!priv->broadcast) 883 return NULL; 884 885 path = kzalloc(sizeof *path, GFP_ATOMIC); 886 if (!path) 887 return NULL; 888 889 skb_queue_head_init(&path->queue); 890 891 INIT_LIST_HEAD(&path->neigh_list); 892 893 init_path_rec(priv, path, gid); 894 895 return path; 896 } 897 898 static int path_rec_start(struct net_device *dev, 899 struct ipoib_path *path) 900 { 901 struct ipoib_dev_priv *priv = ipoib_priv(dev); 902 903 ipoib_dbg(priv, "Start path record lookup for %pI6\n", 904 path->pathrec.dgid.raw); 905 906 init_completion(&path->done); 907 908 path->query_id = 909 ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, 910 &path->pathrec, 911 IB_SA_PATH_REC_DGID | 912 IB_SA_PATH_REC_SGID | 913 IB_SA_PATH_REC_NUMB_PATH | 914 IB_SA_PATH_REC_TRAFFIC_CLASS | 915 IB_SA_PATH_REC_PKEY, 916 1000, GFP_ATOMIC, 917 path_rec_completion, 918 path, &path->query); 919 if 
(path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
					  struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* To avoid a race condition, make sure that the
	 * neigh will be added only once.
	 */
	if (unlikely(!list_empty(&neigh->list))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return neigh;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) <
			    IPOIB_MAX_PATH_REC_QUEUE) {
				push_pseudo_header(skb, neigh->daddr);
				__skb_queue_tail(&neigh->queue, skb);
			} else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
						       IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return NULL;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, neigh->daddr);
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			goto err_drop;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return NULL;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);

	return NULL;
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudo_header *phdr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* no broadcast means that all paths are (going to be) not valid */
	if (!priv->broadcast)
		goto drop_and_unlock;

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			if (!new_path)
				/* make sure there are no changes in the existing path record */
				init_path_rec(priv, path, phdr->hwaddr + 4);

			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
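				/* The path record query has not completed yet
				 * (path->valid is 0), so park the skb on the
				 * path queue; path_rec_completion() re-queues
				 * everything once the SA answers.  The queue is
				 * bounded by IPOIB_MAX_PATH_REC_QUEUE.
				 */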
push_pseudo_header(skb, phdr->hwaddr); 1051 __skb_queue_tail(&path->queue, skb); 1052 } else { 1053 ++dev->stats.tx_dropped; 1054 dev_kfree_skb_any(skb); 1055 } 1056 1057 if (!path->query && path_rec_start(dev, path)) { 1058 spin_unlock_irqrestore(&priv->lock, flags); 1059 if (new_path) 1060 path_free(dev, path); 1061 return; 1062 } else 1063 __path_add(dev, path); 1064 } else { 1065 goto drop_and_unlock; 1066 } 1067 1068 spin_unlock_irqrestore(&priv->lock, flags); 1069 return; 1070 } 1071 1072 if (path->ah) { 1073 ipoib_dbg(priv, "Send unicast ARP to %08x\n", 1074 be32_to_cpu(sa_path_get_dlid(&path->pathrec))); 1075 1076 spin_unlock_irqrestore(&priv->lock, flags); 1077 path->ah->last_send = rn->send(dev, skb, path->ah->ah, 1078 IPOIB_QPN(phdr->hwaddr)); 1079 return; 1080 } else if ((path->query || !path_rec_start(dev, path)) && 1081 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 1082 push_pseudo_header(skb, phdr->hwaddr); 1083 __skb_queue_tail(&path->queue, skb); 1084 } else { 1085 goto drop_and_unlock; 1086 } 1087 1088 spin_unlock_irqrestore(&priv->lock, flags); 1089 return; 1090 1091 drop_and_unlock: 1092 ++dev->stats.tx_dropped; 1093 dev_kfree_skb_any(skb); 1094 spin_unlock_irqrestore(&priv->lock, flags); 1095 } 1096 1097 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 1098 { 1099 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1100 struct rdma_netdev *rn = netdev_priv(dev); 1101 struct ipoib_neigh *neigh; 1102 struct ipoib_pseudo_header *phdr; 1103 struct ipoib_header *header; 1104 unsigned long flags; 1105 1106 phdr = (struct ipoib_pseudo_header *) skb->data; 1107 skb_pull(skb, sizeof(*phdr)); 1108 header = (struct ipoib_header *) skb->data; 1109 1110 if (unlikely(phdr->hwaddr[4] == 0xff)) { 1111 /* multicast, arrange "if" according to probability */ 1112 if ((header->proto != htons(ETH_P_IP)) && 1113 (header->proto != htons(ETH_P_IPV6)) && 1114 (header->proto != htons(ETH_P_ARP)) && 1115 (header->proto != htons(ETH_P_RARP)) && 1116 (header->proto != htons(ETH_P_TIPC))) { 1117 /* ethertype not supported by IPoIB */ 1118 ++dev->stats.tx_dropped; 1119 dev_kfree_skb_any(skb); 1120 return NETDEV_TX_OK; 1121 } 1122 /* Add in the P_Key for multicast*/ 1123 phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; 1124 phdr->hwaddr[9] = priv->pkey & 0xff; 1125 1126 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1127 if (likely(neigh)) 1128 goto send_using_neigh; 1129 ipoib_mcast_send(dev, phdr->hwaddr, skb); 1130 return NETDEV_TX_OK; 1131 } 1132 1133 /* unicast, arrange "switch" according to probability */ 1134 switch (header->proto) { 1135 case htons(ETH_P_IP): 1136 case htons(ETH_P_IPV6): 1137 case htons(ETH_P_TIPC): 1138 neigh = ipoib_neigh_get(dev, phdr->hwaddr); 1139 if (unlikely(!neigh)) { 1140 neigh = neigh_add_path(skb, phdr->hwaddr, dev); 1141 if (likely(!neigh)) 1142 return NETDEV_TX_OK; 1143 } 1144 break; 1145 case htons(ETH_P_ARP): 1146 case htons(ETH_P_RARP): 1147 /* for unicast ARP and RARP should always perform path find */ 1148 unicast_arp_send(skb, dev, phdr); 1149 return NETDEV_TX_OK; 1150 default: 1151 /* ethertype not supported by IPoIB */ 1152 ++dev->stats.tx_dropped; 1153 dev_kfree_skb_any(skb); 1154 return NETDEV_TX_OK; 1155 } 1156 1157 send_using_neigh: 1158 /* note we now hold a ref to neigh */ 1159 if (ipoib_cm_get(neigh)) { 1160 if (ipoib_cm_up(neigh)) { 1161 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 1162 goto unref; 1163 } 1164 } else if (neigh->ah) { 1165 neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah, 1166 
IPOIB_QPN(phdr->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		push_pseudo_header(skb, phdr->hwaddr);
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * We don't rely on the dst_entry structure; always stuff the
	 * destination address into the skb hard header so we can figure out
	 * where to send the packet later.
	 */
	push_pseudo_header(skb, daddr);

	return IPOIB_HARD_LEN;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used as one can not connect to
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
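	 * The 20-byte hardware address is <flags/QPN (4 bytes)><GID (16 bytes)>,
	 * so the words hashed below are the masked QPN and the interface-ID
	 * (port GUID) half of the GID.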
1251 */ 1252 /* qpn octets[1:4) & port GUID octets[12:20) */ 1253 u32 *d32 = (u32 *) daddr; 1254 u32 hv; 1255 1256 hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0); 1257 return hv & htbl->mask; 1258 } 1259 1260 struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) 1261 { 1262 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1263 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1264 struct ipoib_neigh_hash *htbl; 1265 struct ipoib_neigh *neigh = NULL; 1266 u32 hash_val; 1267 1268 rcu_read_lock_bh(); 1269 1270 htbl = rcu_dereference_bh(ntbl->htbl); 1271 1272 if (!htbl) 1273 goto out_unlock; 1274 1275 hash_val = ipoib_addr_hash(htbl, daddr); 1276 for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]); 1277 neigh != NULL; 1278 neigh = rcu_dereference_bh(neigh->hnext)) { 1279 if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { 1280 /* found, take one ref on behalf of the caller */ 1281 if (!atomic_inc_not_zero(&neigh->refcnt)) { 1282 /* deleted */ 1283 neigh = NULL; 1284 goto out_unlock; 1285 } 1286 1287 if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) 1288 neigh->alive = jiffies; 1289 goto out_unlock; 1290 } 1291 } 1292 1293 out_unlock: 1294 rcu_read_unlock_bh(); 1295 return neigh; 1296 } 1297 1298 static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) 1299 { 1300 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1301 struct ipoib_neigh_hash *htbl; 1302 unsigned long neigh_obsolete; 1303 unsigned long dt; 1304 unsigned long flags; 1305 int i; 1306 LIST_HEAD(remove_list); 1307 1308 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1309 return; 1310 1311 spin_lock_irqsave(&priv->lock, flags); 1312 1313 htbl = rcu_dereference_protected(ntbl->htbl, 1314 lockdep_is_held(&priv->lock)); 1315 1316 if (!htbl) 1317 goto out_unlock; 1318 1319 /* neigh is obsolete if it was idle for two GC periods */ 1320 dt = 2 * arp_tbl.gc_interval; 1321 neigh_obsolete = jiffies - dt; 1322 /* handle possible race condition */ 1323 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1324 goto out_unlock; 1325 1326 for (i = 0; i < htbl->size; i++) { 1327 struct ipoib_neigh *neigh; 1328 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1329 1330 while ((neigh = rcu_dereference_protected(*np, 1331 lockdep_is_held(&priv->lock))) != NULL) { 1332 /* was the neigh idle for two GC periods */ 1333 if (time_after(neigh_obsolete, neigh->alive)) { 1334 1335 ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list); 1336 1337 rcu_assign_pointer(*np, 1338 rcu_dereference_protected(neigh->hnext, 1339 lockdep_is_held(&priv->lock))); 1340 /* remove from path/mc list */ 1341 list_del_init(&neigh->list); 1342 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1343 } else { 1344 np = &neigh->hnext; 1345 } 1346 1347 } 1348 } 1349 1350 out_unlock: 1351 spin_unlock_irqrestore(&priv->lock, flags); 1352 ipoib_mcast_remove_list(&remove_list); 1353 } 1354 1355 static void ipoib_reap_neigh(struct work_struct *work) 1356 { 1357 struct ipoib_dev_priv *priv = 1358 container_of(work, struct ipoib_dev_priv, neigh_reap_task.work); 1359 1360 __ipoib_reap_neigh(priv); 1361 1362 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 1363 queue_delayed_work(priv->wq, &priv->neigh_reap_task, 1364 arp_tbl.gc_interval); 1365 } 1366 1367 1368 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr, 1369 struct net_device *dev) 1370 { 1371 struct ipoib_neigh *neigh; 1372 1373 neigh = kzalloc(sizeof *neigh, GFP_ATOMIC); 1374 if (!neigh) 1375 return NULL; 1376 1377 neigh->dev = dev; 1378 memcpy(&neigh->daddr, 
daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}

struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc hash, maybe hash resize took place so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(ipoib_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
lockdep_is_held(&priv->lock)); 1496 n != NULL; 1497 n = rcu_dereference_protected(*np, 1498 lockdep_is_held(&priv->lock))) { 1499 if (n == neigh) { 1500 /* found */ 1501 rcu_assign_pointer(*np, 1502 rcu_dereference_protected(neigh->hnext, 1503 lockdep_is_held(&priv->lock))); 1504 /* remove from parent list */ 1505 list_del_init(&neigh->list); 1506 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1507 return; 1508 } else { 1509 np = &n->hnext; 1510 } 1511 } 1512 } 1513 1514 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) 1515 { 1516 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1517 struct ipoib_neigh_hash *htbl; 1518 struct ipoib_neigh __rcu **buckets; 1519 u32 size; 1520 1521 clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1522 ntbl->htbl = NULL; 1523 htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); 1524 if (!htbl) 1525 return -ENOMEM; 1526 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1527 size = roundup_pow_of_two(arp_tbl.gc_thresh3); 1528 buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL); 1529 if (!buckets) { 1530 kfree(htbl); 1531 return -ENOMEM; 1532 } 1533 htbl->size = size; 1534 htbl->mask = (size - 1); 1535 htbl->buckets = buckets; 1536 RCU_INIT_POINTER(ntbl->htbl, htbl); 1537 htbl->ntbl = ntbl; 1538 atomic_set(&ntbl->entries, 0); 1539 1540 /* start garbage collection */ 1541 clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1542 queue_delayed_work(priv->wq, &priv->neigh_reap_task, 1543 arp_tbl.gc_interval); 1544 1545 return 0; 1546 } 1547 1548 static void neigh_hash_free_rcu(struct rcu_head *head) 1549 { 1550 struct ipoib_neigh_hash *htbl = container_of(head, 1551 struct ipoib_neigh_hash, 1552 rcu); 1553 struct ipoib_neigh __rcu **buckets = htbl->buckets; 1554 struct ipoib_neigh_table *ntbl = htbl->ntbl; 1555 1556 kfree(buckets); 1557 kfree(htbl); 1558 complete(&ntbl->deleted); 1559 } 1560 1561 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) 1562 { 1563 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1564 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1565 struct ipoib_neigh_hash *htbl; 1566 unsigned long flags; 1567 int i; 1568 1569 /* remove all neigh connected to a given path or mcast */ 1570 spin_lock_irqsave(&priv->lock, flags); 1571 1572 htbl = rcu_dereference_protected(ntbl->htbl, 1573 lockdep_is_held(&priv->lock)); 1574 1575 if (!htbl) 1576 goto out_unlock; 1577 1578 for (i = 0; i < htbl->size; i++) { 1579 struct ipoib_neigh *neigh; 1580 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1581 1582 while ((neigh = rcu_dereference_protected(*np, 1583 lockdep_is_held(&priv->lock))) != NULL) { 1584 /* delete neighs belong to this parent */ 1585 if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) { 1586 rcu_assign_pointer(*np, 1587 rcu_dereference_protected(neigh->hnext, 1588 lockdep_is_held(&priv->lock))); 1589 /* remove from parent list */ 1590 list_del_init(&neigh->list); 1591 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1592 } else { 1593 np = &neigh->hnext; 1594 } 1595 1596 } 1597 } 1598 out_unlock: 1599 spin_unlock_irqrestore(&priv->lock, flags); 1600 } 1601 1602 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) 1603 { 1604 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1605 struct ipoib_neigh_hash *htbl; 1606 unsigned long flags; 1607 int i, wait_flushed = 0; 1608 1609 init_completion(&priv->ntbl.flushed); 1610 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1611 1612 spin_lock_irqsave(&priv->lock, flags); 1613 1614 htbl = rcu_dereference_protected(ntbl->htbl, 1615 lockdep_is_held(&priv->lock)); 1616 if (!htbl) 1617 goto out_unlock; 1618 1619 
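	/* ntbl->entries counts one reference per neigh still in the hash;
	 * ipoib_neigh_dtor() completes ntbl->flushed when the last entry is
	 * reclaimed, which is what the wait at the end of this function is for.
	 */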
wait_flushed = atomic_read(&priv->ntbl.entries); 1620 if (!wait_flushed) 1621 goto free_htbl; 1622 1623 for (i = 0; i < htbl->size; i++) { 1624 struct ipoib_neigh *neigh; 1625 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1626 1627 while ((neigh = rcu_dereference_protected(*np, 1628 lockdep_is_held(&priv->lock))) != NULL) { 1629 rcu_assign_pointer(*np, 1630 rcu_dereference_protected(neigh->hnext, 1631 lockdep_is_held(&priv->lock))); 1632 /* remove from path/mc list */ 1633 list_del_init(&neigh->list); 1634 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1635 } 1636 } 1637 1638 free_htbl: 1639 rcu_assign_pointer(ntbl->htbl, NULL); 1640 call_rcu(&htbl->rcu, neigh_hash_free_rcu); 1641 1642 out_unlock: 1643 spin_unlock_irqrestore(&priv->lock, flags); 1644 if (wait_flushed) 1645 wait_for_completion(&priv->ntbl.flushed); 1646 } 1647 1648 static void ipoib_neigh_hash_uninit(struct net_device *dev) 1649 { 1650 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1651 int stopped; 1652 1653 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); 1654 init_completion(&priv->ntbl.deleted); 1655 1656 /* Stop GC if called at init fail need to cancel work */ 1657 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1658 if (!stopped) 1659 cancel_delayed_work(&priv->neigh_reap_task); 1660 1661 ipoib_flush_neighs(priv); 1662 1663 wait_for_completion(&priv->ntbl.deleted); 1664 } 1665 1666 static void ipoib_napi_add(struct net_device *dev) 1667 { 1668 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1669 1670 netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC); 1671 netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE); 1672 } 1673 1674 static void ipoib_napi_del(struct net_device *dev) 1675 { 1676 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1677 1678 netif_napi_del(&priv->recv_napi); 1679 netif_napi_del(&priv->send_napi); 1680 } 1681 1682 static void ipoib_dev_uninit_default(struct net_device *dev) 1683 { 1684 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1685 1686 ipoib_transport_dev_cleanup(dev); 1687 1688 ipoib_napi_del(dev); 1689 1690 ipoib_cm_dev_cleanup(dev); 1691 1692 kfree(priv->rx_ring); 1693 vfree(priv->tx_ring); 1694 1695 priv->rx_ring = NULL; 1696 priv->tx_ring = NULL; 1697 } 1698 1699 static int ipoib_dev_init_default(struct net_device *dev) 1700 { 1701 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1702 1703 ipoib_napi_add(dev); 1704 1705 /* Allocate RX/TX "rings" to hold queued skbs */ 1706 priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, 1707 GFP_KERNEL); 1708 if (!priv->rx_ring) 1709 goto out; 1710 1711 priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); 1712 if (!priv->tx_ring) { 1713 pr_warn("%s: failed to allocate TX ring (%d entries)\n", 1714 priv->ca->name, ipoib_sendq_size); 1715 goto out_rx_ring_cleanup; 1716 } 1717 1718 /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ 1719 1720 if (ipoib_transport_dev_init(dev, priv->ca)) { 1721 pr_warn("%s: ipoib_transport_dev_init failed\n", 1722 priv->ca->name); 1723 goto out_tx_ring_cleanup; 1724 } 1725 1726 /* after qp created set dev address */ 1727 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; 1728 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; 1729 priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff; 1730 1731 return 0; 1732 1733 out_tx_ring_cleanup: 1734 vfree(priv->tx_ring); 1735 1736 out_rx_ring_cleanup: 1737 kfree(priv->rx_ring); 1738 1739 out: 1740 ipoib_napi_del(dev); 1741 return -ENOMEM; 1742 } 1743 1744 static int ipoib_ioctl(struct net_device 
*dev, struct ifreq *ifr, 1745 int cmd) 1746 { 1747 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1748 1749 if (!priv->rn_ops->ndo_do_ioctl) 1750 return -EOPNOTSUPP; 1751 1752 return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd); 1753 } 1754 1755 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) 1756 { 1757 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1758 int ret = -ENOMEM; 1759 1760 priv->ca = ca; 1761 priv->port = port; 1762 priv->qp = NULL; 1763 1764 /* 1765 * the various IPoIB tasks assume they will never race against 1766 * themselves, so always use a single thread workqueue 1767 */ 1768 priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM); 1769 if (!priv->wq) { 1770 pr_warn("%s: failed to allocate device WQ\n", dev->name); 1771 goto out; 1772 } 1773 1774 /* create pd, which used both for control and datapath*/ 1775 priv->pd = ib_alloc_pd(priv->ca, 0); 1776 if (IS_ERR(priv->pd)) { 1777 pr_warn("%s: failed to allocate PD\n", ca->name); 1778 goto clean_wq; 1779 } 1780 1781 ret = priv->rn_ops->ndo_init(dev); 1782 if (ret) { 1783 pr_warn("%s failed to init HW resource\n", dev->name); 1784 goto out_free_pd; 1785 } 1786 1787 if (ipoib_neigh_hash_init(priv) < 0) { 1788 pr_warn("%s failed to init neigh hash\n", dev->name); 1789 goto out_dev_uninit; 1790 } 1791 1792 if (dev->flags & IFF_UP) { 1793 if (ipoib_ib_dev_open(dev)) { 1794 pr_warn("%s failed to open device\n", dev->name); 1795 ret = -ENODEV; 1796 goto out_dev_uninit; 1797 } 1798 } 1799 1800 return 0; 1801 1802 out_dev_uninit: 1803 ipoib_ib_dev_cleanup(dev); 1804 1805 out_free_pd: 1806 if (priv->pd) { 1807 ib_dealloc_pd(priv->pd); 1808 priv->pd = NULL; 1809 } 1810 1811 clean_wq: 1812 if (priv->wq) { 1813 destroy_workqueue(priv->wq); 1814 priv->wq = NULL; 1815 } 1816 1817 out: 1818 return ret; 1819 } 1820 1821 void ipoib_dev_cleanup(struct net_device *dev) 1822 { 1823 struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv; 1824 LIST_HEAD(head); 1825 1826 ASSERT_RTNL(); 1827 1828 /* Delete any child interfaces first */ 1829 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { 1830 /* Stop GC on child */ 1831 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags); 1832 cancel_delayed_work(&cpriv->neigh_reap_task); 1833 unregister_netdevice_queue(cpriv->dev, &head); 1834 } 1835 unregister_netdevice_many(&head); 1836 1837 ipoib_neigh_hash_uninit(dev); 1838 1839 ipoib_ib_dev_cleanup(dev); 1840 1841 /* no more works over the priv->wq */ 1842 if (priv->wq) { 1843 flush_workqueue(priv->wq); 1844 destroy_workqueue(priv->wq); 1845 priv->wq = NULL; 1846 } 1847 } 1848 1849 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state) 1850 { 1851 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1852 1853 return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state); 1854 } 1855 1856 static int ipoib_get_vf_config(struct net_device *dev, int vf, 1857 struct ifla_vf_info *ivf) 1858 { 1859 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1860 int err; 1861 1862 err = ib_get_vf_config(priv->ca, vf, priv->port, ivf); 1863 if (err) 1864 return err; 1865 1866 ivf->vf = vf; 1867 1868 return 0; 1869 } 1870 1871 static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type) 1872 { 1873 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1874 1875 if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID) 1876 return -EINVAL; 1877 1878 return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type); 1879 } 1880 1881 static int ipoib_get_vf_stats(struct net_device *dev, int vf, 
1882 struct ifla_vf_stats *vf_stats) 1883 { 1884 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1885 1886 return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats); 1887 } 1888 1889 static const struct header_ops ipoib_header_ops = { 1890 .create = ipoib_hard_header, 1891 }; 1892 1893 static const struct net_device_ops ipoib_netdev_ops_pf = { 1894 .ndo_uninit = ipoib_uninit, 1895 .ndo_open = ipoib_open, 1896 .ndo_stop = ipoib_stop, 1897 .ndo_change_mtu = ipoib_change_mtu, 1898 .ndo_fix_features = ipoib_fix_features, 1899 .ndo_start_xmit = ipoib_start_xmit, 1900 .ndo_tx_timeout = ipoib_timeout, 1901 .ndo_set_rx_mode = ipoib_set_mcast_list, 1902 .ndo_get_iflink = ipoib_get_iflink, 1903 .ndo_set_vf_link_state = ipoib_set_vf_link_state, 1904 .ndo_get_vf_config = ipoib_get_vf_config, 1905 .ndo_get_vf_stats = ipoib_get_vf_stats, 1906 .ndo_set_vf_guid = ipoib_set_vf_guid, 1907 .ndo_set_mac_address = ipoib_set_mac, 1908 .ndo_get_stats64 = ipoib_get_stats, 1909 .ndo_do_ioctl = ipoib_ioctl, 1910 }; 1911 1912 static const struct net_device_ops ipoib_netdev_ops_vf = { 1913 .ndo_uninit = ipoib_uninit, 1914 .ndo_open = ipoib_open, 1915 .ndo_stop = ipoib_stop, 1916 .ndo_change_mtu = ipoib_change_mtu, 1917 .ndo_fix_features = ipoib_fix_features, 1918 .ndo_start_xmit = ipoib_start_xmit, 1919 .ndo_tx_timeout = ipoib_timeout, 1920 .ndo_set_rx_mode = ipoib_set_mcast_list, 1921 .ndo_get_iflink = ipoib_get_iflink, 1922 .ndo_get_stats64 = ipoib_get_stats, 1923 .ndo_do_ioctl = ipoib_ioctl, 1924 }; 1925 1926 void ipoib_setup_common(struct net_device *dev) 1927 { 1928 dev->header_ops = &ipoib_header_ops; 1929 1930 ipoib_set_ethtool_ops(dev); 1931 1932 dev->watchdog_timeo = HZ; 1933 1934 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1935 1936 dev->hard_header_len = IPOIB_HARD_LEN; 1937 dev->addr_len = INFINIBAND_ALEN; 1938 dev->type = ARPHRD_INFINIBAND; 1939 dev->tx_queue_len = ipoib_sendq_size * 2; 1940 dev->features = (NETIF_F_VLAN_CHALLENGED | 1941 NETIF_F_HIGHDMA); 1942 netif_keep_dst(dev); 1943 1944 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 1945 } 1946 1947 static void ipoib_build_priv(struct net_device *dev) 1948 { 1949 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1950 1951 priv->dev = dev; 1952 spin_lock_init(&priv->lock); 1953 init_rwsem(&priv->vlan_rwsem); 1954 mutex_init(&priv->mcast_mutex); 1955 mutex_init(&priv->sysfs_mutex); 1956 1957 INIT_LIST_HEAD(&priv->path_list); 1958 INIT_LIST_HEAD(&priv->child_intfs); 1959 INIT_LIST_HEAD(&priv->dead_ahs); 1960 INIT_LIST_HEAD(&priv->multicast_list); 1961 1962 INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); 1963 INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); 1964 INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); 1965 INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal); 1966 INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy); 1967 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); 1968 INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); 1969 INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh); 1970 } 1971 1972 static const struct net_device_ops ipoib_netdev_default_pf = { 1973 .ndo_init = ipoib_dev_init_default, 1974 .ndo_uninit = ipoib_dev_uninit_default, 1975 .ndo_open = ipoib_ib_dev_open_default, 1976 .ndo_stop = ipoib_ib_dev_stop_default, 1977 }; 1978 1979 static struct net_device 1980 *ipoib_create_netdev_default(struct ib_device *hca, 1981 const char *name, 1982 unsigned char name_assign_type, 1983 void (*setup)(struct net_device *)) 1984 { 1985 struct net_device *dev; 1986 
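	/* The default (software) datapath embeds struct rdma_netdev at the
	 * start of the netdev private area; ipoib_intf_alloc() later points
	 * rn->clnt_priv at the separately allocated ipoib_dev_priv.
	 */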
struct rdma_netdev *rn; 1987 1988 dev = alloc_netdev((int)sizeof(struct rdma_netdev), 1989 name, 1990 name_assign_type, setup); 1991 if (!dev) 1992 return NULL; 1993 1994 rn = netdev_priv(dev); 1995 1996 rn->send = ipoib_send; 1997 rn->attach_mcast = ipoib_mcast_attach; 1998 rn->detach_mcast = ipoib_mcast_detach; 1999 rn->free_rdma_netdev = free_netdev; 2000 rn->hca = hca; 2001 2002 dev->netdev_ops = &ipoib_netdev_default_pf; 2003 2004 return dev; 2005 } 2006 2007 static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port, 2008 const char *name) 2009 { 2010 struct net_device *dev; 2011 2012 if (hca->alloc_rdma_netdev) { 2013 dev = hca->alloc_rdma_netdev(hca, port, 2014 RDMA_NETDEV_IPOIB, name, 2015 NET_NAME_UNKNOWN, 2016 ipoib_setup_common); 2017 if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP) 2018 return NULL; 2019 } 2020 2021 if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP) 2022 dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN, 2023 ipoib_setup_common); 2024 2025 return dev; 2026 } 2027 2028 struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port, 2029 const char *name) 2030 { 2031 struct net_device *dev; 2032 struct ipoib_dev_priv *priv; 2033 struct rdma_netdev *rn; 2034 2035 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2036 if (!priv) 2037 return NULL; 2038 2039 dev = ipoib_get_netdev(hca, port, name); 2040 if (!dev) 2041 goto free_priv; 2042 2043 priv->rn_ops = dev->netdev_ops; 2044 2045 /* fixme : should be after the query_cap */ 2046 if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION) 2047 dev->netdev_ops = &ipoib_netdev_ops_vf; 2048 else 2049 dev->netdev_ops = &ipoib_netdev_ops_pf; 2050 2051 rn = netdev_priv(dev); 2052 rn->clnt_priv = priv; 2053 ipoib_build_priv(dev); 2054 2055 return priv; 2056 free_priv: 2057 kfree(priv); 2058 return NULL; 2059 } 2060 2061 static ssize_t show_pkey(struct device *dev, 2062 struct device_attribute *attr, char *buf) 2063 { 2064 struct net_device *ndev = to_net_dev(dev); 2065 struct ipoib_dev_priv *priv = ipoib_priv(ndev); 2066 2067 return sprintf(buf, "0x%04x\n", priv->pkey); 2068 } 2069 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 2070 2071 static ssize_t show_umcast(struct device *dev, 2072 struct device_attribute *attr, char *buf) 2073 { 2074 struct net_device *ndev = to_net_dev(dev); 2075 struct ipoib_dev_priv *priv = ipoib_priv(ndev); 2076 2077 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags)); 2078 } 2079 2080 void ipoib_set_umcast(struct net_device *ndev, int umcast_val) 2081 { 2082 struct ipoib_dev_priv *priv = ipoib_priv(ndev); 2083 2084 if (umcast_val > 0) { 2085 set_bit(IPOIB_FLAG_UMCAST, &priv->flags); 2086 ipoib_warn(priv, "ignoring multicast groups joined directly " 2087 "by userspace\n"); 2088 } else 2089 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags); 2090 } 2091 2092 static ssize_t set_umcast(struct device *dev, 2093 struct device_attribute *attr, 2094 const char *buf, size_t count) 2095 { 2096 unsigned long umcast_val = simple_strtoul(buf, NULL, 0); 2097 2098 ipoib_set_umcast(to_net_dev(dev), umcast_val); 2099 2100 return count; 2101 } 2102 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast); 2103 2104 int ipoib_add_umcast_attr(struct net_device *dev) 2105 { 2106 return device_create_file(&dev->dev, &dev_attr_umcast); 2107 } 2108 2109 static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) 2110 { 2111 struct ipoib_dev_priv *child_priv; 2112 struct net_device *netdev = priv->dev; 2113 2114 netif_addr_lock_bh(netdev); 
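	/* Only the interface ID (GUID) half of the GID is replaced; the subnet
	 * prefix in local_gid and the QPN bytes at the start of dev_addr are
	 * left untouched.
	 */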

        memcpy(&priv->local_gid.global.interface_id,
               &gid->global.interface_id,
               sizeof(gid->global.interface_id));
        memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
        clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

        netif_addr_unlock_bh(netdev);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                down_read(&priv->vlan_rwsem);
                list_for_each_entry(child_priv, &priv->child_intfs, list)
                        set_base_guid(child_priv, gid);
                up_read(&priv->vlan_rwsem);
        }
}

static int ipoib_check_lladdr(struct net_device *dev,
                              struct sockaddr_storage *ss)
{
        union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
        int ret = 0;

        netif_addr_lock_bh(dev);

        /* Make sure the QPN, reserved fields and subnet prefix match the
         * current lladdr; this also makes sure the lladdr is unicast.
         */
        if (memcmp(dev->dev_addr, ss->__data,
                   4 + sizeof(gid->global.subnet_prefix)) ||
            gid->global.interface_id == 0)
                ret = -EINVAL;

        netif_addr_unlock_bh(dev);

        return ret;
}

static int ipoib_set_mac(struct net_device *dev, void *addr)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct sockaddr_storage *ss = addr;
        int ret;

        if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
                return -EBUSY;

        ret = ipoib_check_lladdr(dev, ss);
        if (ret)
                return ret;

        set_base_guid(priv, (union ib_gid *)(ss->__data + 4));

        queue_work(ipoib_workqueue, &priv->flush_light);

        return 0;
}

static ssize_t create_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

        return ret ? ret : count;
}
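
/*
 * Illustrative usage of the create_child/delete_child attributes (shell
 * example, not part of the driver): a child interface for P_Key 0x8001
 * would typically be created and removed with
 *
 *   echo 0x8001 > /sys/class/net/ib0/create_child
 *   echo 0x8001 > /sys/class/net/ib0/delete_child
 *
 * where "ib0" is assumed to be the name of the parent IPoIB interface.
 */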
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_pkey);
}

void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
        priv->hca_caps = hca->attrs.device_cap_flags;

        if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
                priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (priv->hca_caps & IB_DEVICE_UD_TSO)
                        priv->dev->hw_features |= NETIF_F_TSO;

                priv->dev->features |= priv->dev->hw_features;
        }
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        struct ib_port_attr attr;
        struct rdma_netdev *rn;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(hca, port, format);
        if (!priv) {
                pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
                goto alloc_mem_failed;
        }

        SET_NETDEV_DEV(priv->dev, hca->dev.parent);
        priv->dev->dev_id = port - 1;

        result = ib_query_port(hca, port, &attr);
        if (result) {
                pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
                goto device_init_failed;
        }

        priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);

        /* MTU will be reset when mcast join happens */
        priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
        priv->dev->max_mtu = IPOIB_CM_MTU;

        priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
                        hca->name, port, result);
                goto device_init_failed;
        }

        ipoib_set_dev_features(priv, hca);

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
        if (result) {
                pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
                        hca->name, port, result);
                goto device_init_failed;
        }

        memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
               sizeof(union ib_gid));
        set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result) {
                pr_warn("%s: failed to initialize port %d (ret = %d)\n",
                        hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        ib_register_event_handler(&priv->event_handler);

        /* call event handler to ensure pkey in sync */
        queue_work(ipoib_workqueue, &priv->flush_heavy);

        result = register_netdev(priv->dev);
        if (result) {
                pr_warn("%s: couldn't register ipoib port %d; error %d\n",
                        hca->name, port, result);
                goto register_failed;
        }

        result = -ENOMEM;
        if (ipoib_cm_add_mode_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (ipoib_add_umcast_attr(priv->dev))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
                goto sysfs_failed;
        if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_workqueue(ipoib_workqueue);
        /* Stop GC if started before flush */
        set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
        cancel_delayed_work(&priv->neigh_reap_task);
        flush_workqueue(priv->wq);
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        rn = netdev_priv(priv->dev);
        rn->free_rdma_netdev(priv->dev);
        kfree(priv);

alloc_mem_failed:
        return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int p;
        int count = 0;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                if (!rdma_protocol_ib(device, p))
                        continue;
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = ipoib_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                        count++;
                }
        }

        if (!count) {
                kfree(dev_list);
                return;
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
        struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
        struct list_head *dev_list = client_data;

        if (!dev_list)
                return;

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                struct rdma_netdev *parent_rn = netdev_priv(priv->dev);

                ib_unregister_event_handler(&priv->event_handler);
                flush_workqueue(ipoib_workqueue);

                /* mark interface in the middle of destruction */
                set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);

                rtnl_lock();
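                /*
                 * Administratively bring the interface down under RTNL
                 * before it is unregistered and its resources are freed.
                 */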
                dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
                rtnl_unlock();

                /* Stop GC */
                set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
                cancel_delayed_work(&priv->neigh_reap_task);
                flush_workqueue(priv->wq);

                /* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
                mutex_lock(&priv->sysfs_mutex);
                unregister_netdev(priv->dev);
                mutex_unlock(&priv->sysfs_mutex);

                parent_rn->free_rdma_netdev(priv->dev);

                list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                        struct rdma_netdev *child_rn;

                        child_rn = netdev_priv(cpriv->dev);
                        child_rn->free_rdma_netdev(cpriv->dev);
                        kfree(cpriv);
                }

                kfree(priv);
        }

        kfree(dev_list);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static struct notifier_block ipoib_netdev_notifier = {
        .notifier_call = ipoib_netdev_event,
};
#endif

static int __init ipoib_init_module(void)
{
        int ret;

        ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
        ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
        ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE,
                                IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
        ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif

        /*
         * When copying small received packets, we only copy from the
         * linear data part of the SKB, so we rely on this condition.
         */
        BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create a global workqueue here that is used for all flush
         * operations.  However, if you attempt to flush a workqueue
         * from a task on that same workqueue, it deadlocks the system.
         * We want to be able to flush the tasks associated with a
         * specific net device, so we also create a workqueue for each
         * netdevice.  We queue up the tasks for that device only on
         * its private workqueue, and we only queue up flush events
         * on our global flush workqueue.  This avoids the deadlocks.
         */
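        /*
         * Note: an ordered workqueue executes at most one work item at a
         * time, and WQ_MEM_RECLAIM provides a rescuer thread so flush work
         * can make forward progress even under memory pressure.
         */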
        ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
                                                  WQ_MEM_RECLAIM);
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ib_sa_register_client(&ipoib_sa_client);

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_sa;

        ret = ipoib_netlink_init();
        if (ret)
                goto err_client;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
        register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
        return 0;

err_client:
        ib_unregister_client(&ipoib_client);

err_sa:
        ib_sa_unregister_client(&ipoib_sa_client);
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();

        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
        unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
        ipoib_netlink_fini();
        ib_unregister_client(&ipoib_client);
        ib_sa_unregister_client(&ipoib_sa_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);
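
/*
 * Illustrative example (not part of the driver): the per-port sysfs
 * attributes registered in ipoib_add_port() can be inspected from
 * userspace, assuming a parent interface named "ib0":
 *
 *   cat /sys/class/net/ib0/pkey       # partition key, e.g. 0xffff
 *   cat /sys/class/net/ib0/umcast     # 0 or 1
 *   echo 1 > /sys/class/net/ib0/umcast
 */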