/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd);

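/*
 * IPoIB registers as a client of the IB core: the core calls .add and
 * .remove once for each IB device as devices come and go, and uses
 * .get_net_dev_by_params to map L2/L3 parameters (port, P_Key, GID,
 * and optionally an IP address) back to the matching IPoIB netdev.
 */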
static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int ipoib_netdev_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct netdev_notifier_info *ni = ptr;
	struct net_device *dev = ni->dev;

	if (dev->netdev_ops->ndo_open != ipoib_open)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_CHANGENAME:
		ipoib_delete_debug_files(dev);
		ipoib_create_debug_files(dev);
		break;
	case NETDEV_UNREGISTER:
		ipoib_delete_debug_files(dev);
		break;
	}

	return NOTIFY_DONE;
}
#endif

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	priv->sm_fullmember_sendonly_support = false;

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	ipoib_ib_dev_up(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}

static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}

static netdev_features_t ipoib_fix_features(struct net_device *dev,
					    netdev_features_t features)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

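/*
 * MTU handling differs by mode.  In datagram (UD) mode the usable MTU
 * is the IB link MTU minus the 4-byte IPoIB encapsulation header
 * (IPOIB_UD_MTU()), and is further capped by the multicast group MTU.
 * In connected (CM) mode the MTU is limited only by ipoib_cm_max_mtu(),
 * on the order of 64 KB, since RC connections segment independently of
 * the link MTU.
 */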
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = 0;

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	if (priv->mcast_mtu < priv->admin_mtu)
		ipoib_dbg(priv, "MTU must be smaller than the underlying "
				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);

	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	if (priv->rn_ops->ndo_change_mtu) {
		bool carrier_status = netif_carrier_ok(dev);

		netif_carrier_off(dev);

		/* notify lower level on the real mtu */
		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);

		if (carrier_status)
			netif_carrier_on(dev);
	} else {
		dev->mtu = new_mtu;
	}

	return ret;
}

static void ipoib_get_stats(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (priv->rn_ops->ndo_get_stats64)
		priv->rn_ops->ndo_get_stats64(dev, stats);
	else
		netdev_stats_to_stats64(stats, &dev->stats);
}

/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}

/**
 * ipoib_get_master_net_dev - find the master net_device on top of the
 * given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same
 * net_device if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}

struct ipoib_walk_data {
	const struct sockaddr *addr;
	struct net_device *result;
};

static int ipoib_upper_walk(struct net_device *upper, void *_data)
{
	struct ipoib_walk_data *data = _data;
	int ret = 0;

	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
		dev_hold(upper);
		data->result = upper;
		ret = 1;
	}

	return ret;
}

/**
 * ipoib_get_net_dev_match_addr - find a net_device matching the given
 * address, which is an upper device of the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held.  Otherwise
 * returns NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct ipoib_walk_data data = {
		.addr = addr,
	};

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		data.result = dev;
		goto out;
	}

	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
out:
	rcu_read_unlock();
	return data.result;
}

/* Returns the number of IPoIB netdevs on top of a given ipoib device
 * matching a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held.
 */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID.
			 */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}

/* Returns the number of matching net_devs found (between 0 and 2).  Also
 * return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1.
 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}

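/*
 * Resolution is two-pass: first try to find a unique netdev by L2
 * parameters alone (port, P_Key index, GID); only if that is ambiguous
 * retry with the L3 (IP) address included.  More than one match on the
 * second pass means a duplicate IP address exists on the fabric.
 */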
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only.  Use L3
	 * address to uniquely match the net device.
	 */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}

int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "connected\n")) ||
	    (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
	     !strcmp(buf, "datagram\n"))) {
		return 0;
	}

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		rtnl_unlock();
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);
		return (!rtnl_trylock()) ? -EBUSY : 0;
	}

	return -EINVAL;
}

struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof(union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(ipoib_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof(union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
			  path->pathrec.dgid.raw);
		if (path->ah)
			path->ah->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
{
	struct ipoib_pseudo_header *phdr;

	phdr = skb_push(skb, sizeof(*phdr));
	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

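/*
 * SA path record completion handler.  On success an address handle is
 * built from the returned path, skbs queued on the path and on its
 * neighbours while resolution was in flight are drained and handed back
 * to dev_queue_xmit(), and CM transmit contexts are created for
 * neighbours using connected mode.  On failure every neighbour hanging
 * off this path is torn down.
 */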
static void path_rec_completion(int status,
				struct sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct rdma_ah_attr av;

		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
					       pathrec, &av, NULL)) {
			ah = ipoib_create_ah(dev, priv->pd, &av);
			rdma_destroy_ah_attr(&av);
		}
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		/*
		 * pathrec.dgid is used as the database key from the LLADDR;
		 * it must remain unchanged even if the SA returns a different
		 * GID to use in the AH.
		 */
		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
			   sizeof(union ib_gid))) {
			ipoib_dbg(
				priv,
				"%s got PathRec for gid %pI6 while asked for %pI6\n",
				dev->name, pathrec->dgid.raw,
				path->pathrec.dgid.raw);
			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
			       sizeof(union ib_gid));
		}

		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
			  pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->ah->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		int ret;

		skb->dev = dev;
		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
	}
}

static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
			  void *gid)
{
	path->dev = priv->dev;

	if (rdma_cap_opa_ah(priv->ca, priv->port))
		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;

	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof(*path), GFP_ATOMIC);
	if (!path)
		return NULL;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	init_path_rec(priv, path, gid);

	return path;
}

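/*
 * Kick off an asynchronous SA path record query.  The component mask
 * names the fields the SA must match (DGID, SGID, number of paths,
 * traffic class and P_Key), the timeout is 1000 ms, and
 * path_rec_completion() runs when the query finishes or fails.
 */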
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
			       struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, daddr + 4);
	if (!path)
		goto out;
	if (!path->query)
		path_rec_start(dev, path);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
}

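/*
 * Create (or look up) the neighbour entry for a unicast destination and
 * attach it to the matching path.  daddr + 4 skips the QPN portion of
 * the 20-byte hardware address to reach the destination GID, which keys
 * the path table.  Returns NULL when the skb has been consumed (sent or
 * queued behind the pending path query); a non-NULL return hands the
 * caller a referenced neighbour to transmit with.
 */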
static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
					  struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NULL;
	}

	/* To avoid race conditions, make sure that the
	 * neigh will be added only once.
	 */
	if (unlikely(!list_empty(&neigh->list))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return neigh;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah && path->ah->valid) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) <
			    IPOIB_MAX_PATH_REC_QUEUE) {
				push_pseudo_header(skb, neigh->daddr);
				__skb_queue_tail(&neigh->queue, skb);
			} else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
						       IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return NULL;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, neigh->daddr);
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			goto err_drop;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return NULL;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);

	return NULL;
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudo_header *phdr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* no broadcast means that all paths are (going to be) not valid */
	if (!priv->broadcast)
		goto drop_and_unlock;

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->ah || !path->ah->valid) {
		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			if (!path)
				goto drop_and_unlock;
			__path_add(dev, path);
		} else {
			/*
			 * make sure there are no changes in the existing
			 * path record
			 */
			init_path_rec(priv, path, phdr->hwaddr + 4);
		}
		if (!path->query && path_rec_start(dev, path))
			goto drop_and_unlock;

		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			push_pseudo_header(skb, phdr->hwaddr);
			__skb_queue_tail(&path->queue, skb);
			goto unlock;
		} else {
			goto drop_and_unlock;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_dbg(priv, "Send unicast ARP to %08x\n",
		  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
				       IPOIB_QPN(phdr->hwaddr));
	return;

drop_and_unlock:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);
unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

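/*
 * Transmit entry point.  The 20-byte destination hardware address is
 * carried in an ipoib_pseudo_header pushed by ipoib_hard_header(); it
 * is stripped here before the real 4-byte IPoIB header goes on the
 * wire.  Byte 4 of the hardware address is the first byte of the GID,
 * which is 0xff only for multicast group addresses, so it doubles as
 * the multicast test.
 */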
static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;
	unsigned long flags;

	phdr = (struct ipoib_pseudo_header *) skb->data;
	skb_pull(skb, sizeof(*phdr));
	header = (struct ipoib_header *) skb->data;

	if (unlikely(phdr->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		phdr->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, phdr->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (unlikely(!neigh)) {
			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
			if (likely(!neigh))
				return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path lookup */
		unicast_arp_send(skb, dev, phdr);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah && neigh->ah->valid) {
		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
						IPOIB_QPN(phdr->hwaddr));
		goto unref;
	} else if (neigh->ah) {
		neigh_refresh_path(neigh, phdr->hwaddr, dev);
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		push_pseudo_header(skb, phdr->hwaddr);
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

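/*
 * IPoIB puts only a 4-byte header (ethertype + reserved) on the wire;
 * the destination is addressed by AH/QPN rather than by an L2 header.
 * The 20-byte destination address therefore rides in front of the
 * packet as an ipoib_pseudo_header (IPOIB_HARD_LEN = 4 + 20 bytes of
 * headroom), to be consumed by ipoib_start_xmit().
 */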
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr,
			     const void *saddr,
			     unsigned int len)
{
	struct ipoib_header *header;

	header = skb_push(skb, sizeof(*header));

	header->proto	 = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on the dst_entry structure; always stuff the
	 * destination address into the skb hard header so we can figure out
	 * where to send the packet later.
	 */
	push_pseudo_header(skb, daddr);

	return IPOIB_HARD_LEN;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}

static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}

static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used, as one cannot connect to the
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}

			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
				neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

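/*
 * Neighbour garbage collection.  Lookups walk the hash table under
 * rcu_read_lock_bh() only, so removal here just unlinks the entry
 * under priv->lock and defers the actual free to an RCU grace period
 * via ipoib_neigh_reclaim().  An entry is considered obsolete once it
 * has been idle for two ARP GC intervals.
 */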
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {

				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_mcast_remove_list(&remove_list);
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}

static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}

struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc hash, maybe hash resize took place so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;

	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(ipoib_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);

	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}

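/*
 * The neighbour hash table is sized from the kernel ARP table's
 * gc_thresh3, rounded up to a power of two so a mask can replace a
 * modulo in ipoib_addr_hash().  GC is kept stopped while the table is
 * being swapped in, then the periodic reap work is (re)armed.
 */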
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kvfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof(union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del_init(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

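/*
 * Tear down the whole neighbour table.  Every entry is unlinked under
 * priv->lock and freed via RCU; ntbl.entries counts the outstanding
 * entries, and the last ipoib_neigh_dtor() completes ntbl.flushed so
 * the flush can wait for all RCU callbacks to run before the bucket
 * array itself is freed.
 */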
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del_init(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);

	/* Stop GC; if we are called after an init failure we also need to
	 * cancel the pending work.
	 */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}

static void ipoib_napi_add(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
	netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
}

static void ipoib_napi_del(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_napi_del(&priv->recv_napi);
	netif_napi_del(&priv->send_napi);
}

static void ipoib_dev_uninit_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_transport_dev_cleanup(dev);

	ipoib_napi_del(dev);

	ipoib_cm_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static int ipoib_dev_init_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_napi_add(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kcalloc(ipoib_recvq_size,
				sizeof(*priv->rx_ring),
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto out;

	priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
					   sizeof(*priv->tx_ring)));
	if (!priv->tx_ring) {
		pr_warn("%s: failed to allocate TX ring (%d entries)\n",
			priv->ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_transport_dev_init(dev, priv->ca)) {
		pr_warn("%s: ipoib_transport_dev_init failed\n",
			priv->ca->name);
		goto out_tx_ring_cleanup;
	}

	/* after qp created set dev address */
	priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
	priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	ipoib_napi_del(dev);
	return -ENOMEM;
}

static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
		       int cmd)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!priv->rn_ops->ndo_do_ioctl)
		return -EOPNOTSUPP;

	return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
}

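/*
 * Per-device initialization, in dependency order: a single-threaded
 * workqueue (the IPoIB tasks assume they never race against
 * themselves), a PD shared by the control and data paths, the
 * rdma_netdev's own HW resources via ndo_init, and finally the
 * neighbour hash.  Each failure path unwinds exactly the steps that
 * already succeeded.
 */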
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret = -ENOMEM;

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	/*
	 * the various IPoIB tasks assume they will never race against
	 * themselves, so always use a single thread workqueue
	 */
	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
	if (!priv->wq) {
		pr_warn("%s: failed to allocate device WQ\n", dev->name);
		goto out;
	}

	/* create PD, which is used for both control and data path */
	priv->pd = ib_alloc_pd(priv->ca, 0);
	if (IS_ERR(priv->pd)) {
		pr_warn("%s: failed to allocate PD\n", ca->name);
		goto clean_wq;
	}

	ret = priv->rn_ops->ndo_init(dev);
	if (ret) {
		pr_warn("%s failed to init HW resource\n", dev->name);
		goto out_free_pd;
	}

	ret = ipoib_neigh_hash_init(priv);
	if (ret) {
		pr_warn("%s failed to init neigh hash\n", dev->name);
		goto out_dev_uninit;
	}

	if (dev->flags & IFF_UP) {
		if (ipoib_ib_dev_open(dev)) {
			pr_warn("%s failed to open device\n", dev->name);
			ret = -ENODEV;
			goto out_dev_uninit;
		}
	}

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_free_pd:
	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}

clean_wq:
	if (priv->wq) {
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}

out:
	return ret;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	/* no more works over the priv->wq */
	if (priv->wq) {
		flush_workqueue(priv->wq);
		destroy_workqueue(priv->wq);
		priv->wq = NULL;
	}
}

static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

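/*
 * Two ndo tables: the PF variant exposes the SR-IOV configuration
 * hooks (VF link state, GUIDs, per-VF stats); the VF variant is the
 * same table minus those, since a VF cannot configure its siblings.
 */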
static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
	.ndo_get_vf_config	 = ipoib_get_vf_config,
	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
	.ndo_set_mac_address	 = ipoib_set_mac,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_do_ioctl		 = ipoib_ioctl,
};

static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_get_stats64	 = ipoib_get_stats,
	.ndo_do_ioctl		 = ipoib_ioctl,
};

void ipoib_setup_common(struct net_device *dev)
{
	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_HARD_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED |
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
}

static void ipoib_build_priv(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->dev = dev;
	spin_lock_init(&priv->lock);
	init_rwsem(&priv->vlan_rwsem);
	mutex_init(&priv->mcast_mutex);
	mutex_init(&priv->sysfs_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

static const struct net_device_ops ipoib_netdev_default_pf = {
	.ndo_init	 = ipoib_dev_init_default,
	.ndo_uninit	 = ipoib_dev_uninit_default,
	.ndo_open	 = ipoib_ib_dev_open_default,
	.ndo_stop	 = ipoib_ib_dev_stop_default,
};

static struct net_device
*ipoib_create_netdev_default(struct ib_device *hca,
			     const char *name,
			     unsigned char name_assign_type,
			     void (*setup)(struct net_device *))
{
	struct net_device *dev;
	struct rdma_netdev *rn;

	dev = alloc_netdev((int)sizeof(struct rdma_netdev),
			   name,
			   name_assign_type, setup);
	if (!dev)
		return NULL;

	rn = netdev_priv(dev);

	rn->send = ipoib_send;
	rn->attach_mcast = ipoib_mcast_attach;
	rn->detach_mcast = ipoib_mcast_detach;
	rn->free_rdma_netdev = free_netdev;
	rn->hca = hca;

	dev->netdev_ops = &ipoib_netdev_default_pf;

	return dev;
}

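/*
 * Prefer a hardware-accelerated rdma_netdev if the HCA offers one via
 * alloc_rdma_netdev(); fall back to the software implementation above
 * when the driver has no such support or returns -EOPNOTSUPP.
 */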
static struct net_device *ipoib_get_netdev(struct ib_device *hca, u8 port,
					   const char *name)
{
	struct net_device *dev;

	if (hca->alloc_rdma_netdev) {
		dev = hca->alloc_rdma_netdev(hca, port,
					     RDMA_NETDEV_IPOIB, name,
					     NET_NAME_UNKNOWN,
					     ipoib_setup_common);
		if (IS_ERR_OR_NULL(dev) && PTR_ERR(dev) != -EOPNOTSUPP)
			return NULL;
	}

	if (!hca->alloc_rdma_netdev || PTR_ERR(dev) == -EOPNOTSUPP)
		dev = ipoib_create_netdev_default(hca, name, NET_NAME_UNKNOWN,
						  ipoib_setup_common);

	return dev;
}

struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
					const char *name)
{
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	struct rdma_netdev *rn;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	dev = ipoib_get_netdev(hca, port, name);
	if (!dev)
		goto free_priv;

	priv->rn_ops = dev->netdev_ops;

	/* fixme : should be after the query_cap */
	if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
		dev->netdev_ops	= &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops	= &ipoib_netdev_ops_pf;

	rn = netdev_priv(dev);
	rn->clnt_priv = priv;
	ipoib_build_priv(dev);

	return priv;
free_priv:
	kfree(priv);
	return NULL;
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = ipoib_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *netdev = priv->dev;

	netif_addr_lock_bh(netdev);

	memcpy(&priv->local_gid.global.interface_id,
	       &gid->global.interface_id,
	       sizeof(gid->global.interface_id));
	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	netif_addr_unlock_bh(netdev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(child_priv, &priv->child_intfs, list)
			set_base_guid(child_priv, gid);
		up_read(&priv->vlan_rwsem);
	}
}

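/*
 * Only the GUID half of the GID may be changed by userspace: the QPN,
 * the reserved/flags bytes and the subnet prefix of the new lladdr
 * must match the current address, and the interface ID must be
 * non-zero, i.e. the address stays unicast.
 */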
static int ipoib_check_lladdr(struct net_device *dev,
			      struct sockaddr_storage *ss)
{
	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
	int ret = 0;

	netif_addr_lock_bh(dev);

	/* Make sure the QPN, reserved field and subnet prefix match the
	 * current lladdr; this also ensures the lladdr is unicast.
	 */
	if (memcmp(dev->dev_addr, ss->__data,
		   4 + sizeof(gid->global.subnet_prefix)) ||
	    gid->global.interface_id == 0)
		ret = -EINVAL;

	netif_addr_unlock_bh(dev);

	return ret;
}

static int ipoib_set_mac(struct net_device *dev, void *addr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sockaddr_storage *ss = addr;
	int ret;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;

	ret = ipoib_check_lladdr(dev, ss);
	if (ret)
		return ret;

	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
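
/*
 * Example usage of the two attributes above from userspace (the
 * interface name is illustrative):
 *
 *   echo 0x8001 > /sys/class/net/ib0/create_child
 *   echo 0x8001 > /sys/class/net/ib0/delete_child
 *
 * The full-membership bit is ORed in by create_child itself, so
 * "echo 0x0001" creates the same child as "echo 0x8001".
 */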

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

void ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	priv->hca_caps = hca->attrs.device_cap_flags;

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}
}
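
/*
 * Allocate and register a single IPoIB interface for one port of an
 * HCA: query the port attributes, P_Key 0 and GID 0, derive the MTU
 * and hardware address from them, then register the netdev and its
 * sysfs attributes.  Returns the new net_device or an ERR_PTR().
 */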
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	struct rdma_netdev *rn;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(hca, port, format);
	if (!priv) {
		pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
		goto alloc_mem_failed;
	}

	SET_NETDEV_DEV(priv->dev, hca->dev.parent);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (result) {
		pr_warn("%s: ib_query_port %d failed (ret = %d)\n",
			hca->name, port, result);
		goto device_init_failed;
	}

	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
	priv->dev->max_mtu = IPOIB_CM_MTU;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
			hca->name, port, result);
		goto device_init_failed;
	}

	ipoib_set_dev_features(priv, hca);

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = rdma_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
			hca->name, port, result);
		goto device_init_failed;
	}

	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
	       sizeof(union ib_gid));
	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result) {
		pr_warn("%s: failed to initialize port %d (ret = %d)\n",
			hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	ib_register_event_handler(&priv->event_handler);

	/* call event handler to ensure pkey in sync */
	queue_work(ipoib_workqueue, &priv->flush_heavy);

	result = register_netdev(priv->dev);
	if (result) {
		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
			hca->name, port, result);
		goto register_failed;
	}

	result = -ENOMEM;
	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);
	/* Stop GC if it was started before the flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(priv->wq);
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	rn = netdev_priv(priv->dev);
	rn->free_rdma_netdev(priv->dev);
	kfree(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int p;
	int count = 0;

	dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = ipoib_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
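
/*
 * Tear down everything ipoib_add_one() set up, roughly in reverse
 * order: stop event handling and any queued flushes first, take the
 * interface down, stop the neighbour GC, then unregister and free
 * the netdev together with any remaining child interfaces.
 */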
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		struct rdma_netdev *parent_rn = netdev_priv(priv->dev);

		ib_unregister_event_handler(&priv->event_handler);
		flush_workqueue(ipoib_workqueue);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		/* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
		mutex_lock(&priv->sysfs_mutex);
		unregister_netdev(priv->dev);
		mutex_unlock(&priv->sysfs_mutex);

		parent_rn->free_rdma_netdev(priv->dev);

		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
					 list) {
			struct rdma_netdev *child_rn;

			child_rn = netdev_priv(cpriv->dev);
			child_rn->free_rdma_netdev(cpriv->dev);
			kfree(cpriv);
		}

		kfree(priv);
	}

	kfree(dev_list);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static struct notifier_block ipoib_netdev_notifier = {
	.notifier_call = ipoib_netdev_event,
};
#endif

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE,
				IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
						  WQ_MEM_RECLAIM);
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);