/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <linux/jhash.h>
#include <net/arp.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_neigh_reclaim(struct rcu_head *rp);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};
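
/*
 * ipoib_open() marks the interface administratively up, brings the IB
 * side up, and then opens any child (P_Key) interfaces so that a
 * parent going up takes its children with it.  If the P_Key is not yet
 * present on the port, ipoib_pkey_dev_delay_open() defers the bring-up
 * and we return success; the open completes once the P_Key appears.
 */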
ipoib_dbg(priv, "bringing up interface\n"); 106 107 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 108 109 if (ipoib_pkey_dev_delay_open(dev)) 110 return 0; 111 112 if (ipoib_ib_dev_open(dev)) 113 goto err_disable; 114 115 if (ipoib_ib_dev_up(dev)) 116 goto err_stop; 117 118 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 119 struct ipoib_dev_priv *cpriv; 120 121 /* Bring up any child interfaces too */ 122 mutex_lock(&priv->vlan_mutex); 123 list_for_each_entry(cpriv, &priv->child_intfs, list) { 124 int flags; 125 126 flags = cpriv->dev->flags; 127 if (flags & IFF_UP) 128 continue; 129 130 dev_change_flags(cpriv->dev, flags | IFF_UP); 131 } 132 mutex_unlock(&priv->vlan_mutex); 133 } 134 135 netif_start_queue(dev); 136 137 return 0; 138 139 err_stop: 140 ipoib_ib_dev_stop(dev, 1); 141 142 err_disable: 143 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 144 145 return -EINVAL; 146 } 147 148 static int ipoib_stop(struct net_device *dev) 149 { 150 struct ipoib_dev_priv *priv = netdev_priv(dev); 151 152 ipoib_dbg(priv, "stopping interface\n"); 153 154 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 155 156 netif_stop_queue(dev); 157 158 ipoib_ib_dev_down(dev, 1); 159 ipoib_ib_dev_stop(dev, 0); 160 161 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 162 struct ipoib_dev_priv *cpriv; 163 164 /* Bring down any child interfaces too */ 165 mutex_lock(&priv->vlan_mutex); 166 list_for_each_entry(cpriv, &priv->child_intfs, list) { 167 int flags; 168 169 flags = cpriv->dev->flags; 170 if (!(flags & IFF_UP)) 171 continue; 172 173 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 174 } 175 mutex_unlock(&priv->vlan_mutex); 176 } 177 178 return 0; 179 } 180 181 static void ipoib_uninit(struct net_device *dev) 182 { 183 ipoib_dev_cleanup(dev); 184 } 185 186 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) 187 { 188 struct ipoib_dev_priv *priv = netdev_priv(dev); 189 190 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) 191 features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 192 193 return features; 194 } 195 196 static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 197 { 198 struct ipoib_dev_priv *priv = netdev_priv(dev); 199 200 /* dev->mtu > 2K ==> connected mode */ 201 if (ipoib_cm_admin_enabled(dev)) { 202 if (new_mtu > ipoib_cm_max_mtu(dev)) 203 return -EINVAL; 204 205 if (new_mtu > priv->mcast_mtu) 206 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", 207 priv->mcast_mtu); 208 209 dev->mtu = new_mtu; 210 return 0; 211 } 212 213 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) 214 return -EINVAL; 215 216 priv->admin_mtu = new_mtu; 217 218 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 219 220 return 0; 221 } 222 223 int ipoib_set_mode(struct net_device *dev, const char *buf) 224 { 225 struct ipoib_dev_priv *priv = netdev_priv(dev); 226 227 /* flush paths if we switch modes so that connections are restarted */ 228 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 229 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 230 ipoib_warn(priv, "enabling connected mode " 231 "will cause multicast packet drops\n"); 232 netdev_update_features(dev); 233 rtnl_unlock(); 234 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 235 236 ipoib_flush_paths(dev); 237 rtnl_lock(); 238 return 0; 239 } 240 241 if (!strcmp(buf, "datagram\n")) { 242 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 243 netdev_update_features(dev); 244 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 245 rtnl_unlock(); 246 ipoib_flush_paths(dev); 247 
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
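
/*
 * ipoib_flush_paths() detaches every cached path onto a private
 * remove_list and then frees them one by one.  Both the TX lock and
 * priv->lock must be dropped around wait_for_completion(), because
 * path_rec_completion() needs priv->lock in order to signal
 * path->done; the locks are re-taken before the next iteration.
 */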
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
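
/*
 * SA path record queries complete here.  On success a new address
 * handle is created and swapped in under priv->lock, every neighbour
 * on the path is repointed at it (dropping its reference on the old
 * AH), and any skbs queued while the lookup was in flight are resent
 * through dev_queue_xmit() once the lock is released.
 */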
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
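
/*
 * First transmit to a unicast IP/IPv6 destination: allocate a
 * neighbour, attach it to the (possibly newly created) path for its
 * GID, and either send immediately if the path already has an AH or
 * queue the skb until path_rec_completion() fires.
 */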
static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah  = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_cb *cb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, cb->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, cb->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			__skb_queue_tail(&path->queue, skb);

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
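
/*
 * Transmit dispatch.  The destination hardware address was stashed in
 * skb->cb by ipoib_hard_header(); hwaddr[4] == 0xff means the GID is a
 * multicast GID.  Unicast IP/IPv6 goes through the neighbour cache,
 * unicast ARP/RARP always does a fresh path lookup (the remote QPN may
 * have changed), and unsupported ethertypes are dropped.
 */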
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
	struct ipoib_header *header;
	unsigned long flags;

	header = (struct ipoib_header *) skb->data;

	if (unlikely(cb->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		cb->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, cb->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		neigh = ipoib_neigh_get(dev, cb->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, cb->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path lookup */
		unicast_arp_send(skb, dev, cb);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;
	struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on dst_entry structure,  always stuff the
	 * destination address into skb->cb so we can figure out where
	 * to send the packet later.
	 */
	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}
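
/*
 * IPoIB hardware addresses are 20 bytes (INFINIBAND_ALEN): a flags/QPN
 * word in octets [0:4) (IPOIB_QPN() extracts the low 24 bits) followed
 * by the 16-byte port GID in octets [4:20), whose last 8 bytes are the
 * port GUID.  The hash below mixes only the QPN and GUID words, since
 * the subnet prefix adds no entropy.
 */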
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used as one can not connect to
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}

struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}

static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods? */
			if (time_after(neigh_obsolete, neigh->alive)) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}


static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc hash, maybe hash resize took place so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}

void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}

static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	ntbl->htbl = NULL;
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	ntbl->htbl = htbl;
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}

void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}

		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}

static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC; if we are called from an init failure, the work must be cancelled here */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}


int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (ipoib_neigh_hash_init(priv) < 0)
		goto out;
	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out_neigh_hash_cleanup;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out_neigh_hash_cleanup:
	ipoib_neigh_hash_uninit(dev);
out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;

	ipoib_neigh_hash_uninit(dev);
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
};
void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops	 = &ipoib_netdev_ops;
	dev->header_ops	 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo = HZ;

	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len = IPOIB_ENCAP_LEN;
	dev->addr_len = INFINIBAND_ALEN;
	dev->type = ARPHRD_INFINIBAND;
	dev->tx_queue_len = ipoib_sendq_size * 2;
	dev->features = (NETIF_F_VLAN_CHALLENGED |
			 NETIF_F_HIGHDMA);
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;

}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_SG |
			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);
	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);