/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

static int lro;
module_param(lro, bool, 0444);
MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");

static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
module_param(lro_max_aggr, int, 0644);
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
		"(default = 64)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

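/*
 * ipoib_open() is installed as dev->open in ipoib_setup(): it enables
 * NAPI, marks the interface administratively up, brings up the IB
 * device, and then opens any child (P_Key) interfaces as well.
 */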
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	napi_enable(&priv->napi);
	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev)) {
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (ipoib_ib_dev_up(dev)) {
		ipoib_ib_dev_stop(dev, 1);
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	napi_disable(&priv->napi);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 0);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

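	/*
	 * No existing entry matched this GID: link the new node in at
	 * the leaf position found above, then rebalance the rb-tree.
	 */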
	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID " IPOIB_GID_FMT " invalid\n",
			be16_to_cpu(path->pathrec.dlid),
			IPOIB_GID_ARG(path->pathrec.dgid));
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

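	/*
	 * The loop above exits with priv->lock and the tx lock held
	 * (either reacquired on the last iteration or taken before an
	 * empty loop), so drop them both here.
	 */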
	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

IPOIB_GID_FMT "\n", 531 IPOIB_GID_ARG(path->pathrec.dgid)); 532 533 init_completion(&path->done); 534 535 path->query_id = 536 ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, 537 &path->pathrec, 538 IB_SA_PATH_REC_DGID | 539 IB_SA_PATH_REC_SGID | 540 IB_SA_PATH_REC_NUMB_PATH | 541 IB_SA_PATH_REC_TRAFFIC_CLASS | 542 IB_SA_PATH_REC_PKEY, 543 1000, GFP_ATOMIC, 544 path_rec_completion, 545 path, &path->query); 546 if (path->query_id < 0) { 547 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); 548 path->query = NULL; 549 return path->query_id; 550 } 551 552 return 0; 553 } 554 555 static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) 556 { 557 struct ipoib_dev_priv *priv = netdev_priv(dev); 558 struct ipoib_path *path; 559 struct ipoib_neigh *neigh; 560 unsigned long flags; 561 562 neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev); 563 if (!neigh) { 564 ++dev->stats.tx_dropped; 565 dev_kfree_skb_any(skb); 566 return; 567 } 568 569 spin_lock_irqsave(&priv->lock, flags); 570 571 path = __path_find(dev, skb->dst->neighbour->ha + 4); 572 if (!path) { 573 path = path_rec_create(dev, skb->dst->neighbour->ha + 4); 574 if (!path) 575 goto err_path; 576 577 __path_add(dev, path); 578 } 579 580 list_add_tail(&neigh->list, &path->neigh_list); 581 582 if (path->ah) { 583 kref_get(&path->ah->ref); 584 neigh->ah = path->ah; 585 memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw, 586 sizeof(union ib_gid)); 587 588 if (ipoib_cm_enabled(dev, neigh->neighbour)) { 589 if (!ipoib_cm_get(neigh)) 590 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh)); 591 if (!ipoib_cm_get(neigh)) { 592 list_del(&neigh->list); 593 if (neigh->ah) 594 ipoib_put_ah(neigh->ah); 595 ipoib_neigh_free(dev, neigh); 596 goto err_drop; 597 } 598 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) 599 __skb_queue_tail(&neigh->queue, skb); 600 else { 601 ipoib_warn(priv, "queue length limit %d. 
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		if (!path)
			path = path_rec_create(dev, phdr->hwaddr + 4);
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (likely(skb->dst && skb->dst->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (neigh->ah)
			if (unlikely((memcmp(&neigh->dgid.raw,
					     skb->dst->neighbour->ha + 4,
					     sizeof(union ib_gid))) ||
				     (neigh->dev != dev))) {
				spin_lock_irqsave(&priv->lock, flags);
				/*
				 * It's safe to call ipoib_put_ah() inside
				 * priv->lock here, because we know that
				 * path->ah will always hold one more reference,
				 * so ipoib_put_ah() will never do more than
				 * decrement the ref count.
				 */
				ipoib_put_ah(neigh->ah);
				list_del(&neigh->list);
				ipoib_neigh_free(dev, neigh);
				spin_unlock_irqrestore(&priv->lock, flags);
				ipoib_path_lookup(skb, dev);
				return NETDEV_TX_OK;
			}

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				return NETDEV_TX_OK;
			}
		} else if (neigh->ah) {
			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
			return NETDEV_TX_OK;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock_irqsave(&priv->lock, flags);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
				dev_kfree_skb_any(skb);
				++dev->stats.tx_dropped;
				return NETDEV_TX_OK;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	neigh = *to_ipoib_neigh(n);
	if (neigh)
		priv = netdev_priv(neigh->dev);
	else
		return;
	ipoib_dbg(priv,
		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
		  IPOIB_QPN(n->ha),
		  IPOIB_GID_RAW_ARG(n->ha + 4));

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh->ah)
		ah = neigh->ah;
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	neigh->dev = dev;
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct sk_buff *skb;
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
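	/*
	 * ipoib_dev_cleanup() recurses into each child; free_netdev()
	 * then releases the child's netdev (and the cpriv embedded in
	 * it), so the _safe list iterator is required here.
	 */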
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	unsigned int ip_len;
	struct iphdr *iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		return -1;

	/*
	 * In the future we may add an else clause that verifies the
	 * checksum and allows devices which do not calculate checksum
	 * to use LRO.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
		return -1;

	/* Check for non-TCP packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if IP header and TCP header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
{
	priv->lro.lro_mgr.max_aggr = lro_max_aggr;
	priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS;
	priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc;
	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
	priv->lro.lro_mgr.features = LRO_F_NAPI;
	priv->lro.lro_mgr.dev = priv->dev;
	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
}

static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open = ipoib_open;
	dev->stop = ipoib_stop;
	dev->change_mtu = ipoib_change_mtu;
	dev->hard_start_xmit = ipoib_start_xmit;
	dev->tx_timeout = ipoib_timeout;
	dev->header_ops = &ipoib_header_ops;
	dev->set_multicast_list = ipoib_set_mcast_list;
	dev->neigh_setup = ipoib_neigh_setup_dev;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo = HZ;

	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len = INFINIBAND_ALEN;
	dev->type = ARPHRD_INFINIBAND;
	dev->tx_queue_len = ipoib_sendq_size * 2;
	dev->features = (NETIF_F_VLAN_CHALLENGED |
			 NETIF_F_HIGHDMA);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	ipoib_lro_setup(priv);

	spin_lock_init(&priv->lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

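	/* A sysfs store returns the number of bytes consumed on success */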
	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;

}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	}

	if (lro)
		priv->dev->features |= NETIF_F_LRO;

	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
		priv->dev->features |= NETIF_F_TSO;

	return 0;
}


static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
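		/*
		 * free_netdev() also frees priv (it lives in the
		 * netdev's private area), which is why the _safe
		 * variant of the list iterator is used above.
		 */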
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
						     IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed. We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);