/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

static bool lro;
module_param(lro, bool, 0444);
MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");

static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
module_param(lro_max_aggr, int, 0644);
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
		"(default = 64)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

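/*
 * Open the interface: mark it administratively up, bring up the IB
 * resources, and propagate IFF_UP to any child (P_Key) interfaces.
 * If the P_Key is not yet in the port's table, the open is deferred
 * (see ipoib_pkey_dev_delay_open()) and finished later by the P_Key
 * poll task.
 */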
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		goto err_disable;

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 0);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
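	/* new node is linked in place; recolor/rebalance the rb-tree */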
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

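/*
 * Completion callback for an SA path record query: on success, build a
 * new address handle from the returned path, hand it to every neighbour
 * waiting on this path, and retransmit any queued skbs once the lock is
 * dropped.
 */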
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

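/*
 * Kick off an asynchronous SA path record query for @path; the result
 * is delivered to path_rec_completion().  Returns 0 if the query was
 * posted, or the negative query ID on failure.
 */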
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev);
	if (!neigh) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;
		memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
		       sizeof(union ib_gid));

		if (ipoib_cm_enabled(dev, neigh->neighbour)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
				goto err_drop;
			}
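			/* CM connection still being set up: queue the skb, but cap the backlog */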
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
			return;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb_dst(skb)->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

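/*
 * Transmit entry point.  Packets with a usable neighbour go straight out
 * in connected (CM) or datagram mode; otherwise they are handed to the
 * path lookup machinery or, lacking a neighbour, routed by the
 * pseudoheader that ipoib_hard_header() stashed in front of the payload.
 */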
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) {
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour);

		if (unlikely((memcmp(&neigh->dgid.raw,
				     skb_dst(skb)->neighbour->ha + 4,
				     sizeof(union ib_gid))) ||
			     (neigh->dev != dev))) {
			spin_lock_irqsave(&priv->lock, flags);
			/*
			 * It's safe to call ipoib_put_ah() inside
			 * priv->lock here, because we know that
			 * path->ah will always hold one more reference,
			 * so ipoib_put_ah() will never do more than
			 * decrement the ref count.
			 */
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			list_del(&neigh->list);
			ipoib_neigh_free(dev, neigh);
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				return NETDEV_TX_OK;
			}
		} else if (neigh->ah) {
			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
			return NETDEV_TX_OK;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock_irqsave(&priv->lock, flags);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
					   skb_dst(skb) ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   phdr->hwaddr + 4);
				dev_kfree_skb_any(skb);
				++dev->stats.tx_dropped;
				return NETDEV_TX_OK;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

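/*
 * Build the 4-byte IPoIB encapsulation header.  An IPoIB frame carries
 * no link-layer addresses of its own, so when there is no neighbour
 * entry the 20-byte hardware address is prepended as a pseudoheader
 * for ipoib_start_xmit() to consume.
 */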
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	neigh = *to_ipoib_neigh(n);
	if (neigh)
		priv = netdev_priv(neigh->dev);
	else
		return;
	ipoib_dbg(priv,
		  "neigh_cleanup for %06x %pI6\n",
		  IPOIB_QPN(n->ha),
		  n->ha + 4);

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh->ah)
		ah = neigh->ah;
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	neigh->dev = dev;
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct sk_buff *skb;
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

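/*
 * Tear down in the reverse order of ipoib_dev_init(): recursively clean
 * up child interfaces first, then release the IB resources and free the
 * RX/TX rings.
 */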
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	unsigned int ip_len;
	struct iphdr *iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		return -1;

	/*
	 * In the future we may add an else clause that verifies the
	 * checksum and allows devices which do not calculate checksum
	 * to use LRO.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
		return -1;

	/* Check for non-TCP packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if IP header and TCP header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
{
	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
	priv->lro.lro_mgr.dev		 = priv->dev;
	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
}

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_multicast_list	 = ipoib_set_mcast_list,
	.ndo_neigh_setup	 = ipoib_neigh_setup_dev,
};

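/*
 * One-time initialization of a freshly allocated IPoIB net_device:
 * wire up the netdev/header ops and NAPI polling, set the device
 * parameters, and initialize the per-device locks, lists and work
 * items.  Passed to alloc_netdev() as the setup callback.
 */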
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops		 = &ipoib_netdev_ops;
	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	ipoib_lro_setup(priv);

	spin_lock_init(&priv->lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

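/*
 * sysfs "create_child" attribute: parse a P_Key from userspace, force
 * the full-membership bit, and create the corresponding child
 * interface via ipoib_vlan_add().
 */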
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	}

	if (lro)
		priv->dev->features |= NETIF_F_LRO;

	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
		priv->dev->features |= NETIF_F_TSO;

	return 0;
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

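/*
 * IB client removal hook: bring each port's interface down, flush any
 * pending work, then unregister and free every net_device that
 * ipoib_add_one() created for this HCA.
 */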
static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
						     IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);