1 /* 2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include "ipoib.h" 36 37 #include <linux/module.h> 38 39 #include <linux/init.h> 40 #include <linux/slab.h> 41 #include <linux/kernel.h> 42 #include <linux/vmalloc.h> 43 44 #include <linux/if_arp.h> /* For ARPHRD_xxx */ 45 46 #include <linux/ip.h> 47 #include <linux/in.h> 48 49 #include <linux/jhash.h> 50 #include <net/arp.h> 51 52 #define DRV_VERSION "1.0.0" 53 54 const char ipoib_driver_version[] = DRV_VERSION; 55 56 MODULE_AUTHOR("Roland Dreier"); 57 MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); 58 MODULE_LICENSE("Dual BSD/GPL"); 59 MODULE_VERSION(DRV_VERSION); 60 61 int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE; 62 int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE; 63 64 module_param_named(send_queue_size, ipoib_sendq_size, int, 0444); 65 MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); 66 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); 67 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); 68 69 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 70 int ipoib_debug_level; 71 72 module_param_named(debug_level, ipoib_debug_level, int, 0644); 73 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 74 #endif 75 76 struct ipoib_path_iter { 77 struct net_device *dev; 78 struct ipoib_path path; 79 }; 80 81 static const u8 ipv4_bcast_addr[] = { 82 0x00, 0xff, 0xff, 0xff, 83 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, 84 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff 85 }; 86 87 struct workqueue_struct *ipoib_workqueue; 88 89 struct ib_sa_client ipoib_sa_client; 90 91 static void ipoib_add_one(struct ib_device *device); 92 static void ipoib_remove_one(struct ib_device *device); 93 static void ipoib_neigh_reclaim(struct rcu_head *rp); 94 95 static struct ib_client ipoib_client = { 96 .name = "ipoib", 97 .add = ipoib_add_one, 98 .remove = ipoib_remove_one 99 }; 100 101 int ipoib_open(struct net_device *dev) 102 { 103 struct ipoib_dev_priv *priv = netdev_priv(dev); 104 105 ipoib_dbg(priv, "bringing up interface\n"); 106 107 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 108 109 if (ipoib_pkey_dev_delay_open(dev)) 110 return 0; 111 112 if (ipoib_ib_dev_open(dev)) 113 goto err_disable; 114 115 if (ipoib_ib_dev_up(dev)) 116 goto err_stop; 117 118 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 119 struct ipoib_dev_priv *cpriv; 120 121 /* Bring up any child interfaces too */ 122 mutex_lock(&priv->vlan_mutex); 123 list_for_each_entry(cpriv, &priv->child_intfs, list) { 124 int flags; 125 126 flags = cpriv->dev->flags; 127 if (flags & IFF_UP) 128 continue; 129 130 dev_change_flags(cpriv->dev, flags | IFF_UP); 131 } 132 mutex_unlock(&priv->vlan_mutex); 133 } 134 135 netif_start_queue(dev); 136 137 return 0; 138 139 err_stop: 140 ipoib_ib_dev_stop(dev, 1); 141 142 err_disable: 143 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 144 145 return -EINVAL; 146 } 147 148 static int ipoib_stop(struct net_device *dev) 149 { 150 struct ipoib_dev_priv *priv = netdev_priv(dev); 151 152 ipoib_dbg(priv, "stopping interface\n"); 153 154 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 155 156 netif_stop_queue(dev); 157 158 ipoib_ib_dev_down(dev, 1); 159 ipoib_ib_dev_stop(dev, 0); 160 161 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 162 struct ipoib_dev_priv *cpriv; 163 164 /* Bring down any child interfaces too */ 165 mutex_lock(&priv->vlan_mutex); 166 list_for_each_entry(cpriv, &priv->child_intfs, list) { 167 int flags; 168 169 flags = cpriv->dev->flags; 170 if (!(flags & IFF_UP)) 171 continue; 172 173 dev_change_flags(cpriv->dev, flags & ~IFF_UP); 174 } 175 mutex_unlock(&priv->vlan_mutex); 176 } 177 178 return 0; 179 } 180 181 static void ipoib_uninit(struct net_device *dev) 182 { 183 ipoib_dev_cleanup(dev); 184 } 185 186 static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features) 187 { 188 struct ipoib_dev_priv *priv = netdev_priv(dev); 189 190 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) 191 features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 192 193 return features; 194 } 195 196 static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 197 { 198 struct ipoib_dev_priv *priv = netdev_priv(dev); 199 200 /* dev->mtu > 2K ==> connected mode */ 201 if (ipoib_cm_admin_enabled(dev)) { 202 if (new_mtu > ipoib_cm_max_mtu(dev)) 203 return -EINVAL; 204 205 if (new_mtu > priv->mcast_mtu) 206 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", 207 priv->mcast_mtu); 208 209 dev->mtu = new_mtu; 210 return 0; 211 } 212 213 if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) 214 return -EINVAL; 215 216 priv->admin_mtu = new_mtu; 217 218 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 219 220 return 0; 221 } 222 223 int ipoib_set_mode(struct net_device *dev, const char *buf) 224 { 225 struct ipoib_dev_priv *priv = netdev_priv(dev); 226 227 /* flush paths if we switch modes so that connections are restarted */ 228 if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { 229 set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 230 ipoib_warn(priv, "enabling connected mode " 231 "will cause multicast packet drops\n"); 232 netdev_update_features(dev); 233 rtnl_unlock(); 234 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 235 236 ipoib_flush_paths(dev); 237 rtnl_lock(); 238 return 0; 239 } 240 241 if (!strcmp(buf, "datagram\n")) { 242 clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); 243 netdev_update_features(dev); 244 dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu)); 245 rtnl_unlock(); 246 ipoib_flush_paths(dev); 247 rtnl_lock(); 248 return 0; 249 } 250 251 return -EINVAL; 252 } 253 254 static struct ipoib_path *__path_find(struct net_device *dev, void *gid) 255 { 256 struct ipoib_dev_priv *priv = netdev_priv(dev); 257 struct rb_node *n = priv->path_tree.rb_node; 258 struct ipoib_path *path; 259 int ret; 260 261 while (n) { 262 path = rb_entry(n, struct ipoib_path, rb_node); 263 264 ret = memcmp(gid, path->pathrec.dgid.raw, 265 sizeof (union ib_gid)); 266 267 if (ret < 0) 268 n = n->rb_left; 269 else if (ret > 0) 270 n = n->rb_right; 271 else 272 return path; 273 } 274 275 return NULL; 276 } 277 278 static int __path_add(struct net_device *dev, struct ipoib_path *path) 279 { 280 struct ipoib_dev_priv *priv = netdev_priv(dev); 281 struct rb_node **n = &priv->path_tree.rb_node; 282 struct rb_node *pn = NULL; 283 struct ipoib_path *tpath; 284 int ret; 285 286 while (*n) { 287 pn = *n; 288 tpath = rb_entry(pn, struct ipoib_path, rb_node); 289 290 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, 291 sizeof (union ib_gid)); 292 if (ret < 0) 293 n = &pn->rb_left; 294 else if (ret > 0) 295 n = &pn->rb_right; 296 else 297 return -EEXIST; 298 } 299 300 rb_link_node(&path->rb_node, pn, n); 301 rb_insert_color(&path->rb_node, &priv->path_tree); 302 303 list_add_tail(&path->list, &priv->path_list); 304 305 return 0; 306 } 307 308 static void path_free(struct net_device *dev, struct ipoib_path *path) 309 { 310 struct sk_buff *skb; 311 312 while ((skb = __skb_dequeue(&path->queue))) 313 dev_kfree_skb_irq(skb); 314 315 ipoib_dbg(netdev_priv(dev), "path_free\n"); 316 317 /* remove all neigh connected to this path */ 318 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 319 320 if (path->ah) 321 ipoib_put_ah(path->ah); 322 323 kfree(path); 324 } 325 326 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 327 328 struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev) 329 { 330 struct ipoib_path_iter *iter; 331 332 iter = kmalloc(sizeof *iter, GFP_KERNEL); 333 if (!iter) 334 return NULL; 335 336 iter->dev = dev; 337 memset(iter->path.pathrec.dgid.raw, 0, 16); 338 339 if (ipoib_path_iter_next(iter)) { 340 kfree(iter); 341 return NULL; 342 } 343 344 return iter; 345 } 346 347 int ipoib_path_iter_next(struct ipoib_path_iter *iter) 348 { 349 struct ipoib_dev_priv *priv = netdev_priv(iter->dev); 350 struct rb_node *n; 351 struct ipoib_path *path; 352 int ret = 1; 353 354 spin_lock_irq(&priv->lock); 355 356 n = rb_first(&priv->path_tree); 357 358 while (n) { 359 path = rb_entry(n, struct ipoib_path, rb_node); 360 361 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, 362 sizeof (union ib_gid)) < 0) { 363 iter->path = *path; 364 ret = 0; 365 break; 366 } 367 368 n = rb_next(n); 369 } 370 371 spin_unlock_irq(&priv->lock); 372 373 return ret; 374 } 375 376 void ipoib_path_iter_read(struct ipoib_path_iter *iter, 377 struct ipoib_path *path) 378 { 379 *path = iter->path; 380 } 381 382 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ 383 384 void ipoib_mark_paths_invalid(struct net_device *dev) 385 { 386 struct ipoib_dev_priv *priv = netdev_priv(dev); 387 struct ipoib_path *path, *tp; 388 389 spin_lock_irq(&priv->lock); 390 391 list_for_each_entry_safe(path, tp, &priv->path_list, list) { 392 ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n", 393 be16_to_cpu(path->pathrec.dlid), 394 path->pathrec.dgid.raw); 395 path->valid = 0; 396 } 397 398 spin_unlock_irq(&priv->lock); 399 } 400 401 void ipoib_flush_paths(struct net_device *dev) 402 { 403 struct ipoib_dev_priv *priv = netdev_priv(dev); 404 struct ipoib_path *path, *tp; 405 LIST_HEAD(remove_list); 406 unsigned long flags; 407 408 netif_tx_lock_bh(dev); 409 spin_lock_irqsave(&priv->lock, flags); 410 411 list_splice_init(&priv->path_list, &remove_list); 412 413 list_for_each_entry(path, &remove_list, list) 414 rb_erase(&path->rb_node, &priv->path_tree); 415 416 list_for_each_entry_safe(path, tp, &remove_list, list) { 417 if (path->query) 418 ib_sa_cancel_query(path->query_id, path->query); 419 spin_unlock_irqrestore(&priv->lock, flags); 420 netif_tx_unlock_bh(dev); 421 wait_for_completion(&path->done); 422 path_free(dev, path); 423 netif_tx_lock_bh(dev); 424 spin_lock_irqsave(&priv->lock, flags); 425 } 426 427 spin_unlock_irqrestore(&priv->lock, flags); 428 netif_tx_unlock_bh(dev); 429 } 430 431 static void path_rec_completion(int status, 432 struct ib_sa_path_rec *pathrec, 433 void *path_ptr) 434 { 435 struct ipoib_path *path = path_ptr; 436 struct net_device *dev = path->dev; 437 struct ipoib_dev_priv *priv = netdev_priv(dev); 438 struct ipoib_ah *ah = NULL; 439 struct ipoib_ah *old_ah = NULL; 440 struct ipoib_neigh *neigh, *tn; 441 struct sk_buff_head skqueue; 442 struct sk_buff *skb; 443 unsigned long flags; 444 445 if (!status) 446 ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n", 447 be16_to_cpu(pathrec->dlid), pathrec->dgid.raw); 448 else 449 ipoib_dbg(priv, "PathRec status %d for GID %pI6\n", 450 status, path->pathrec.dgid.raw); 451 452 skb_queue_head_init(&skqueue); 453 454 if (!status) { 455 struct ib_ah_attr av; 456 457 if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av)) 458 ah = ipoib_create_ah(dev, priv->pd, &av); 459 } 460 461 spin_lock_irqsave(&priv->lock, flags); 462 463 if (!IS_ERR_OR_NULL(ah)) { 464 path->pathrec = *pathrec; 465 466 old_ah = path->ah; 467 path->ah = ah; 468 469 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", 470 ah, be16_to_cpu(pathrec->dlid), pathrec->sl); 471 472 while ((skb = __skb_dequeue(&path->queue))) 473 __skb_queue_tail(&skqueue, skb); 474 475 list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) { 476 if (neigh->ah) { 477 WARN_ON(neigh->ah != old_ah); 478 /* 479 * Dropping the ah reference inside 480 * priv->lock is safe here, because we 481 * will hold one more reference from 482 * the original value of path->ah (ie 483 * old_ah). 484 */ 485 ipoib_put_ah(neigh->ah); 486 } 487 kref_get(&path->ah->ref); 488 neigh->ah = path->ah; 489 490 if (ipoib_cm_enabled(dev, neigh->daddr)) { 491 if (!ipoib_cm_get(neigh)) 492 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, 493 path, 494 neigh)); 495 if (!ipoib_cm_get(neigh)) { 496 ipoib_neigh_free(neigh); 497 continue; 498 } 499 } 500 501 while ((skb = __skb_dequeue(&neigh->queue))) 502 __skb_queue_tail(&skqueue, skb); 503 } 504 path->valid = 1; 505 } 506 507 path->query = NULL; 508 complete(&path->done); 509 510 spin_unlock_irqrestore(&priv->lock, flags); 511 512 if (IS_ERR_OR_NULL(ah)) 513 ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw); 514 515 if (old_ah) 516 ipoib_put_ah(old_ah); 517 518 while ((skb = __skb_dequeue(&skqueue))) { 519 skb->dev = dev; 520 if (dev_queue_xmit(skb)) 521 ipoib_warn(priv, "dev_queue_xmit failed " 522 "to requeue packet\n"); 523 } 524 } 525 526 static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid) 527 { 528 struct ipoib_dev_priv *priv = netdev_priv(dev); 529 struct ipoib_path *path; 530 531 if (!priv->broadcast) 532 return NULL; 533 534 path = kzalloc(sizeof *path, GFP_ATOMIC); 535 if (!path) 536 return NULL; 537 538 path->dev = dev; 539 540 skb_queue_head_init(&path->queue); 541 542 INIT_LIST_HEAD(&path->neigh_list); 543 544 memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid)); 545 path->pathrec.sgid = priv->local_gid; 546 path->pathrec.pkey = cpu_to_be16(priv->pkey); 547 path->pathrec.numb_path = 1; 548 path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class; 549 550 return path; 551 } 552 553 static int path_rec_start(struct net_device *dev, 554 struct ipoib_path *path) 555 { 556 struct ipoib_dev_priv *priv = netdev_priv(dev); 557 558 ipoib_dbg(priv, "Start path record lookup for %pI6\n", 559 path->pathrec.dgid.raw); 560 561 init_completion(&path->done); 562 563 path->query_id = 564 ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, 565 &path->pathrec, 566 IB_SA_PATH_REC_DGID | 567 IB_SA_PATH_REC_SGID | 568 IB_SA_PATH_REC_NUMB_PATH | 569 IB_SA_PATH_REC_TRAFFIC_CLASS | 570 IB_SA_PATH_REC_PKEY, 571 1000, GFP_ATOMIC, 572 path_rec_completion, 573 path, &path->query); 574 if (path->query_id < 0) { 575 ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); 576 path->query = NULL; 577 complete(&path->done); 578 return path->query_id; 579 } 580 581 return 0; 582 } 583 584 static void neigh_add_path(struct sk_buff *skb, u8 *daddr, 585 struct net_device *dev) 586 { 587 struct ipoib_dev_priv *priv = netdev_priv(dev); 588 struct ipoib_path *path; 589 struct ipoib_neigh *neigh; 590 unsigned long flags; 591 592 spin_lock_irqsave(&priv->lock, flags); 593 neigh = ipoib_neigh_alloc(daddr, dev); 594 if (!neigh) { 595 spin_unlock_irqrestore(&priv->lock, flags); 596 ++dev->stats.tx_dropped; 597 dev_kfree_skb_any(skb); 598 return; 599 } 600 601 path = __path_find(dev, daddr + 4); 602 if (!path) { 603 path = path_rec_create(dev, daddr + 4); 604 if (!path) 605 goto err_path; 606 607 __path_add(dev, path); 608 } 609 610 list_add_tail(&neigh->list, &path->neigh_list); 611 612 if (path->ah) { 613 kref_get(&path->ah->ref); 614 neigh->ah = path->ah; 615 616 if (ipoib_cm_enabled(dev, neigh->daddr)) { 617 if (!ipoib_cm_get(neigh)) 618 ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh)); 619 if (!ipoib_cm_get(neigh)) { 620 ipoib_neigh_free(neigh); 621 goto err_drop; 622 } 623 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) 624 __skb_queue_tail(&neigh->queue, skb); 625 else { 626 ipoib_warn(priv, "queue length limit %d. Packet drop.\n", 627 skb_queue_len(&neigh->queue)); 628 goto err_drop; 629 } 630 } else { 631 spin_unlock_irqrestore(&priv->lock, flags); 632 ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr)); 633 ipoib_neigh_put(neigh); 634 return; 635 } 636 } else { 637 neigh->ah = NULL; 638 639 if (!path->query && path_rec_start(dev, path)) 640 goto err_path; 641 642 __skb_queue_tail(&neigh->queue, skb); 643 } 644 645 spin_unlock_irqrestore(&priv->lock, flags); 646 ipoib_neigh_put(neigh); 647 return; 648 649 err_path: 650 ipoib_neigh_free(neigh); 651 err_drop: 652 ++dev->stats.tx_dropped; 653 dev_kfree_skb_any(skb); 654 655 spin_unlock_irqrestore(&priv->lock, flags); 656 ipoib_neigh_put(neigh); 657 } 658 659 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, 660 struct ipoib_cb *cb) 661 { 662 struct ipoib_dev_priv *priv = netdev_priv(dev); 663 struct ipoib_path *path; 664 unsigned long flags; 665 666 spin_lock_irqsave(&priv->lock, flags); 667 668 path = __path_find(dev, cb->hwaddr + 4); 669 if (!path || !path->valid) { 670 int new_path = 0; 671 672 if (!path) { 673 path = path_rec_create(dev, cb->hwaddr + 4); 674 new_path = 1; 675 } 676 if (path) { 677 __skb_queue_tail(&path->queue, skb); 678 679 if (!path->query && path_rec_start(dev, path)) { 680 spin_unlock_irqrestore(&priv->lock, flags); 681 if (new_path) 682 path_free(dev, path); 683 return; 684 } else 685 __path_add(dev, path); 686 } else { 687 ++dev->stats.tx_dropped; 688 dev_kfree_skb_any(skb); 689 } 690 691 spin_unlock_irqrestore(&priv->lock, flags); 692 return; 693 } 694 695 if (path->ah) { 696 ipoib_dbg(priv, "Send unicast ARP to %04x\n", 697 be16_to_cpu(path->pathrec.dlid)); 698 699 spin_unlock_irqrestore(&priv->lock, flags); 700 ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); 701 return; 702 } else if ((path->query || !path_rec_start(dev, path)) && 703 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 704 __skb_queue_tail(&path->queue, skb); 705 } else { 706 ++dev->stats.tx_dropped; 707 dev_kfree_skb_any(skb); 708 } 709 710 spin_unlock_irqrestore(&priv->lock, flags); 711 } 712 713 static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 714 { 715 struct ipoib_dev_priv *priv = netdev_priv(dev); 716 struct ipoib_neigh *neigh; 717 struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; 718 struct ipoib_header *header; 719 unsigned long flags; 720 721 header = (struct ipoib_header *) skb->data; 722 723 if (unlikely(cb->hwaddr[4] == 0xff)) { 724 /* multicast, arrange "if" according to probability */ 725 if ((header->proto != htons(ETH_P_IP)) && 726 (header->proto != htons(ETH_P_IPV6)) && 727 (header->proto != htons(ETH_P_ARP)) && 728 (header->proto != htons(ETH_P_RARP)) && 729 (header->proto != htons(ETH_P_TIPC))) { 730 /* ethertype not supported by IPoIB */ 731 ++dev->stats.tx_dropped; 732 dev_kfree_skb_any(skb); 733 return NETDEV_TX_OK; 734 } 735 /* Add in the P_Key for multicast*/ 736 cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; 737 cb->hwaddr[9] = priv->pkey & 0xff; 738 739 neigh = ipoib_neigh_get(dev, cb->hwaddr); 740 if (likely(neigh)) 741 goto send_using_neigh; 742 ipoib_mcast_send(dev, cb->hwaddr, skb); 743 return NETDEV_TX_OK; 744 } 745 746 /* unicast, arrange "switch" according to probability */ 747 switch (header->proto) { 748 case htons(ETH_P_IP): 749 case htons(ETH_P_IPV6): 750 case htons(ETH_P_TIPC): 751 neigh = ipoib_neigh_get(dev, cb->hwaddr); 752 if (unlikely(!neigh)) { 753 neigh_add_path(skb, cb->hwaddr, dev); 754 return NETDEV_TX_OK; 755 } 756 break; 757 case htons(ETH_P_ARP): 758 case htons(ETH_P_RARP): 759 /* for unicast ARP and RARP should always perform path find */ 760 unicast_arp_send(skb, dev, cb); 761 return NETDEV_TX_OK; 762 default: 763 /* ethertype not supported by IPoIB */ 764 ++dev->stats.tx_dropped; 765 dev_kfree_skb_any(skb); 766 return NETDEV_TX_OK; 767 } 768 769 send_using_neigh: 770 /* note we now hold a ref to neigh */ 771 if (ipoib_cm_get(neigh)) { 772 if (ipoib_cm_up(neigh)) { 773 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 774 goto unref; 775 } 776 } else if (neigh->ah) { 777 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr)); 778 goto unref; 779 } 780 781 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 782 spin_lock_irqsave(&priv->lock, flags); 783 __skb_queue_tail(&neigh->queue, skb); 784 spin_unlock_irqrestore(&priv->lock, flags); 785 } else { 786 ++dev->stats.tx_dropped; 787 dev_kfree_skb_any(skb); 788 } 789 790 unref: 791 ipoib_neigh_put(neigh); 792 793 return NETDEV_TX_OK; 794 } 795 796 static void ipoib_timeout(struct net_device *dev) 797 { 798 struct ipoib_dev_priv *priv = netdev_priv(dev); 799 800 ipoib_warn(priv, "transmit timeout: latency %d msecs\n", 801 jiffies_to_msecs(jiffies - dev->trans_start)); 802 ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n", 803 netif_queue_stopped(dev), 804 priv->tx_head, priv->tx_tail); 805 /* XXX reset QP, etc. */ 806 } 807 808 static int ipoib_hard_header(struct sk_buff *skb, 809 struct net_device *dev, 810 unsigned short type, 811 const void *daddr, const void *saddr, unsigned len) 812 { 813 struct ipoib_header *header; 814 struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; 815 816 header = (struct ipoib_header *) skb_push(skb, sizeof *header); 817 818 header->proto = htons(type); 819 header->reserved = 0; 820 821 /* 822 * we don't rely on dst_entry structure, always stuff the 823 * destination address into skb->cb so we can figure out where 824 * to send the packet later. 825 */ 826 memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); 827 828 return sizeof *header; 829 } 830 831 static void ipoib_set_mcast_list(struct net_device *dev) 832 { 833 struct ipoib_dev_priv *priv = netdev_priv(dev); 834 835 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) { 836 ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set"); 837 return; 838 } 839 840 queue_work(ipoib_workqueue, &priv->restart_task); 841 } 842 843 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) 844 { 845 /* 846 * Use only the address parts that contributes to spreading 847 * The subnet prefix is not used as one can not connect to 848 * same remote port (GUID) using the same remote QPN via two 849 * different subnets. 850 */ 851 /* qpn octets[1:4) & port GUID octets[12:20) */ 852 u32 *d32 = (u32 *) daddr; 853 u32 hv; 854 855 hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0); 856 return hv & htbl->mask; 857 } 858 859 struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) 860 { 861 struct ipoib_dev_priv *priv = netdev_priv(dev); 862 struct ipoib_neigh_table *ntbl = &priv->ntbl; 863 struct ipoib_neigh_hash *htbl; 864 struct ipoib_neigh *neigh = NULL; 865 u32 hash_val; 866 867 rcu_read_lock_bh(); 868 869 htbl = rcu_dereference_bh(ntbl->htbl); 870 871 if (!htbl) 872 goto out_unlock; 873 874 hash_val = ipoib_addr_hash(htbl, daddr); 875 for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]); 876 neigh != NULL; 877 neigh = rcu_dereference_bh(neigh->hnext)) { 878 if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { 879 /* found, take one ref on behalf of the caller */ 880 if (!atomic_inc_not_zero(&neigh->refcnt)) { 881 /* deleted */ 882 neigh = NULL; 883 goto out_unlock; 884 } 885 neigh->alive = jiffies; 886 goto out_unlock; 887 } 888 } 889 890 out_unlock: 891 rcu_read_unlock_bh(); 892 return neigh; 893 } 894 895 static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) 896 { 897 struct ipoib_neigh_table *ntbl = &priv->ntbl; 898 struct ipoib_neigh_hash *htbl; 899 unsigned long neigh_obsolete; 900 unsigned long dt; 901 unsigned long flags; 902 int i; 903 904 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 905 return; 906 907 spin_lock_irqsave(&priv->lock, flags); 908 909 htbl = rcu_dereference_protected(ntbl->htbl, 910 lockdep_is_held(&priv->lock)); 911 912 if (!htbl) 913 goto out_unlock; 914 915 /* neigh is obsolete if it was idle for two GC periods */ 916 dt = 2 * arp_tbl.gc_interval; 917 neigh_obsolete = jiffies - dt; 918 /* handle possible race condition */ 919 if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 920 goto out_unlock; 921 922 for (i = 0; i < htbl->size; i++) { 923 struct ipoib_neigh *neigh; 924 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 925 926 while ((neigh = rcu_dereference_protected(*np, 927 lockdep_is_held(&priv->lock))) != NULL) { 928 /* was the neigh idle for two GC periods */ 929 if (time_after(neigh_obsolete, neigh->alive)) { 930 rcu_assign_pointer(*np, 931 rcu_dereference_protected(neigh->hnext, 932 lockdep_is_held(&priv->lock))); 933 /* remove from path/mc list */ 934 list_del(&neigh->list); 935 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 936 } else { 937 np = &neigh->hnext; 938 } 939 940 } 941 } 942 943 out_unlock: 944 spin_unlock_irqrestore(&priv->lock, flags); 945 } 946 947 static void ipoib_reap_neigh(struct work_struct *work) 948 { 949 struct ipoib_dev_priv *priv = 950 container_of(work, struct ipoib_dev_priv, neigh_reap_task.work); 951 952 __ipoib_reap_neigh(priv); 953 954 if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) 955 queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, 956 arp_tbl.gc_interval); 957 } 958 959 960 static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr, 961 struct net_device *dev) 962 { 963 struct ipoib_neigh *neigh; 964 965 neigh = kzalloc(sizeof *neigh, GFP_ATOMIC); 966 if (!neigh) 967 return NULL; 968 969 neigh->dev = dev; 970 memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr)); 971 skb_queue_head_init(&neigh->queue); 972 INIT_LIST_HEAD(&neigh->list); 973 ipoib_cm_set(neigh, NULL); 974 /* one ref on behalf of the caller */ 975 atomic_set(&neigh->refcnt, 1); 976 977 return neigh; 978 } 979 980 struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, 981 struct net_device *dev) 982 { 983 struct ipoib_dev_priv *priv = netdev_priv(dev); 984 struct ipoib_neigh_table *ntbl = &priv->ntbl; 985 struct ipoib_neigh_hash *htbl; 986 struct ipoib_neigh *neigh; 987 u32 hash_val; 988 989 htbl = rcu_dereference_protected(ntbl->htbl, 990 lockdep_is_held(&priv->lock)); 991 if (!htbl) { 992 neigh = NULL; 993 goto out_unlock; 994 } 995 996 /* need to add a new neigh, but maybe some other thread succeeded? 997 * recalc hash, maybe hash resize took place so we do a search 998 */ 999 hash_val = ipoib_addr_hash(htbl, daddr); 1000 for (neigh = rcu_dereference_protected(htbl->buckets[hash_val], 1001 lockdep_is_held(&priv->lock)); 1002 neigh != NULL; 1003 neigh = rcu_dereference_protected(neigh->hnext, 1004 lockdep_is_held(&priv->lock))) { 1005 if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { 1006 /* found, take one ref on behalf of the caller */ 1007 if (!atomic_inc_not_zero(&neigh->refcnt)) { 1008 /* deleted */ 1009 neigh = NULL; 1010 break; 1011 } 1012 neigh->alive = jiffies; 1013 goto out_unlock; 1014 } 1015 } 1016 1017 neigh = ipoib_neigh_ctor(daddr, dev); 1018 if (!neigh) 1019 goto out_unlock; 1020 1021 /* one ref on behalf of the hash table */ 1022 atomic_inc(&neigh->refcnt); 1023 neigh->alive = jiffies; 1024 /* put in hash */ 1025 rcu_assign_pointer(neigh->hnext, 1026 rcu_dereference_protected(htbl->buckets[hash_val], 1027 lockdep_is_held(&priv->lock))); 1028 rcu_assign_pointer(htbl->buckets[hash_val], neigh); 1029 atomic_inc(&ntbl->entries); 1030 1031 out_unlock: 1032 1033 return neigh; 1034 } 1035 1036 void ipoib_neigh_dtor(struct ipoib_neigh *neigh) 1037 { 1038 /* neigh reference count was dropprd to zero */ 1039 struct net_device *dev = neigh->dev; 1040 struct ipoib_dev_priv *priv = netdev_priv(dev); 1041 struct sk_buff *skb; 1042 if (neigh->ah) 1043 ipoib_put_ah(neigh->ah); 1044 while ((skb = __skb_dequeue(&neigh->queue))) { 1045 ++dev->stats.tx_dropped; 1046 dev_kfree_skb_any(skb); 1047 } 1048 if (ipoib_cm_get(neigh)) 1049 ipoib_cm_destroy_tx(ipoib_cm_get(neigh)); 1050 ipoib_dbg(netdev_priv(dev), 1051 "neigh free for %06x %pI6\n", 1052 IPOIB_QPN(neigh->daddr), 1053 neigh->daddr + 4); 1054 kfree(neigh); 1055 if (atomic_dec_and_test(&priv->ntbl.entries)) { 1056 if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags)) 1057 complete(&priv->ntbl.flushed); 1058 } 1059 } 1060 1061 static void ipoib_neigh_reclaim(struct rcu_head *rp) 1062 { 1063 /* Called as a result of removal from hash table */ 1064 struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu); 1065 /* note TX context may hold another ref */ 1066 ipoib_neigh_put(neigh); 1067 } 1068 1069 void ipoib_neigh_free(struct ipoib_neigh *neigh) 1070 { 1071 struct net_device *dev = neigh->dev; 1072 struct ipoib_dev_priv *priv = netdev_priv(dev); 1073 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1074 struct ipoib_neigh_hash *htbl; 1075 struct ipoib_neigh __rcu **np; 1076 struct ipoib_neigh *n; 1077 u32 hash_val; 1078 1079 htbl = rcu_dereference_protected(ntbl->htbl, 1080 lockdep_is_held(&priv->lock)); 1081 if (!htbl) 1082 return; 1083 1084 hash_val = ipoib_addr_hash(htbl, neigh->daddr); 1085 np = &htbl->buckets[hash_val]; 1086 for (n = rcu_dereference_protected(*np, 1087 lockdep_is_held(&priv->lock)); 1088 n != NULL; 1089 n = rcu_dereference_protected(*np, 1090 lockdep_is_held(&priv->lock))) { 1091 if (n == neigh) { 1092 /* found */ 1093 rcu_assign_pointer(*np, 1094 rcu_dereference_protected(neigh->hnext, 1095 lockdep_is_held(&priv->lock))); 1096 /* remove from parent list */ 1097 list_del(&neigh->list); 1098 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1099 return; 1100 } else { 1101 np = &n->hnext; 1102 } 1103 } 1104 } 1105 1106 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) 1107 { 1108 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1109 struct ipoib_neigh_hash *htbl; 1110 struct ipoib_neigh **buckets; 1111 u32 size; 1112 1113 clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1114 ntbl->htbl = NULL; 1115 htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); 1116 if (!htbl) 1117 return -ENOMEM; 1118 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1119 size = roundup_pow_of_two(arp_tbl.gc_thresh3); 1120 buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL); 1121 if (!buckets) { 1122 kfree(htbl); 1123 return -ENOMEM; 1124 } 1125 htbl->size = size; 1126 htbl->mask = (size - 1); 1127 htbl->buckets = buckets; 1128 ntbl->htbl = htbl; 1129 htbl->ntbl = ntbl; 1130 atomic_set(&ntbl->entries, 0); 1131 1132 /* start garbage collection */ 1133 clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1134 queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task, 1135 arp_tbl.gc_interval); 1136 1137 return 0; 1138 } 1139 1140 static void neigh_hash_free_rcu(struct rcu_head *head) 1141 { 1142 struct ipoib_neigh_hash *htbl = container_of(head, 1143 struct ipoib_neigh_hash, 1144 rcu); 1145 struct ipoib_neigh __rcu **buckets = htbl->buckets; 1146 struct ipoib_neigh_table *ntbl = htbl->ntbl; 1147 1148 kfree(buckets); 1149 kfree(htbl); 1150 complete(&ntbl->deleted); 1151 } 1152 1153 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) 1154 { 1155 struct ipoib_dev_priv *priv = netdev_priv(dev); 1156 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1157 struct ipoib_neigh_hash *htbl; 1158 unsigned long flags; 1159 int i; 1160 1161 /* remove all neigh connected to a given path or mcast */ 1162 spin_lock_irqsave(&priv->lock, flags); 1163 1164 htbl = rcu_dereference_protected(ntbl->htbl, 1165 lockdep_is_held(&priv->lock)); 1166 1167 if (!htbl) 1168 goto out_unlock; 1169 1170 for (i = 0; i < htbl->size; i++) { 1171 struct ipoib_neigh *neigh; 1172 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1173 1174 while ((neigh = rcu_dereference_protected(*np, 1175 lockdep_is_held(&priv->lock))) != NULL) { 1176 /* delete neighs belong to this parent */ 1177 if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) { 1178 rcu_assign_pointer(*np, 1179 rcu_dereference_protected(neigh->hnext, 1180 lockdep_is_held(&priv->lock))); 1181 /* remove from parent list */ 1182 list_del(&neigh->list); 1183 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1184 } else { 1185 np = &neigh->hnext; 1186 } 1187 1188 } 1189 } 1190 out_unlock: 1191 spin_unlock_irqrestore(&priv->lock, flags); 1192 } 1193 1194 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) 1195 { 1196 struct ipoib_neigh_table *ntbl = &priv->ntbl; 1197 struct ipoib_neigh_hash *htbl; 1198 unsigned long flags; 1199 int i, wait_flushed = 0; 1200 1201 init_completion(&priv->ntbl.flushed); 1202 1203 spin_lock_irqsave(&priv->lock, flags); 1204 1205 htbl = rcu_dereference_protected(ntbl->htbl, 1206 lockdep_is_held(&priv->lock)); 1207 if (!htbl) 1208 goto out_unlock; 1209 1210 wait_flushed = atomic_read(&priv->ntbl.entries); 1211 if (!wait_flushed) 1212 goto free_htbl; 1213 1214 for (i = 0; i < htbl->size; i++) { 1215 struct ipoib_neigh *neigh; 1216 struct ipoib_neigh __rcu **np = &htbl->buckets[i]; 1217 1218 while ((neigh = rcu_dereference_protected(*np, 1219 lockdep_is_held(&priv->lock))) != NULL) { 1220 rcu_assign_pointer(*np, 1221 rcu_dereference_protected(neigh->hnext, 1222 lockdep_is_held(&priv->lock))); 1223 /* remove from path/mc list */ 1224 list_del(&neigh->list); 1225 call_rcu(&neigh->rcu, ipoib_neigh_reclaim); 1226 } 1227 } 1228 1229 free_htbl: 1230 rcu_assign_pointer(ntbl->htbl, NULL); 1231 call_rcu(&htbl->rcu, neigh_hash_free_rcu); 1232 1233 out_unlock: 1234 spin_unlock_irqrestore(&priv->lock, flags); 1235 if (wait_flushed) 1236 wait_for_completion(&priv->ntbl.flushed); 1237 } 1238 1239 static void ipoib_neigh_hash_uninit(struct net_device *dev) 1240 { 1241 struct ipoib_dev_priv *priv = netdev_priv(dev); 1242 int stopped; 1243 1244 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); 1245 init_completion(&priv->ntbl.deleted); 1246 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); 1247 1248 /* Stop GC if called at init fail need to cancel work */ 1249 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1250 if (!stopped) 1251 cancel_delayed_work(&priv->neigh_reap_task); 1252 1253 ipoib_flush_neighs(priv); 1254 1255 wait_for_completion(&priv->ntbl.deleted); 1256 } 1257 1258 1259 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) 1260 { 1261 struct ipoib_dev_priv *priv = netdev_priv(dev); 1262 1263 if (ipoib_neigh_hash_init(priv) < 0) 1264 goto out; 1265 /* Allocate RX/TX "rings" to hold queued skbs */ 1266 priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, 1267 GFP_KERNEL); 1268 if (!priv->rx_ring) { 1269 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", 1270 ca->name, ipoib_recvq_size); 1271 goto out_neigh_hash_cleanup; 1272 } 1273 1274 priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring); 1275 if (!priv->tx_ring) { 1276 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n", 1277 ca->name, ipoib_sendq_size); 1278 goto out_rx_ring_cleanup; 1279 } 1280 1281 /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ 1282 1283 if (ipoib_ib_dev_init(dev, ca, port)) 1284 goto out_tx_ring_cleanup; 1285 1286 return 0; 1287 1288 out_tx_ring_cleanup: 1289 vfree(priv->tx_ring); 1290 1291 out_rx_ring_cleanup: 1292 kfree(priv->rx_ring); 1293 1294 out_neigh_hash_cleanup: 1295 ipoib_neigh_hash_uninit(dev); 1296 out: 1297 return -ENOMEM; 1298 } 1299 1300 void ipoib_dev_cleanup(struct net_device *dev) 1301 { 1302 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv; 1303 LIST_HEAD(head); 1304 1305 ASSERT_RTNL(); 1306 1307 ipoib_delete_debug_files(dev); 1308 1309 /* Delete any child interfaces first */ 1310 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { 1311 /* Stop GC on child */ 1312 set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags); 1313 cancel_delayed_work(&cpriv->neigh_reap_task); 1314 unregister_netdevice_queue(cpriv->dev, &head); 1315 } 1316 unregister_netdevice_many(&head); 1317 1318 ipoib_ib_dev_cleanup(dev); 1319 1320 kfree(priv->rx_ring); 1321 vfree(priv->tx_ring); 1322 1323 priv->rx_ring = NULL; 1324 priv->tx_ring = NULL; 1325 1326 ipoib_neigh_hash_uninit(dev); 1327 } 1328 1329 static const struct header_ops ipoib_header_ops = { 1330 .create = ipoib_hard_header, 1331 }; 1332 1333 static const struct net_device_ops ipoib_netdev_ops = { 1334 .ndo_uninit = ipoib_uninit, 1335 .ndo_open = ipoib_open, 1336 .ndo_stop = ipoib_stop, 1337 .ndo_change_mtu = ipoib_change_mtu, 1338 .ndo_fix_features = ipoib_fix_features, 1339 .ndo_start_xmit = ipoib_start_xmit, 1340 .ndo_tx_timeout = ipoib_timeout, 1341 .ndo_set_rx_mode = ipoib_set_mcast_list, 1342 }; 1343 1344 void ipoib_setup(struct net_device *dev) 1345 { 1346 struct ipoib_dev_priv *priv = netdev_priv(dev); 1347 1348 dev->netdev_ops = &ipoib_netdev_ops; 1349 dev->header_ops = &ipoib_header_ops; 1350 1351 ipoib_set_ethtool_ops(dev); 1352 1353 netif_napi_add(dev, &priv->napi, ipoib_poll, 100); 1354 1355 dev->watchdog_timeo = HZ; 1356 1357 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1358 1359 dev->hard_header_len = IPOIB_ENCAP_LEN; 1360 dev->addr_len = INFINIBAND_ALEN; 1361 dev->type = ARPHRD_INFINIBAND; 1362 dev->tx_queue_len = ipoib_sendq_size * 2; 1363 dev->features = (NETIF_F_VLAN_CHALLENGED | 1364 NETIF_F_HIGHDMA); 1365 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1366 1367 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 1368 1369 netif_carrier_off(dev); 1370 1371 priv->dev = dev; 1372 1373 spin_lock_init(&priv->lock); 1374 1375 mutex_init(&priv->vlan_mutex); 1376 1377 INIT_LIST_HEAD(&priv->path_list); 1378 INIT_LIST_HEAD(&priv->child_intfs); 1379 INIT_LIST_HEAD(&priv->dead_ahs); 1380 INIT_LIST_HEAD(&priv->multicast_list); 1381 1382 INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll); 1383 INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); 1384 INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); 1385 INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); 1386 INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal); 1387 INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy); 1388 INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); 1389 INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); 1390 INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh); 1391 } 1392 1393 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) 1394 { 1395 struct net_device *dev; 1396 1397 dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name, 1398 ipoib_setup); 1399 if (!dev) 1400 return NULL; 1401 1402 return netdev_priv(dev); 1403 } 1404 1405 static ssize_t show_pkey(struct device *dev, 1406 struct device_attribute *attr, char *buf) 1407 { 1408 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev)); 1409 1410 return sprintf(buf, "0x%04x\n", priv->pkey); 1411 } 1412 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 1413 1414 static ssize_t show_umcast(struct device *dev, 1415 struct device_attribute *attr, char *buf) 1416 { 1417 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev)); 1418 1419 return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags)); 1420 } 1421 1422 void ipoib_set_umcast(struct net_device *ndev, int umcast_val) 1423 { 1424 struct ipoib_dev_priv *priv = netdev_priv(ndev); 1425 1426 if (umcast_val > 0) { 1427 set_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1428 ipoib_warn(priv, "ignoring multicast groups joined directly " 1429 "by userspace\n"); 1430 } else 1431 clear_bit(IPOIB_FLAG_UMCAST, &priv->flags); 1432 } 1433 1434 static ssize_t set_umcast(struct device *dev, 1435 struct device_attribute *attr, 1436 const char *buf, size_t count) 1437 { 1438 unsigned long umcast_val = simple_strtoul(buf, NULL, 0); 1439 1440 ipoib_set_umcast(to_net_dev(dev), umcast_val); 1441 1442 return count; 1443 } 1444 static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast); 1445 1446 int ipoib_add_umcast_attr(struct net_device *dev) 1447 { 1448 return device_create_file(&dev->dev, &dev_attr_umcast); 1449 } 1450 1451 static ssize_t create_child(struct device *dev, 1452 struct device_attribute *attr, 1453 const char *buf, size_t count) 1454 { 1455 int pkey; 1456 int ret; 1457 1458 if (sscanf(buf, "%i", &pkey) != 1) 1459 return -EINVAL; 1460 1461 if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000) 1462 return -EINVAL; 1463 1464 /* 1465 * Set the full membership bit, so that we join the right 1466 * broadcast group, etc. 1467 */ 1468 pkey |= 0x8000; 1469 1470 ret = ipoib_vlan_add(to_net_dev(dev), pkey); 1471 1472 return ret ? ret : count; 1473 } 1474 static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child); 1475 1476 static ssize_t delete_child(struct device *dev, 1477 struct device_attribute *attr, 1478 const char *buf, size_t count) 1479 { 1480 int pkey; 1481 int ret; 1482 1483 if (sscanf(buf, "%i", &pkey) != 1) 1484 return -EINVAL; 1485 1486 if (pkey < 0 || pkey > 0xffff) 1487 return -EINVAL; 1488 1489 ret = ipoib_vlan_delete(to_net_dev(dev), pkey); 1490 1491 return ret ? ret : count; 1492 1493 } 1494 static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child); 1495 1496 int ipoib_add_pkey_attr(struct net_device *dev) 1497 { 1498 return device_create_file(&dev->dev, &dev_attr_pkey); 1499 } 1500 1501 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) 1502 { 1503 struct ib_device_attr *device_attr; 1504 int result = -ENOMEM; 1505 1506 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); 1507 if (!device_attr) { 1508 printk(KERN_WARNING "%s: allocation of %zu bytes failed\n", 1509 hca->name, sizeof *device_attr); 1510 return result; 1511 } 1512 1513 result = ib_query_device(hca, device_attr); 1514 if (result) { 1515 printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n", 1516 hca->name, result); 1517 kfree(device_attr); 1518 return result; 1519 } 1520 priv->hca_caps = device_attr->device_cap_flags; 1521 1522 kfree(device_attr); 1523 1524 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { 1525 priv->dev->hw_features = NETIF_F_SG | 1526 NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 1527 1528 if (priv->hca_caps & IB_DEVICE_UD_TSO) 1529 priv->dev->hw_features |= NETIF_F_TSO; 1530 1531 priv->dev->features |= priv->dev->hw_features; 1532 } 1533 1534 return 0; 1535 } 1536 1537 static struct net_device *ipoib_add_port(const char *format, 1538 struct ib_device *hca, u8 port) 1539 { 1540 struct ipoib_dev_priv *priv; 1541 struct ib_port_attr attr; 1542 int result = -ENOMEM; 1543 1544 priv = ipoib_intf_alloc(format); 1545 if (!priv) 1546 goto alloc_mem_failed; 1547 1548 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1549 priv->dev->dev_id = port - 1; 1550 1551 if (!ib_query_port(hca, port, &attr)) 1552 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 1553 else { 1554 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 1555 hca->name, port); 1556 goto device_init_failed; 1557 } 1558 1559 /* MTU will be reset when mcast join happens */ 1560 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 1561 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 1562 1563 priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh); 1564 1565 result = ib_query_pkey(hca, port, 0, &priv->pkey); 1566 if (result) { 1567 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", 1568 hca->name, port, result); 1569 goto device_init_failed; 1570 } 1571 1572 if (ipoib_set_dev_features(priv, hca)) 1573 goto device_init_failed; 1574 1575 /* 1576 * Set the full membership bit, so that we join the right 1577 * broadcast group, etc. 1578 */ 1579 priv->pkey |= 0x8000; 1580 1581 priv->dev->broadcast[8] = priv->pkey >> 8; 1582 priv->dev->broadcast[9] = priv->pkey & 0xff; 1583 1584 result = ib_query_gid(hca, port, 0, &priv->local_gid); 1585 if (result) { 1586 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 1587 hca->name, port, result); 1588 goto device_init_failed; 1589 } else 1590 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 1591 1592 result = ipoib_dev_init(priv->dev, hca, port); 1593 if (result < 0) { 1594 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 1595 hca->name, port, result); 1596 goto device_init_failed; 1597 } 1598 1599 INIT_IB_EVENT_HANDLER(&priv->event_handler, 1600 priv->ca, ipoib_event); 1601 result = ib_register_event_handler(&priv->event_handler); 1602 if (result < 0) { 1603 printk(KERN_WARNING "%s: ib_register_event_handler failed for " 1604 "port %d (ret = %d)\n", 1605 hca->name, port, result); 1606 goto event_failed; 1607 } 1608 1609 result = register_netdev(priv->dev); 1610 if (result) { 1611 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1612 hca->name, port, result); 1613 goto register_failed; 1614 } 1615 1616 ipoib_create_debug_files(priv->dev); 1617 1618 if (ipoib_cm_add_mode_attr(priv->dev)) 1619 goto sysfs_failed; 1620 if (ipoib_add_pkey_attr(priv->dev)) 1621 goto sysfs_failed; 1622 if (ipoib_add_umcast_attr(priv->dev)) 1623 goto sysfs_failed; 1624 if (device_create_file(&priv->dev->dev, &dev_attr_create_child)) 1625 goto sysfs_failed; 1626 if (device_create_file(&priv->dev->dev, &dev_attr_delete_child)) 1627 goto sysfs_failed; 1628 1629 return priv->dev; 1630 1631 sysfs_failed: 1632 ipoib_delete_debug_files(priv->dev); 1633 unregister_netdev(priv->dev); 1634 1635 register_failed: 1636 ib_unregister_event_handler(&priv->event_handler); 1637 /* Stop GC if started before flush */ 1638 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1639 cancel_delayed_work(&priv->neigh_reap_task); 1640 flush_workqueue(ipoib_workqueue); 1641 1642 event_failed: 1643 ipoib_dev_cleanup(priv->dev); 1644 1645 device_init_failed: 1646 free_netdev(priv->dev); 1647 1648 alloc_mem_failed: 1649 return ERR_PTR(result); 1650 } 1651 1652 static void ipoib_add_one(struct ib_device *device) 1653 { 1654 struct list_head *dev_list; 1655 struct net_device *dev; 1656 struct ipoib_dev_priv *priv; 1657 int s, e, p; 1658 1659 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) 1660 return; 1661 1662 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1663 if (!dev_list) 1664 return; 1665 1666 INIT_LIST_HEAD(dev_list); 1667 1668 if (device->node_type == RDMA_NODE_IB_SWITCH) { 1669 s = 0; 1670 e = 0; 1671 } else { 1672 s = 1; 1673 e = device->phys_port_cnt; 1674 } 1675 1676 for (p = s; p <= e; ++p) { 1677 if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND) 1678 continue; 1679 dev = ipoib_add_port("ib%d", device, p); 1680 if (!IS_ERR(dev)) { 1681 priv = netdev_priv(dev); 1682 list_add_tail(&priv->list, dev_list); 1683 } 1684 } 1685 1686 ib_set_client_data(device, &ipoib_client, dev_list); 1687 } 1688 1689 static void ipoib_remove_one(struct ib_device *device) 1690 { 1691 struct ipoib_dev_priv *priv, *tmp; 1692 struct list_head *dev_list; 1693 1694 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) 1695 return; 1696 1697 dev_list = ib_get_client_data(device, &ipoib_client); 1698 if (!dev_list) 1699 return; 1700 1701 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1702 ib_unregister_event_handler(&priv->event_handler); 1703 1704 rtnl_lock(); 1705 dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); 1706 rtnl_unlock(); 1707 1708 /* Stop GC */ 1709 set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1710 cancel_delayed_work(&priv->neigh_reap_task); 1711 flush_workqueue(ipoib_workqueue); 1712 1713 unregister_netdev(priv->dev); 1714 free_netdev(priv->dev); 1715 } 1716 1717 kfree(dev_list); 1718 } 1719 1720 static int __init ipoib_init_module(void) 1721 { 1722 int ret; 1723 1724 ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size); 1725 ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE); 1726 ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE); 1727 1728 ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size); 1729 ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE); 1730 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); 1731 #ifdef CONFIG_INFINIBAND_IPOIB_CM 1732 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 1733 #endif 1734 1735 /* 1736 * When copying small received packets, we only copy from the 1737 * linear data part of the SKB, so we rely on this condition. 1738 */ 1739 BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE); 1740 1741 ret = ipoib_register_debugfs(); 1742 if (ret) 1743 return ret; 1744 1745 /* 1746 * We create our own workqueue mainly because we want to be 1747 * able to flush it when devices are being removed. We can't 1748 * use schedule_work()/flush_scheduled_work() because both 1749 * unregister_netdev() and linkwatch_event take the rtnl lock, 1750 * so flush_scheduled_work() can deadlock during device 1751 * removal. 1752 */ 1753 ipoib_workqueue = create_singlethread_workqueue("ipoib"); 1754 if (!ipoib_workqueue) { 1755 ret = -ENOMEM; 1756 goto err_fs; 1757 } 1758 1759 ib_sa_register_client(&ipoib_sa_client); 1760 1761 ret = ib_register_client(&ipoib_client); 1762 if (ret) 1763 goto err_sa; 1764 1765 ret = ipoib_netlink_init(); 1766 if (ret) 1767 goto err_client; 1768 1769 return 0; 1770 1771 err_client: 1772 ib_unregister_client(&ipoib_client); 1773 1774 err_sa: 1775 ib_sa_unregister_client(&ipoib_sa_client); 1776 destroy_workqueue(ipoib_workqueue); 1777 1778 err_fs: 1779 ipoib_unregister_debugfs(); 1780 1781 return ret; 1782 } 1783 1784 static void __exit ipoib_cleanup_module(void) 1785 { 1786 ipoib_netlink_fini(); 1787 ib_unregister_client(&ipoib_client); 1788 ib_sa_unregister_client(&ipoib_sa_client); 1789 ipoib_unregister_debugfs(); 1790 destroy_workqueue(ipoib_workqueue); 1791 } 1792 1793 module_init(ipoib_init_module); 1794 module_exit(ipoib_cleanup_module); 1795