/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;
	unsigned long      created;
	unsigned int       queuelen;
	unsigned int       complete;
	unsigned int       send_only;
};
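
/*
 * Free a multicast group entry: drop the address handle references
 * held by the group and by any neighbours linked to it, discard any
 * packets still queued waiting on the join (counting them as TX
 * drops), and release the structure itself.
 */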
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irq(&priv->lock);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}

static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}
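
/*
 * Complete a successful join: cache the mcmember record, set the
 * Q_Key if this is the broadcast group, attach the QP for full-member
 * groups, build an address handle from the returned parameters, and
 * flush out any packets queued while the join was in progress.
 */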
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;
	int set_qkey = 0;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid, set_qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(mcast->mcmember.mlid),
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.static_rate   = mcast->mcmember.rate,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
				.sgid_index    = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ipoib_create_ah failed\n");
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
					mcast->mcmember.mgid.raw,
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		struct dst_entry *dst = skb_dst(skb);
		struct neighbour *n = NULL;

		netif_tx_unlock_bh(dev);

		skb->dev = dev;
		if (dst)
			n = dst_get_neighbour(dst);
		if (!dst || !n) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}

static int ipoib_mcast_sendonly_join_complete(int status,
					      struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);

		/* Flush out any queued packets */
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		netif_tx_unlock_bh(dev);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}
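
/*
 * Kick off an SA join for a send-only group.  This is called from the
 * TX path under priv->lock, hence GFP_ATOMIC; on failure the busy
 * flag is cleared so the next send retries the join.
 */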
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0				/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID	|
					 IB_SA_MCMEMBER_REC_PORT_GID	|
					 IB_SA_MCMEMBER_REC_PKEY	|
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
				mcast->mcmember.mgid.raw);
	}

	return ret;
}

void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.
	 */
	rtnl_lock();
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}

static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		/*
		 * Defer carrier on work to ipoib_workqueue to avoid a
		 * deadlock on rtnl_lock here.
		 */
		if (mcast == priv->broadcast)
			queue_work(ipoib_workqueue, &priv->carrier_on_task);

		return 0;
	}

	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EAGAIN) {
			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);
		} else {
			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
				   mcast->mcmember.mgid.raw, status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}
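
/*
 * Issue a full-member SA join.  When create is set we ask the SA to
 * create the group if necessary, seeding the Q_Key, MTU, rate and the
 * other parameters from the broadcast group so that every IPoIB group
 * shares its characteristics.
 */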
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID		|
		IB_SA_MCMEMBER_REC_PORT_GID	|
		IB_SA_MCMEMBER_REC_PKEY		|
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY			|
			IB_SA_MCMEMBER_REC_MTU_SELECTOR		|
			IB_SA_MCMEMBER_REC_MTU			|
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS	|
			IB_SA_MCMEMBER_REC_RATE_SELECTOR	|
			IB_SA_MCMEMBER_REC_RATE			|
			IB_SA_MCMEMBER_REC_SL			|
			IB_SA_MCMEMBER_REC_FLOW_LABEL		|
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey	  = priv->broadcast->mcmember.qkey;
		rec.mtu_selector  = IB_SA_EQ;
		rec.mtu		  = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate	  = priv->broadcast->mcmember.rate;
		rec.sl		  = priv->broadcast->mcmember.sl;
		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}
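
/*
 * Workqueue handler driving the join state machine: refresh the local
 * GID and LID, join the broadcast group first (its parameters seed all
 * other joins and set the multicast MTU), then walk multicast_list
 * joining one unattached group per pass until all groups are joined.
 */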
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

	if (!ipoib_cm_admin_enabled(dev)) {
		rtnl_lock();
		dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
		rtnl_unlock();
	}

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}

static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
				      be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	}

	return 0;
}
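
/*
 * Transmit-path entry point.  If the group already has an address
 * handle, send immediately; otherwise queue the packet (up to
 * IPOIB_MAX_MCAST_QUEUE deep) and start a send-only join to resolve
 * the group.  Queued packets are sent by ipoib_mcast_join_finish().
 */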
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
	    !priv->broadcast					||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
				mgid);

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If the lookup completes between here and out:, we
		 * don't want to send the packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		struct dst_entry *dst = skb_dst(skb);
		struct neighbour *n = NULL;

		if (dst)
			n = dst_get_neighbour(dst);
		if (n && !*to_ipoib_neigh(n)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
								      skb->dev);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}
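
/*
 * Resynchronize our group list with the net core's multicast address
 * list: mark the entries still present, create new entries for fresh
 * addresses, and move stale (non-send-only) entries to a remove list
 * that is torn down once the locks are dropped.
 */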
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;
	struct ib_sa_mcmember_rec rec;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_addr_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of
	 * all of the multicast hardware addresses.  We need to figure
	 * out which ones are new and which ones have been removed.
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
			continue;

		memcpy(mgid.raw, ha->addr + 4, sizeof mgid);

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore groups that are directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_addr_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}
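
/*
 * Advance the debug iterator to the first group whose MGID sorts
 * after iter->mgid, snapshotting its state under priv->lock.
 */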
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid      = mcast->mcmember.mgid;
			iter->created   = mcast->created;
			iter->queuelen  = skb_queue_len(&mcast->pkt_queue);
			iter->complete  = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid      = iter->mgid;
	*created   = iter->created;
	*queuelen  = iter->queuelen;
	*complete  = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */