/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;
	unsigned long      created;
	unsigned int       queuelen;
	unsigned int       complete;
	unsigned int       send_only;
};

static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irq(&priv->lock);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}

static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;
	int set_qkey = 0;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid, set_qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(mcast->mcmember.mlid),
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.static_rate   = mcast->mcmember.rate,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
				.sgid_index    = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
					mcast->mcmember.mgid.raw,
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		netif_tx_unlock_bh(dev);

		skb->dev = dev;

		if (!skb_dst(skb) || !skb_dst(skb)->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}

static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);

		/* Flush out any queued packets */
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		netif_tx_unlock_bh(dev);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}

static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0				/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID	|
					 IB_SA_MCMEMBER_REC_PORT_GID	|
					 IB_SA_MCMEMBER_REC_PKEY	|
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
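		/*
		 * BUSY has been cleared above, so a later
		 * ipoib_mcast_send() can retry this send-only join.
		 */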
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
				mcast->mcmember.mgid.raw);
	}

	return ret;
}

void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.
	 */
	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}

	rtnl_lock();
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}

static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		/*
		 * Defer carrier on work to ipoib_workqueue to avoid a
		 * deadlock on rtnl_lock here.
		 */
		if (mcast == priv->broadcast)
			queue_work(ipoib_workqueue, &priv->carrier_on_task);

		return 0;
	}

	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EAGAIN) {
			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);
		} else {
			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
				   mcast->mcmember.mgid.raw, status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}

static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID		|
		IB_SA_MCMEMBER_REC_PORT_GID	|
		IB_SA_MCMEMBER_REC_PKEY		|
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY			|
			IB_SA_MCMEMBER_REC_MTU_SELECTOR		|
			IB_SA_MCMEMBER_REC_MTU			|
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS	|
			IB_SA_MCMEMBER_REC_RATE_SELECTOR	|
			IB_SA_MCMEMBER_REC_RATE			|
			IB_SA_MCMEMBER_REC_SL			|
			IB_SA_MCMEMBER_REC_FLOW_LABEL		|
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey	  = priv->broadcast->mcmember.qkey;
		rec.mtu_selector  = IB_SA_EQ;
		rec.mtu		  = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate	  = priv->broadcast->mcmember.rate;
		rec.sl		  = priv->broadcast->mcmember.sl;
		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}

void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

	if (!ipoib_cm_admin_enabled(dev)) {
		rtnl_lock();
		dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
		rtnl_unlock();
	}

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}

static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
				      be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	}

	return 0;
}

void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)		||
	    !priv->broadcast					||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
				mgid);

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		if (skb_dst(skb)		&&
		    skb_dst(skb)->neighbour	&&
		    !*to_ipoib_neigh(skb_dst(skb)->neighbour)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour,
								      skb->dev);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah	= mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
		return;
	}

unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}

void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
				     const u8 *broadcast)
{
	if (addrlen != INFINIBAND_ALEN)
		return 0;
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}

void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;
	struct ib_sa_mcmember_rec rec;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_addr_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses.  We need to figure out which ones
	 * are new and which ones have been removed.
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
					       mclist->dmi_addrlen,
					       dev->broadcast))
			continue;

		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore groups that are directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_addr_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid	    = iter->mgid;
	*created    = iter->created;
	*queuelen   = iter->queuelen;
	*complete   = iter->complete;
	*send_only  = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */