/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client sa_client;
static struct workqueue_struct *mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[NUM_JOIN_MEMBERSHIP_TYPES];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};
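/*
 * Structure overview: each mcast_port keeps an rb-tree of mcast_group
 * entries keyed by MGID, and each group keeps pending and active lists of
 * mcast_member entries.  Reference counts pin the chain
 * member -> group -> port, and the completions let teardown paths wait
 * for the last reference to drop before freeing.
 */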
static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has four types of members: full member, non-member,
 * send-only non-member and send-only full member.  We need to keep track
 * of the number of members of each type based on their join state.
 * Adjust the number of members that belong to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
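/*
 * Example: a join_state of 0x5 (bits 0 and 2, i.e. full member plus
 * send-only non-member) makes adjust_membership(group, 0x5, 1) increment
 * members[0] and members[2]; the matching leave decrements the same two
 * counters.
 */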
/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}

static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
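/*
 * Worked example for the selector checks above: if a joiner sets
 * IB_SA_MCMEMBER_REC_MTU_SELECTOR | IB_SA_MCMEMBER_REC_MTU with
 * mtu_selector = IB_SA_GT, check_selector() reports a mismatch whenever
 * the existing group's MTU (src) is <= the requested MTU (dst), so the
 * join only shares a group whose MTU is strictly greater.
 */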
static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}
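/*
 * Group state machine, as driven by the handler below: an MCAST_IDLE
 * group moves to MCAST_BUSY when work is queued; MCAST_GROUP_ERROR and
 * MCAST_PKEY_EVENT divert the handler into process_group_error(), which
 * resets active members with -ENETRESET and returns the group to
 * MCAST_BUSY so pending joins can proceed or the SA membership can be
 * torn down.
 */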
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;
		ib_find_pkey(group->port->dev->device, group->port->port_num,
			     be16_to_cpu(rec->pkey), &pkey_index);

		spin_lock_irq(&group->port->lock);
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
				       sizeof(group->rec.mgid));
		group->rec = *rec;
		if (mgids_changed) {
			rb_erase(&group->node, &group->port->table);
			is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
					   sizeof(mgid0));
			mcast_insert(group->port, group, is_mgid0);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}
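/*
 * Note on the zero MGID handled above: mgid0 is all zeroes, which requests
 * an SA-assigned MGID.  Such groups are inserted with allow_duplicates set,
 * since several unresolved requests can share MGID 0 until join_handler()
 * re-keys each group with the MGID the SA returned.
 */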
/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);
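/*
 * Usage sketch for the two calls above (hypothetical caller; my_join_cb,
 * my_sa_client, my_ctx and mcast are illustrative names, not part of this
 * file):
 *
 *	static int my_join_cb(int status, struct ib_sa_multicast *mc)
 *	{
 *		if (status)
 *			return status;	// nonzero return frees the membership
 *		// join completed; mc->rec now holds the group's chosen values
 *		return 0;
 *	}
 *
 *	mcast = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				     comp_mask, GFP_KERNEL, my_join_cb, my_ctx);
 *	if (IS_ERR(mcast))
 *		return PTR_ERR(mcast);
 *	...
 *	ib_sa_free_multicast(mcast);	// drops this member; the last one
 *					// triggers the SA leave
 */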
int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct net_device *ndev,
			     enum ib_gid_type gid_type,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	if (rdma_protocol_roce(device, port_num)) {
		ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
						 gid_type, port_num,
						 ndev,
						 &gid_index);
	} else if (rdma_protocol_ib(device, port_num)) {
		ret = ib_find_cached_gid(device, &rec->port_gid,
					 IB_GID_TYPE_IB, NULL, &p,
					 &gid_index);
	} else {
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);

static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}
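/*
 * Device lifecycle: the ib_client callbacks below run when an IB device is
 * registered or removed.  Ports without SA multicast support
 * (rdma_cap_ib_mcast()) are skipped on both paths; removal drops each
 * port's initial reference and waits on port->comp until every group on
 * that port has been released.
 */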
static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	dev->start_port = rdma_start_port(device);
	dev->end_port = rdma_end_port(device);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device, void *client_data)
{
	struct mcast_device *dev = client_data;
	struct mcast_port *port;
	int i;

	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = create_singlethread_workqueue("ib_mcast");
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}