Lines Matching refs:group (multicast group handling, drivers/infiniband/hw/mlx4/mcg.c)

50 #define mcg_warn_group(group, format, arg...) \  argument
52 (group)->name, group->demux->port, ## arg)
54 #define mcg_debug_group(group, format, arg...) \ argument
56 (group)->name, (group)->demux->port, ## arg)
58 #define mcg_error_group(group, format, arg...) \ argument
59 pr_err(" %16s: " format, (group)->name, ## arg)
136 struct mcast_group *group; member
144 mcg_warn_group(group, "did not expect to reach zero\n"); \
166 struct mcast_group *group; in mcast_find() local
170 group = rb_entry(node, struct mcast_group, node); in mcast_find()
171 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
173 return group; in mcast_find()
184 struct mcast_group *group) in mcast_insert() argument
195 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
196 sizeof group->rec.mgid); in mcast_insert()
204 rb_link_node(&group->node, parent, link); in mcast_insert()
205 rb_insert_color(&group->node, &ctx->mcg_table); in mcast_insert()
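The mcast_find()/mcast_insert() lines above show the groups kept in a red-black tree ordered by memcmp() over the 16-byte MGID. Below is a minimal userspace sketch of that ordering idea; it uses a plain (unbalanced) binary search tree and invented names (struct mgid, struct node) rather than the kernel's rb_node machinery.

/* Sketch: search tree keyed by a 16-byte MGID via memcmp(), mirroring the
 * ordering used by mcast_find()/mcast_insert().  Plain BST, not an rbtree. */
#include <stdio.h>
#include <string.h>

struct mgid { unsigned char raw[16]; };          /* invented stand-in for union ib_gid */

struct node {
	struct mgid key;
	struct node *left, *right;
};

static struct node *find(struct node *root, const struct mgid *key)
{
	while (root) {
		int ret = memcmp(key->raw, root->key.raw, sizeof(key->raw));
		if (!ret)
			return root;
		root = ret < 0 ? root->left : root->right;
	}
	return NULL;
}

static struct node *insert(struct node **rootp, struct node *n)
{
	while (*rootp) {
		int ret = memcmp(n->key.raw, (*rootp)->key.raw, sizeof(n->key.raw));
		if (!ret)
			return *rootp;                   /* duplicate MGID: return existing node */
		rootp = ret < 0 ? &(*rootp)->left : &(*rootp)->right;
	}
	n->left = n->right = NULL;
	*rootp = n;
	return NULL;                                     /* inserted */
}

int main(void)
{
	struct node *root = NULL, a = { .key.raw = { 1 } }, b = { .key.raw = { 2 } };

	insert(&root, &a);
	insert(&root, &b);
	printf("found a: %d\n", find(root, &a.key) == &a);
	return 0;
}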
252 static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad) in send_join_to_wire() argument
262 sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0]; in send_join_to_wire()
265 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_join_to_wire()
266 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_join_to_wire()
268 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_join_to_wire()
272 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_join_to_wire()
279 static int send_leave_to_wire(struct mcast_group *group, u8 join_state) in send_leave_to_wire() argument
292 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_leave_to_wire()
293 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_leave_to_wire()
301 *sa_data = group->rec; in send_leave_to_wire()
304 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_leave_to_wire()
306 group->state = MCAST_IDLE; in send_leave_to_wire()
311 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_leave_to_wire()
318 static int send_reply_to_slave(int slave, struct mcast_group *group, in send_reply_to_slave() argument
341 *sa_data = group->rec; in send_reply_to_slave()
345 sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f); in send_reply_to_slave()
348 ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad); in send_reply_to_slave()
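send_join_to_wire() and send_leave_to_wire() stamp each outgoing SA MAD with a fresh transaction ID, remember it in group->last_req_tid, and arm a timeout; the response is later matched against that saved TID in mlx4_ib_mcg_work_handler() (see the TID check further down). A hedged userspace sketch of that request/response correlation follows; new_tid() and struct request are invented stand-ins.

/* Sketch: assign a fresh TID per outgoing request, remember the last one,
 * and drop any response whose TID does not match it. */
#include <stdint.h>
#include <stdio.h>

struct request {
	uint64_t last_req_tid;                   /* TID of the request we are waiting on */
};

static uint64_t tid_counter;

static uint64_t new_tid(void)                    /* invented stand-in for mlx4_ib_get_new_demux_tid() */
{
	return ++tid_counter;
}

static uint64_t send_request(struct request *r)
{
	r->last_req_tid = new_tid();             /* keep it for later validation */
	/* ... hand the MAD to the wire and arm a timeout here ... */
	return r->last_req_tid;
}

static int handle_response(struct request *r, uint64_t resp_tid)
{
	if (resp_tid != r->last_req_tid) {
		fprintf(stderr, "wrong TID %llu, dropping\n", (unsigned long long)resp_tid);
		return -1;
	}
	return 0;                                /* TID matches: process the response */
}

int main(void)
{
	struct request r = { 0 };
	uint64_t tid = send_request(&r);

	printf("stale: %d, current: %d\n", handle_response(&r, tid + 1), handle_response(&r, tid));
	return 0;
}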
437 static int release_group(struct mcast_group *group, int from_timeout_handler) in release_group() argument
439 struct mlx4_ib_demux_ctx *ctx = group->demux; in release_group()
443 mutex_lock(&group->lock); in release_group()
444 if (atomic_dec_and_test(&group->refcount)) { in release_group()
446 if (group->state != MCAST_IDLE && in release_group()
447 !cancel_delayed_work(&group->timeout_work)) { in release_group()
448 atomic_inc(&group->refcount); in release_group()
449 mutex_unlock(&group->lock); in release_group()
455 nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0); in release_group()
457 del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in release_group()
458 if (!list_empty(&group->pending_list)) in release_group()
459 mcg_warn_group(group, "releasing a group with non empty pending list\n"); in release_group()
461 rb_erase(&group->node, &ctx->mcg_table); in release_group()
462 list_del_init(&group->mgid0_list); in release_group()
463 mutex_unlock(&group->lock); in release_group()
465 kfree(group); in release_group()
468 mutex_unlock(&group->lock); in release_group()
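release_group() drops a reference under group->lock; if that was the last reference but a join/leave timeout is still queued and cannot be cancelled, it takes the reference back and lets the timeout handler perform the final release. A simplified, single-threaded sketch of that "undo the decrement when cleanup must be deferred" pattern is below; cancel_pending_timeout() is an invented stand-in for cancel_delayed_work().

/* Sketch: drop a reference; free only when it hits zero AND any pending
 * timeout could be cancelled, otherwise hand the reference to the handler. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	int refcount;
	bool timeout_pending;                    /* a join/leave timeout work item is queued */
	bool timeout_running;                    /* that work item already started executing */
};

static bool cancel_pending_timeout(struct group *g)   /* invented stand-in for cancel_delayed_work() */
{
	if (g->timeout_running)
		return false;                        /* too late to cancel; the handler will run */
	g->timeout_pending = false;
	return true;
}

static int release_group(struct group *g)
{
	if (--g->refcount == 0) {
		if (g->timeout_pending && !cancel_pending_timeout(g)) {
			g->refcount++;                   /* hand the last reference to the timeout handler */
			return 0;
		}
		free(g);
		return 1;                            /* group actually destroyed */
	}
	return 0;
}

int main(void)
{
	struct group *g = calloc(1, sizeof(*g));

	g->refcount = 1;
	g->timeout_pending = true;
	printf("destroyed: %d\n", release_group(g));     /* 1: timeout cancelled, group freed */
	return 0;
}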
474 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) in adjust_membership() argument
480 group->members[i] += inc; in adjust_membership()
483 static u8 get_leave_state(struct mcast_group *group) in get_leave_state() argument
489 if (!group->members[i]) in get_leave_state()
492 return leave_state & (group->rec.scope_join_state & 0xf); in get_leave_state()
495 static int join_group(struct mcast_group *group, int slave, u8 join_mask) in join_group() argument
501 join_state = join_mask & (~group->func[slave].join_state); in join_group()
502 adjust_membership(group, join_state, 1); in join_group()
503 group->func[slave].join_state |= join_state; in join_group()
504 if (group->func[slave].state != MCAST_MEMBER && join_state) { in join_group()
505 group->func[slave].state = MCAST_MEMBER; in join_group()
511 static int leave_group(struct mcast_group *group, int slave, u8 leave_state) in leave_group() argument
515 adjust_membership(group, leave_state, -1); in leave_group()
516 group->func[slave].join_state &= ~leave_state; in leave_group()
517 if (!group->func[slave].join_state) { in leave_group()
518 group->func[slave].state = MCAST_NOT_MEMBER; in leave_group()
524 static int check_leave(struct mcast_group *group, int slave, u8 leave_mask) in check_leave() argument
526 if (group->func[slave].state != MCAST_MEMBER) in check_leave()
530 if (~group->func[slave].join_state & leave_mask) in check_leave()
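adjust_membership(), get_leave_state(), join_group() and leave_group() keep one counter per join-state bit: a slave's bits are added on join and subtracted on leave, and a bit is only left on the wire once no slave holds it any more. A rough sketch of that bookkeeping, assuming the three low join-state bits as in the listed code:

/* Sketch: per-bit membership counters.  Each of the low three join-state
 * bits gets its own counter; a bit may be left on the wire only when no
 * slave still holds it. */
#include <stdint.h>
#include <stdio.h>

#define NUM_JOIN_BITS 3

struct group {
	int members[NUM_JOIN_BITS];              /* how many slaves hold each bit */
	uint8_t scope_join_state;                /* low nibble: bits currently joined on the wire */
};

static void adjust_membership(struct group *g, uint8_t join_state, int inc)
{
	for (int i = 0; i < NUM_JOIN_BITS; i++)
		if (join_state & (1 << i))
			g->members[i] += inc;
}

static uint8_t get_leave_state(struct group *g)
{
	uint8_t leave_state = 0;

	for (int i = 0; i < NUM_JOIN_BITS; i++)
		if (!g->members[i])
			leave_state |= 1 << i;
	return leave_state & (g->scope_join_state & 0xf);
}

int main(void)
{
	struct group g = { .scope_join_state = 0x3 };    /* bits 0 and 1 joined on the wire */

	adjust_membership(&g, 0x1, +1);                  /* slave A joins bit 0 */
	adjust_membership(&g, 0x3, +1);                  /* slave B joins bits 0 and 1 */
	adjust_membership(&g, 0x1, -1);                  /* slave A leaves */
	printf("bits to leave now: 0x%x\n", get_leave_state(&g));   /* 0x0: bit 0 still held by B */
	adjust_membership(&g, 0x3, -1);                  /* slave B leaves */
	printf("bits to leave now: 0x%x\n", get_leave_state(&g));   /* 0x3 */
	return 0;
}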
542 struct mcast_group *group; in mlx4_ib_mcg_timeout_handler() local
545 group = container_of(delay, typeof(*group), timeout_work); in mlx4_ib_mcg_timeout_handler()
547 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
548 if (group->state == MCAST_JOIN_SENT) { in mlx4_ib_mcg_timeout_handler()
549 if (!list_empty(&group->pending_list)) { in mlx4_ib_mcg_timeout_handler()
550 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in mlx4_ib_mcg_timeout_handler()
553 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_timeout_handler()
554 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
556 if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) { in mlx4_ib_mcg_timeout_handler()
557 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
560 kfree(group); in mlx4_ib_mcg_timeout_handler()
563 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
565 mcg_warn_group(group, "DRIVER BUG\n"); in mlx4_ib_mcg_timeout_handler()
566 } else if (group->state == MCAST_LEAVE_SENT) { in mlx4_ib_mcg_timeout_handler()
567 if (group->rec.scope_join_state & 0xf) in mlx4_ib_mcg_timeout_handler()
568 group->rec.scope_join_state &= 0xf0; in mlx4_ib_mcg_timeout_handler()
569 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
570 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
571 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
573 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
575 mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state)); in mlx4_ib_mcg_timeout_handler()
576 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
577 atomic_inc(&group->refcount); in mlx4_ib_mcg_timeout_handler()
578 if (!queue_work(group->demux->mcg_wq, &group->work)) in mlx4_ib_mcg_timeout_handler()
579 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_timeout_handler()
581 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
584 static int handle_leave_req(struct mcast_group *group, u8 leave_mask, in handle_leave_req() argument
590 leave_mask = group->func[req->func].join_state; in handle_leave_req()
592 status = check_leave(group, req->func, leave_mask); in handle_leave_req()
594 leave_group(group, req->func, leave_mask); in handle_leave_req()
597 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_leave_req()
598 --group->func[req->func].num_pend_reqs; in handle_leave_req()
605 static int handle_join_req(struct mcast_group *group, u8 join_mask, in handle_join_req() argument
608 u8 group_join_state = group->rec.scope_join_state & 0xf; in handle_join_req()
615 status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); in handle_join_req()
617 join_group(group, req->func, join_mask); in handle_join_req()
619 --group->func[req->func].num_pend_reqs; in handle_join_req()
620 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_join_req()
627 group->prev_state = group->state; in handle_join_req()
628 if (send_join_to_wire(group, &req->sa_mad)) { in handle_join_req()
629 --group->func[req->func].num_pend_reqs; in handle_join_req()
634 group->state = group->prev_state; in handle_join_req()
636 group->state = MCAST_JOIN_SENT; in handle_join_req()
644 struct mcast_group *group; in mlx4_ib_mcg_work_handler() local
652 group = container_of(work, typeof(*group), work); in mlx4_ib_mcg_work_handler()
654 mutex_lock(&group->lock); in mlx4_ib_mcg_work_handler()
660 if (group->state == MCAST_RESP_READY) { in mlx4_ib_mcg_work_handler()
662 cancel_delayed_work(&group->timeout_work); in mlx4_ib_mcg_work_handler()
663 status = be16_to_cpu(group->response_sa_mad.mad_hdr.status); in mlx4_ib_mcg_work_handler()
664 method = group->response_sa_mad.mad_hdr.method; in mlx4_ib_mcg_work_handler()
665 if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) { in mlx4_ib_mcg_work_handler()
666 mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n", in mlx4_ib_mcg_work_handler()
667 be64_to_cpu(group->response_sa_mad.mad_hdr.tid), in mlx4_ib_mcg_work_handler()
668 be64_to_cpu(group->last_req_tid)); in mlx4_ib_mcg_work_handler()
669 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
673 if (!list_empty(&group->pending_list)) in mlx4_ib_mcg_work_handler()
674 req = list_first_entry(&group->pending_list, in mlx4_ib_mcg_work_handler()
678 send_reply_to_slave(req->func, group, &req->sa_mad, status); in mlx4_ib_mcg_work_handler()
679 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_work_handler()
685 mcg_warn_group(group, "no request for failed join\n"); in mlx4_ib_mcg_work_handler()
686 } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing) in mlx4_ib_mcg_work_handler()
693 group->response_sa_mad.data)->scope_join_state & 0xf; in mlx4_ib_mcg_work_handler()
694 cur_join_state = group->rec.scope_join_state & 0xf; in mlx4_ib_mcg_work_handler()
702 memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec); in mlx4_ib_mcg_work_handler()
704 group->state = MCAST_IDLE; in mlx4_ib_mcg_work_handler()
709 while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
710 req = list_first_entry(&group->pending_list, struct mcast_req, in mlx4_ib_mcg_work_handler()
719 rc += handle_leave_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
721 rc += handle_join_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
725 if (group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
726 req_join_state = get_leave_state(group); in mlx4_ib_mcg_work_handler()
728 group->rec.scope_join_state &= ~req_join_state; in mlx4_ib_mcg_work_handler()
729 group->prev_state = group->state; in mlx4_ib_mcg_work_handler()
730 if (send_leave_to_wire(group, req_join_state)) { in mlx4_ib_mcg_work_handler()
731 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
734 group->state = MCAST_LEAVE_SENT; in mlx4_ib_mcg_work_handler()
738 if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) in mlx4_ib_mcg_work_handler()
740 mutex_unlock(&group->lock); in mlx4_ib_mcg_work_handler()
743 release_group(group, 0); in mlx4_ib_mcg_work_handler()
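mlx4_ib_mcg_work_handler() first consumes a wire response if one is ready (validating its TID and applying the returned record), then drains the pending list as long as the group stays in MCAST_IDLE: each pending join or leave either completes locally or sends a new request to the wire, which moves the group into a *_SENT state and pauses the drain until the next response or timeout. A hedged sketch of that "process until a request goes on the wire" loop; the enum, struct req and helpers below are invented for illustration.

/* Sketch: drain a queue of pending requests while the group is idle;
 * a request that must go to the wire parks the group in a *_SENT state
 * and stops the drain until the response (or its timeout) arrives. */
#include <stdbool.h>
#include <stdio.h>

enum state { IDLE, JOIN_SENT, LEAVE_SENT };

struct req { bool needs_wire; };                 /* invented: true if the SA must be consulted */

struct group {
	enum state state;
	struct req *pending;                         /* flattened stand-in for the pending list */
	int npending;
};

static void handle_req(struct group *g, struct req *r)
{
	if (r->needs_wire)
		g->state = JOIN_SENT;                    /* wait for the SA response */
	/* otherwise: answered locally, group stays IDLE */
}

static void work_handler(struct group *g)
{
	int i = 0;

	while (i < g->npending && g->state == IDLE)
		handle_req(g, &g->pending[i++]);
	printf("processed %d of %d requests, state=%d\n", i, g->npending, g->state);
}

int main(void)
{
	struct req reqs[] = { { false }, { false }, { true }, { false } };
	struct group g = { IDLE, reqs, 4 };

	work_handler(&g);                            /* stops after the third request */
	return 0;
}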
750 struct mcast_group *group = NULL, *cur_group, *n; in search_relocate_mgid0_group() local
754 list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) { in search_relocate_mgid0_group()
755 mutex_lock(&group->lock); in search_relocate_mgid0_group()
756 if (group->last_req_tid == tid) { in search_relocate_mgid0_group()
758 group->rec.mgid = *new_mgid; in search_relocate_mgid0_group()
759 sprintf(group->name, "%016llx%016llx", in search_relocate_mgid0_group()
760 be64_to_cpu(group->rec.mgid.global.subnet_prefix), in search_relocate_mgid0_group()
761 be64_to_cpu(group->rec.mgid.global.interface_id)); in search_relocate_mgid0_group()
762 list_del_init(&group->mgid0_list); in search_relocate_mgid0_group()
763 cur_group = mcast_insert(ctx, group); in search_relocate_mgid0_group()
766 req = list_first_entry(&group->pending_list, in search_relocate_mgid0_group()
768 --group->func[req->func].num_pend_reqs; in search_relocate_mgid0_group()
772 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
774 release_group(group, 0); in search_relocate_mgid0_group()
778 atomic_inc(&group->refcount); in search_relocate_mgid0_group()
779 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in search_relocate_mgid0_group()
780 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
782 return group; in search_relocate_mgid0_group()
786 list_del(&group->mgid0_list); in search_relocate_mgid0_group()
787 if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE) in search_relocate_mgid0_group()
788 cancel_delayed_work_sync(&group->timeout_work); in search_relocate_mgid0_group()
790 list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) { in search_relocate_mgid0_group()
794 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
796 kfree(group); in search_relocate_mgid0_group()
800 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
813 struct mcast_group *group, *cur_group; in acquire_group() local
819 group = mcast_find(ctx, mgid); in acquire_group()
820 if (group) in acquire_group()
827 group = kzalloc(sizeof(*group), GFP_KERNEL); in acquire_group()
828 if (!group) in acquire_group()
831 group->demux = ctx; in acquire_group()
832 group->rec.mgid = *mgid; in acquire_group()
833 INIT_LIST_HEAD(&group->pending_list); in acquire_group()
834 INIT_LIST_HEAD(&group->mgid0_list); in acquire_group()
836 INIT_LIST_HEAD(&group->func[i].pending); in acquire_group()
837 INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); in acquire_group()
838 INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler); in acquire_group()
839 mutex_init(&group->lock); in acquire_group()
840 sprintf(group->name, "%016llx%016llx", in acquire_group()
841 be64_to_cpu(group->rec.mgid.global.subnet_prefix), in acquire_group()
842 be64_to_cpu(group->rec.mgid.global.interface_id)); in acquire_group()
843 sysfs_attr_init(&group->dentry.attr); in acquire_group()
844 group->dentry.show = sysfs_show_group; in acquire_group()
845 group->dentry.store = NULL; in acquire_group()
846 group->dentry.attr.name = group->name; in acquire_group()
847 group->dentry.attr.mode = 0400; in acquire_group()
848 group->state = MCAST_IDLE; in acquire_group()
851 list_add(&group->mgid0_list, &ctx->mcg_mgid0_list); in acquire_group()
855 cur_group = mcast_insert(ctx, group); in acquire_group()
858 kfree(group); in acquire_group()
862 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in acquire_group()
865 atomic_inc(&group->refcount); in acquire_group()
866 return group; in acquire_group()
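acquire_group() is the find-or-create path: look the MGID up in the tree, and if it is absent (and creation is allowed) allocate a zeroed group, initialise its lists, work items, lock, name and sysfs entry, insert it, and only then take a reference for the caller. A condensed sketch of that lookup-or-allocate shape, using a fixed-size array in place of the rbtree and invented helper names:

/* Sketch: acquire = find existing entry or create one, then take a
 * reference for the caller. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct group {
	unsigned char mgid[16];
	int refcount;
};

static struct group *table[32];                  /* stand-in for the per-port rbtree */

static struct group *acquire_group(const unsigned char *mgid, int may_create)
{
	int free_slot = -1;

	for (int i = 0; i < 32; i++) {
		if (!table[i]) {
			if (free_slot < 0)
				free_slot = i;
			continue;
		}
		if (!memcmp(table[i]->mgid, mgid, 16)) {
			table[i]->refcount++;            /* existing group: just take a reference */
			return table[i];
		}
	}
	if (!may_create || free_slot < 0)
		return NULL;

	struct group *g = calloc(1, sizeof(*g));
	if (!g)
		return NULL;
	memcpy(g->mgid, mgid, 16);
	/* the real code also initialises lists, work items, lock, name and sysfs entry here */
	g->refcount = 1;                             /* reference handed to the caller */
	table[free_slot] = g;
	return g;
}

int main(void)
{
	unsigned char mgid[16] = { 0xff, 0x12 };
	struct group *a = acquire_group(mgid, 1);
	struct group *b = acquire_group(mgid, 0);

	printf("same group: %d, refcount: %d\n", a == b, a->refcount);
	return 0;
}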
871 struct mcast_group *group = req->group; in queue_req() local
873 atomic_inc(&group->refcount); /* for the request */ in queue_req()
874 atomic_inc(&group->refcount); /* for scheduling the work */ in queue_req()
875 list_add_tail(&req->group_list, &group->pending_list); in queue_req()
876 list_add_tail(&req->func_list, &group->func[req->func].pending); in queue_req()
878 if (!queue_work(group->demux->mcg_wq, &group->work)) in queue_req()
879 safe_atomic_dec(&group->refcount); in queue_req()
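queue_req() takes two references before scheduling the work: one owned by the request sitting on the pending list and one owned by the work item itself; if queue_work() reports the work was already queued, the second reference is dropped again (safe_atomic_dec) so nothing leaks. A small sketch of that convention; queue_work_once() is an invented stand-in that returns false when the work was already pending.

/* Sketch: one reference per queued request plus one for the scheduled
 * work item; undo the work reference when the work was already queued. */
#include <stdbool.h>
#include <stdio.h>

struct group {
	int refcount;
	bool work_queued;
};

static bool queue_work_once(struct group *g)     /* invented stand-in for queue_work() */
{
	if (g->work_queued)
		return false;                            /* already queued: no new reference needed */
	g->work_queued = true;
	return true;
}

static void queue_req(struct group *g)
{
	g->refcount++;                               /* for the request on the pending list */
	g->refcount++;                               /* for scheduling the work */
	if (!queue_work_once(g))
		g->refcount--;                           /* work already queued, give that one back */
}

int main(void)
{
	struct group g = { .refcount = 1 };

	queue_req(&g);                               /* work newly queued: +2 */
	queue_req(&g);                               /* work already queued: +1 */
	printf("refcount: %d\n", g.refcount);        /* 4 */
	return 0;
}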
888 struct mcast_group *group; in mlx4_ib_mcg_demux_handler() local
894 group = acquire_group(ctx, &rec->mgid, 0); in mlx4_ib_mcg_demux_handler()
896 if (IS_ERR(group)) { in mlx4_ib_mcg_demux_handler()
900 group = search_relocate_mgid0_group(ctx, tid, &rec->mgid); in mlx4_ib_mcg_demux_handler()
902 group = NULL; in mlx4_ib_mcg_demux_handler()
905 if (!group) in mlx4_ib_mcg_demux_handler()
908 mutex_lock(&group->lock); in mlx4_ib_mcg_demux_handler()
909 group->response_sa_mad = *mad; in mlx4_ib_mcg_demux_handler()
910 group->prev_state = group->state; in mlx4_ib_mcg_demux_handler()
911 group->state = MCAST_RESP_READY; in mlx4_ib_mcg_demux_handler()
913 atomic_inc(&group->refcount); in mlx4_ib_mcg_demux_handler()
914 if (!queue_work(ctx->mcg_wq, &group->work)) in mlx4_ib_mcg_demux_handler()
915 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_demux_handler()
916 mutex_unlock(&group->lock); in mlx4_ib_mcg_demux_handler()
917 release_group(group, 0); in mlx4_ib_mcg_demux_handler()
937 struct mcast_group *group; in mlx4_ib_mcg_multiplex_handler() local
957 group = acquire_group(ctx, &rec->mgid, may_create); in mlx4_ib_mcg_multiplex_handler()
959 if (IS_ERR(group)) { in mlx4_ib_mcg_multiplex_handler()
961 return PTR_ERR(group); in mlx4_ib_mcg_multiplex_handler()
963 mutex_lock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
964 if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) { in mlx4_ib_mcg_multiplex_handler()
965 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
966 mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", in mlx4_ib_mcg_multiplex_handler()
968 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
972 ++group->func[slave].num_pend_reqs; in mlx4_ib_mcg_multiplex_handler()
973 req->group = group; in mlx4_ib_mcg_multiplex_handler()
975 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
976 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
993 struct mcast_group *group = in sysfs_show_group() local
1002 if (group->state == MCAST_IDLE) in sysfs_show_group()
1004 get_state_string(group->state)); in sysfs_show_group()
1007 get_state_string(group->state), in sysfs_show_group()
1008 be64_to_cpu(group->last_req_tid)); in sysfs_show_group()
1010 if (list_empty(&group->pending_list)) { in sysfs_show_group()
1013 req = list_first_entry(&group->pending_list, struct mcast_req, in sysfs_show_group()
1020 group->rec.scope_join_state & 0xf, in sysfs_show_group()
1021 group->members[2], in sysfs_show_group()
1022 group->members[1], in sysfs_show_group()
1023 group->members[0], in sysfs_show_group()
1024 atomic_read(&group->refcount), in sysfs_show_group()
1029 if (group->func[i].state == MCAST_MEMBER) in sysfs_show_group()
1031 group->func[i].join_state); in sysfs_show_group()
1034 hoplimit = be32_to_cpu(group->rec.sl_flowlabel_hoplimit); in sysfs_show_group()
1037 be16_to_cpu(group->rec.pkey), in sysfs_show_group()
1038 be32_to_cpu(group->rec.qkey), in sysfs_show_group()
1039 (group->rec.mtusel_mtu & 0xc0) >> 6, in sysfs_show_group()
1040 (group->rec.mtusel_mtu & 0x3f), in sysfs_show_group()
1041 group->rec.tclass, in sysfs_show_group()
1042 (group->rec.ratesel_rate & 0xc0) >> 6, in sysfs_show_group()
1043 (group->rec.ratesel_rate & 0x3f), in sysfs_show_group()
1047 group->rec.proxy_join); in sysfs_show_group()
1070 static void force_clean_group(struct mcast_group *group) in force_clean_group() argument
1074 list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { in force_clean_group()
1078 del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr); in force_clean_group()
1079 rb_erase(&group->node, &group->demux->mcg_table); in force_clean_group()
1080 kfree(group); in force_clean_group()
1087 struct mcast_group *group; in _mlx4_ib_mcg_port_cleanup() local
1113 group = rb_entry(p, struct mcast_group, node); in _mlx4_ib_mcg_port_cleanup()
1114 if (atomic_read(&group->refcount)) in _mlx4_ib_mcg_port_cleanup()
1115 mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n", in _mlx4_ib_mcg_port_cleanup()
1116 atomic_read(&group->refcount), group); in _mlx4_ib_mcg_port_cleanup()
1118 force_clean_group(group); in _mlx4_ib_mcg_port_cleanup()
1173 static void clear_pending_reqs(struct mcast_group *group, int vf) in clear_pending_reqs() argument
1179 if (!list_empty(&group->pending_list)) in clear_pending_reqs()
1180 group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list); in clear_pending_reqs()
1182 list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { in clear_pending_reqs()
1185 (group->state == MCAST_JOIN_SENT || in clear_pending_reqs()
1186 group->state == MCAST_LEAVE_SENT)) { in clear_pending_reqs()
1187 clear = cancel_delayed_work(&group->timeout_work); in clear_pending_reqs()
1189 group->state = MCAST_IDLE; in clear_pending_reqs()
1192 --group->func[vf].num_pend_reqs; in clear_pending_reqs()
1196 atomic_dec(&group->refcount); in clear_pending_reqs()
1200 if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) { in clear_pending_reqs()
1201 mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n", in clear_pending_reqs()
1202 list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs); in clear_pending_reqs()
1206 static int push_deleteing_req(struct mcast_group *group, int slave) in push_deleteing_req() argument
1211 if (!group->func[slave].join_state) in push_deleteing_req()
1218 if (!list_empty(&group->func[slave].pending)) { in push_deleteing_req()
1219 pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); in push_deleteing_req()
1228 req->group = group; in push_deleteing_req()
1229 ++group->func[slave].num_pend_reqs; in push_deleteing_req()
1237 struct mcast_group *group; in clean_vf_mcast() local
1242 group = rb_entry(p, struct mcast_group, node); in clean_vf_mcast()
1243 mutex_lock(&group->lock); in clean_vf_mcast()
1244 if (atomic_read(&group->refcount)) { in clean_vf_mcast()
1246 clear_pending_reqs(group, slave); in clean_vf_mcast()
1247 push_deleteing_req(group, slave); in clean_vf_mcast()
1249 mutex_unlock(&group->lock); in clean_vf_mcast()