Lines matching refs: group. Identifier cross-reference over the IB SA multicast module, drivers/infiniband/core/multicast.c; each entry gives the file line number, the source fragment, and the enclosing function, with "local", "argument", or "member" marking how the identifier is used there.

117 	struct mcast_group	*group;  member
133 struct mcast_group *group; in mcast_find() local
137 group = rb_entry(node, struct mcast_group, node); in mcast_find()
138 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
140 return group; in mcast_find()
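
Lines 133-140 are mcast_find(): a lookup of a multicast group by MGID in the port's red-black tree, ordered by memcmp() over the raw GID bytes. A minimal sketch of the same pattern, with struct mcast_group cut down to just its tree linkage and key (the sketch_* names throughout are illustrative, not the driver's):

    #include <linux/rbtree.h>
    #include <linux/string.h>
    #include <rdma/ib_verbs.h>      /* union ib_gid */

    struct sketch_group {
            struct rb_node  node;
            union ib_gid    mgid;   /* tree key */
    };

    static struct sketch_group *sketch_find(struct rb_root *table,
                                            union ib_gid *mgid)
    {
            struct rb_node *node = table->rb_node;
            struct sketch_group *group;
            int ret;

            while (node) {
                    group = rb_entry(node, struct sketch_group, node);
                    ret = memcmp(mgid->raw, group->mgid.raw, sizeof *mgid);
                    if (!ret)
                            return group;   /* exact MGID match */
                    node = ret < 0 ? node->rb_left : node->rb_right;
            }
            return NULL;
    }
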
151 struct mcast_group *group, in mcast_insert() argument
163 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
164 sizeof group->rec.mgid); in mcast_insert()
174 rb_link_node(&group->node, parent, link); in mcast_insert()
175 rb_insert_color(&group->node, &port->table); in mcast_insert()
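
mcast_insert() is the matching descent-and-link: walk to the leaf where the new key belongs, and if an equal key is met on the way down, return the existing node so the caller can discard its own allocation. (The driver additionally permits duplicate entries for the all-zero MGID used to request an SA-assigned group; the sketch omits that flag.) Reusing the types from the previous sketch:

    static struct sketch_group *sketch_insert(struct rb_root *table,
                                              struct sketch_group *group)
    {
            struct rb_node **link = &table->rb_node;
            struct rb_node *parent = NULL;
            struct sketch_group *cur;
            int ret;

            while (*link) {
                    parent = *link;
                    cur = rb_entry(parent, struct sketch_group, node);
                    ret = memcmp(group->mgid.raw, cur->mgid.raw,
                                 sizeof group->mgid);
                    if (ret < 0)
                            link = &(*link)->rb_left;
                    else if (ret > 0)
                            link = &(*link)->rb_right;
                    else
                            return cur;     /* key already present */
            }
            rb_link_node(&group->node, parent, link);
            rb_insert_color(&group->node, table);
            return NULL;                    /* inserted */
    }
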
185 static void release_group(struct mcast_group *group) in release_group() argument
187 struct mcast_port *port = group->port; in release_group()
191 if (atomic_dec_and_test(&group->refcount)) { in release_group()
192 rb_erase(&group->node, &port->table); in release_group()
194 kfree(group); in release_group()
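
release_group() drops a reference with the port lock held, so the final decrement and the rb_erase() are atomic with respect to lookups: a concurrent acquire_group() either finds the group and bumps the count before the erase, or misses it entirely. Sketched with a minimal port type (the group struct grows across these sketches; here it only needs its node, port and refcount):

    #include <linux/atomic.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct sketch_port {
            spinlock_t      lock;
            struct rb_root  table;
    };

    struct sketch_group {
            struct rb_node          node;
            struct sketch_port      *port;
            atomic_t                refcount;
    };

    static void sketch_release(struct sketch_group *group)
    {
            struct sketch_port *port = group->port;
            unsigned long flags;

            spin_lock_irqsave(&port->lock, flags);
            if (atomic_dec_and_test(&group->refcount)) {
                    rb_erase(&group->node, &port->table);
                    spin_unlock_irqrestore(&port->lock, flags);
                    kfree(group);
                    /* the driver also drops a port reference here */
            } else
                    spin_unlock_irqrestore(&port->lock, flags);
    }
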
208 struct mcast_group *group = member->group; in queue_join() local
211 spin_lock_irqsave(&group->lock, flags); in queue_join()
212 list_add_tail(&member->list, &group->pending_list); in queue_join()
213 if (group->state == MCAST_IDLE) { in queue_join()
214 group->state = MCAST_BUSY; in queue_join()
215 atomic_inc(&group->refcount); in queue_join()
216 queue_work(mcast_wq, &group->work); in queue_join()
218 spin_unlock_irqrestore(&group->lock, flags); in queue_join()
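
queue_join() is the producer side of the group state machine: the member goes on pending_list, and only an IDLE-to-BUSY transition queues the work item, so at most one work instance runs per group. The reference taken here is owned by that work instance and dropped when the group goes idle again. From here on the sketches share one accumulated sketch_group whose fields mirror those visible in the fragments (pending_list, active_list, members[], rec, leave_state, retries, query, last_join):

    #include <linux/list.h>
    #include <linux/workqueue.h>
    #include <rdma/ib_sa.h>

    enum sketch_state { MCAST_IDLE, MCAST_BUSY };

    struct sketch_member {
            struct list_head        list;
    };

    struct sketch_group {
            struct rb_node                  node;
            struct sketch_port              *port;  /* previous sketch */
            spinlock_t                      lock;
            struct list_head                pending_list;
            struct list_head                active_list;
            enum sketch_state               state;
            atomic_t                        refcount;
            struct work_struct              work;
            int                             members[3];     /* per join bit */
            struct ib_sa_mcmember_rec       rec;            /* SA record */
            u8                              leave_state;
            int                             retries;
            struct ib_sa_query              *query;
            struct sketch_member            *last_join;
    };

    static struct workqueue_struct *mcast_wq;   /* created at module init */

    static void sketch_queue_join(struct sketch_group *group,
                                  struct sketch_member *member)
    {
            unsigned long flags;

            spin_lock_irqsave(&group->lock, flags);
            list_add_tail(&member->list, &group->pending_list);
            if (group->state == MCAST_IDLE) {
                    group->state = MCAST_BUSY;      /* claim the machine */
                    atomic_inc(&group->refcount);   /* owned by the work */
                    queue_work(mcast_wq, &group->work);
            }
            spin_unlock_irqrestore(&group->lock, flags);
    }
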
228 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) in adjust_membership() argument
234 group->members[i] += inc; in adjust_membership()
243 static u8 get_leave_state(struct mcast_group *group) in get_leave_state() argument
249 if (!group->members[i]) in get_leave_state()
252 return leave_state & group->rec.join_state; in get_leave_state()
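
join_state is a bitmask of the three IBA membership types (full member, non-member, send-only non-member), and members[] keeps one counter per bit. adjust_membership() walks the bits; get_leave_state() collects the bits whose counter has hit zero, masked by what the group actually joined, which is exactly what a leave request must carry. Sketch:

    #include <linux/types.h>

    #define NUM_JOIN_STATES 3       /* full / non / send-only non member */

    static void sketch_adjust_membership(int members[NUM_JOIN_STATES],
                                         u8 join_state, int inc)
    {
            int i;

            for (i = 0; i < NUM_JOIN_STATES; i++, join_state >>= 1)
                    if (join_state & 0x1)
                            members[i] += inc;
    }

    static u8 sketch_get_leave_state(const int members[NUM_JOIN_STATES],
                                     u8 group_join_state)
    {
            u8 leave_state = 0;
            int i;

            for (i = 0; i < NUM_JOIN_STATES; i++)
                    if (!members[i])
                            leave_state |= 0x1 << i;

            return leave_state & group_join_state;
    }
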
330 static int send_join(struct mcast_group *group, struct mcast_member *member) in send_join() argument
332 struct mcast_port *port = group->port; in send_join()
335 group->last_join = member; in send_join()
340 3000, GFP_KERNEL, join_handler, group, in send_join()
341 &group->query); in send_join()
345 static int send_leave(struct mcast_group *group, u8 leave_state) in send_leave() argument
347 struct mcast_port *port = group->port; in send_leave()
351 rec = group->rec; in send_leave()
353 group->leave_state = leave_state; in send_leave()
361 group, &group->query); in send_leave()
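
send_join() and send_leave() both hand the group to the SA layer as callback context and park the query handle in group->query. The driver's call is ib_sa_mcmember_rec_query() with a 3 second timeout (the 3000 at line 340); the join uses the SET method, the leave the DELETE method with only the bits being left in join_state. A sketch with a hypothetical helper sa_query_async() standing in for the real call, whose exact signature is not reproduced here:

    /* hypothetical stand-in for ib_sa_mcmember_rec_query(); assumed to
     * return 0 on submit or a negative errno, and to invoke cb with
     * (status, response_record, context) when the SA answers */
    int sa_query_async(struct ib_sa_mcmember_rec *rec, int timeout_ms,
                       gfp_t gfp_mask,
                       void (*cb)(int status,
                                  struct ib_sa_mcmember_rec *resp,
                                  void *context),
                       void *context, struct ib_sa_query **query);

    static void sketch_leave_handler(int status,
                                     struct ib_sa_mcmember_rec *resp,
                                     void *context);

    static int sketch_send_leave(struct sketch_group *group, u8 leave_state)
    {
            struct ib_sa_mcmember_rec rec;

            rec = group->rec;               /* record we joined with */
            rec.join_state = leave_state;   /* leave only these bits */
            group->leave_state = leave_state;

            return sa_query_async(&rec, 3000 /* ms */, GFP_KERNEL,
                                  sketch_leave_handler, group, &group->query);
    }
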
365 static void join_group(struct mcast_group *group, struct mcast_member *member, in join_group() argument
369 adjust_membership(group, join_state, 1); in join_group()
370 group->rec.join_state |= join_state; in join_group()
371 member->multicast.rec = group->rec; in join_group()
373 list_move(&member->list, &group->active_list); in join_group()
376 static int fail_join(struct mcast_group *group, struct mcast_member *member, in fail_join() argument
379 spin_lock_irq(&group->lock); in fail_join()
381 spin_unlock_irq(&group->lock); in fail_join()
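
join_group() runs under group->lock on a successful reply: the new bits go into the counters and into group->rec.join_state, the SA's authoritative record is copied back into the member, and the member moves from pending_list to active_list. fail_join() is the counterpart that unlinks the member under the lock and reports the status. A sketch of the success path, extending sketch_member with the record:

    struct sketch_member {                  /* extended */
            struct list_head                list;
            struct ib_sa_mcmember_rec       rec;
    };

    static void sketch_join_group(struct sketch_group *group,
                                  struct sketch_member *member,
                                  u8 join_state)
    {
            /* caller holds group->lock */
            sketch_adjust_membership(group->members, join_state, 1);
            group->rec.join_state |= join_state;  /* group holds these bits */
            member->rec = group->rec;       /* hand the SA's record back */
            list_move(&member->list, &group->active_list);
    }
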
385 static void process_group_error(struct mcast_group *group) in process_group_error() argument
391 if (group->state == MCAST_PKEY_EVENT) in process_group_error()
392 ret = ib_find_pkey(group->port->dev->device, in process_group_error()
393 group->port->port_num, in process_group_error()
394 be16_to_cpu(group->rec.pkey), &pkey_index); in process_group_error()
396 spin_lock_irq(&group->lock); in process_group_error()
397 if (group->state == MCAST_PKEY_EVENT && !ret && in process_group_error()
398 group->pkey_index == pkey_index) in process_group_error()
401 while (!list_empty(&group->active_list)) { in process_group_error()
402 member = list_entry(group->active_list.next, in process_group_error()
406 adjust_membership(group, member->multicast.rec.join_state, -1); in process_group_error()
408 spin_unlock_irq(&group->lock); in process_group_error()
415 spin_lock_irq(&group->lock); in process_group_error()
418 group->rec.join_state = 0; in process_group_error()
420 group->state = MCAST_BUSY; in process_group_error()
421 spin_unlock_irq(&group->lock); in process_group_error()
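
process_group_error() handles port events such as a P_Key table change: if the event was a P_Key event and the group's stored pkey_index still resolves (the ib_find_pkey() check at lines 391-398), the event is a no-op; otherwise every active member is unlinked and failed, the record's join_state is cleared, and the state machine re-enters BUSY to rejoin. The loop is the classic unlink-under-lock, call-back-without-lock pattern; sketch_notify() and sketch_deref_member() are hypothetical stand-ins for the member completion callback and refcount drop, and member->refcount is assumed added to sketch_member:

    #include <linux/errno.h>

    static void sketch_process_group_error(struct sketch_group *group)
    {
            struct sketch_member *member;

            spin_lock_irq(&group->lock);
            while (!list_empty(&group->active_list)) {
                    member = list_entry(group->active_list.next,
                                        struct sketch_member, list);
                    atomic_inc(&member->refcount);  /* pin across unlock */
                    list_del_init(&member->list);
                    sketch_adjust_membership(group->members,
                                             member->rec.join_state, -1);
                    spin_unlock_irq(&group->lock);

                    /* user callback runs without the lock held */
                    sketch_notify(member, -ENETRESET);
                    sketch_deref_member(member);

                    spin_lock_irq(&group->lock);
            }

            group->rec.join_state = 0;      /* everything must be rejoined */
            group->state = MCAST_BUSY;      /* hand back to the work loop */
            spin_unlock_irq(&group->lock);
    }
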
426 struct mcast_group *group; in mcast_work_handler() local
432 group = container_of(work, typeof(*group), work); in mcast_work_handler()
434 spin_lock_irq(&group->lock); in mcast_work_handler()
435 while (!list_empty(&group->pending_list) || in mcast_work_handler()
436 (group->state != MCAST_BUSY)) { in mcast_work_handler()
438 if (group->state != MCAST_BUSY) { in mcast_work_handler()
439 spin_unlock_irq(&group->lock); in mcast_work_handler()
440 process_group_error(group); in mcast_work_handler()
444 member = list_entry(group->pending_list.next, in mcast_work_handler()
450 if (join_state == (group->rec.join_state & join_state)) { in mcast_work_handler()
451 status = cmp_rec(&group->rec, &multicast->rec, in mcast_work_handler()
454 join_group(group, member, join_state); in mcast_work_handler()
457 spin_unlock_irq(&group->lock); in mcast_work_handler()
460 spin_unlock_irq(&group->lock); in mcast_work_handler()
461 status = send_join(group, member); in mcast_work_handler()
466 ret = fail_join(group, member, status); in mcast_work_handler()
472 spin_lock_irq(&group->lock); in mcast_work_handler()
475 join_state = get_leave_state(group); in mcast_work_handler()
477 group->rec.join_state &= ~join_state; in mcast_work_handler()
478 spin_unlock_irq(&group->lock); in mcast_work_handler()
479 if (send_leave(group, join_state)) in mcast_work_handler()
482 group->state = MCAST_IDLE; in mcast_work_handler()
483 spin_unlock_irq(&group->lock); in mcast_work_handler()
484 release_group(group); in mcast_work_handler()
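
mcast_work_handler() is the single consumer of the state machine. It loops while there are pending members or a port-event state to digest; each pending join is either satisfied locally, when the group already holds the requested join bits and only the records need comparing (cmp_rec() at line 451, elided below), or sent to the SA, in which case the handler returns and join_handler() re-enters it on completion. With nothing left to do it issues any necessary leave, drops to MCAST_IDLE, and releases the reference taken in queue_join(). A condensed sketch over the helpers built up above, with error-status details omitted (sketch_send_join() and sketch_fail_join() are analogues of send_join()/fail_join(), shaped like sketch_send_leave()):

    static void sketch_work_handler(struct work_struct *work)
    {
            struct sketch_group *group =
                    container_of(work, struct sketch_group, work);
            struct sketch_member *member;
            u8 join_state;

    retest:
            spin_lock_irq(&group->lock);
            while (!list_empty(&group->pending_list) ||
                   group->state != MCAST_BUSY) {

                    if (group->state != MCAST_BUSY) {
                            /* a port event redirected the state machine */
                            spin_unlock_irq(&group->lock);
                            sketch_process_group_error(group);
                            goto retest;
                    }

                    member = list_entry(group->pending_list.next,
                                        struct sketch_member, list);
                    join_state = member->rec.join_state;

                    if ((group->rec.join_state & join_state) == join_state) {
                            /* already joined at this level: finish locally */
                            sketch_join_group(group, member, join_state);
                            spin_unlock_irq(&group->lock);
                            sketch_notify(member, 0);
                    } else {
                            /* ask the SA; join_handler() re-enters here */
                            spin_unlock_irq(&group->lock);
                            if (!sketch_send_join(group, member))
                                    return;
                            sketch_fail_join(group, member);
                    }
                    spin_lock_irq(&group->lock);
            }

            join_state = sketch_get_leave_state(group->members,
                                                group->rec.join_state);
            if (join_state) {
                    group->rec.join_state &= ~join_state;
                    spin_unlock_irq(&group->lock);
                    if (!sketch_send_leave(group, join_state))
                            return;         /* leave_handler() re-enters */
                    goto retest;
            }
            group->state = MCAST_IDLE;
            spin_unlock_irq(&group->lock);
            sketch_release(group);          /* drop the work's reference */
    }
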
491 static void process_join_error(struct mcast_group *group, int status) in process_join_error() argument
496 spin_lock_irq(&group->lock); in process_join_error()
497 member = list_entry(group->pending_list.next, in process_join_error()
499 if (group->last_join == member) { in process_join_error()
502 spin_unlock_irq(&group->lock); in process_join_error()
508 spin_unlock_irq(&group->lock); in process_join_error()
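
process_join_error() fails only the member whose query actually completed: it peeks at the head of pending_list and checks it against group->last_join, guarding against a member that was freed or requeued while the query was in flight. Sketch, reusing the hypothetical notify/deref helpers from above:

    static void sketch_process_join_error(struct sketch_group *group,
                                          int status)
    {
            struct sketch_member *member;

            spin_lock_irq(&group->lock);
            member = list_entry(group->pending_list.next,
                                struct sketch_member, list);
            if (group->last_join == member) {
                    /* still the same request: unlink, complete with status */
                    atomic_inc(&member->refcount);  /* pin across unlock */
                    list_del_init(&member->list);
                    spin_unlock_irq(&group->lock);
                    sketch_notify(member, status);
                    sketch_deref_member(member);
            } else
                    spin_unlock_irq(&group->lock);
    }
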
514 struct mcast_group *group = context; in join_handler() local
518 process_join_error(group, status); in join_handler()
522 if (ib_find_pkey(group->port->dev->device, in join_handler()
523 group->port->port_num, be16_to_cpu(rec->pkey), in join_handler()
527 spin_lock_irq(&group->port->lock); in join_handler()
528 if (group->state == MCAST_BUSY && in join_handler()
529 group->pkey_index == MCAST_INVALID_PKEY_INDEX) in join_handler()
530 group->pkey_index = pkey_index; in join_handler()
531 mgids_changed = memcmp(&rec->mgid, &group->rec.mgid, in join_handler()
532 sizeof(group->rec.mgid)); in join_handler()
533 group->rec = *rec; in join_handler()
535 rb_erase(&group->node, &group->port->table); in join_handler()
536 is_mgid0 = !memcmp(&mgid0, &group->rec.mgid, in join_handler()
538 mcast_insert(group->port, group, is_mgid0); in join_handler()
540 spin_unlock_irq(&group->port->lock); in join_handler()
542 mcast_work_handler(&group->work); in join_handler()
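
join_handler() runs in SA callback context. On success it latches the resolved pkey_index, and, because a join requested with the zero MGID comes back carrying a real SA-assigned MGID, it compares MGIDs and re-keys the rb-tree entry when they differ, all under port->lock; on failure it defers to process_join_error(). Either way it finishes by calling mcast_work_handler() directly rather than requeueing. The re-key step, sketched (sketch_insert() is the earlier sketch, assumed here to key on group->rec.mgid as the driver does):

    static void sketch_rekey(struct sketch_group *group,
                             struct ib_sa_mcmember_rec *resp)
    {
            /* caller holds group->port->lock */
            int changed = memcmp(&resp->mgid, &group->rec.mgid,
                                 sizeof group->rec.mgid);

            group->rec = *resp;             /* adopt the SA's record */
            if (changed) {
                    /* the tree key changed (SA assigned a real MGID to a
                     * zero-MGID request), so the node must move */
                    rb_erase(&group->node, &group->port->table);
                    sketch_insert(&group->port->table, group);
            }
    }
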
548 struct mcast_group *group = context; in leave_handler() local
550 if (status && group->retries > 0 && in leave_handler()
551 !send_leave(group, group->leave_state)) in leave_handler()
552 group->retries--; in leave_handler()
554 mcast_work_handler(&group->work); in leave_handler()
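
leave_handler() adds a bounded retry: a failed leave is resent and retries decremented (the group starts with 3, per acquire_group() below); once the budget is exhausted or the leave succeeds, the work handler is re-entered to finish the IDLE transition. Sketch:

    static void sketch_leave_handler(int status,
                                     struct ib_sa_mcmember_rec *resp,
                                     void *context)
    {
            struct sketch_group *group = context;

            if (status && group->retries > 0 &&
                !sketch_send_leave(group, group->leave_state))
                    group->retries--;       /* resent; wait for next reply */
            else
                    sketch_work_handler(&group->work);
    }
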
560 struct mcast_group *group, *cur_group; in acquire_group() local
567 group = mcast_find(port, mgid); in acquire_group()
568 if (group) in acquire_group()
573 group = kzalloc(sizeof *group, gfp_mask); in acquire_group()
574 if (!group) in acquire_group()
577 group->retries = 3; in acquire_group()
578 group->port = port; in acquire_group()
579 group->rec.mgid = *mgid; in acquire_group()
580 group->pkey_index = MCAST_INVALID_PKEY_INDEX; in acquire_group()
581 INIT_LIST_HEAD(&group->pending_list); in acquire_group()
582 INIT_LIST_HEAD(&group->active_list); in acquire_group()
583 INIT_WORK(&group->work, mcast_work_handler); in acquire_group()
584 spin_lock_init(&group->lock); in acquire_group()
587 cur_group = mcast_insert(port, group, is_mgid0); in acquire_group()
589 kfree(group); in acquire_group()
590 group = cur_group; in acquire_group()
594 atomic_inc(&group->refcount); in acquire_group()
596 return group; in acquire_group()
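
acquire_group() is find-or-create with the allocation outside the lock: look up first; on a miss, drop the lock, kzalloc() and initialize a candidate, retake the lock, and try to insert it. If another thread created the same group in the window, the insert hands back the existing node, the candidate is freed, and the winner is used; either way the caller leaves holding a reference. A sketch of the idiom (sketch_find()/sketch_insert() are the earlier sketches, assumed re-keyed on rec.mgid):

    static struct sketch_group *sketch_acquire(struct sketch_port *port,
                                               union ib_gid *mgid,
                                               gfp_t gfp_mask)
    {
            struct sketch_group *group, *cur;
            unsigned long flags;

            spin_lock_irqsave(&port->lock, flags);
            group = sketch_find(&port->table, mgid);
            if (group)
                    goto found;
            spin_unlock_irqrestore(&port->lock, flags);

            group = kzalloc(sizeof *group, gfp_mask);
            if (!group)
                    return NULL;

            group->retries = 3;
            group->port = port;
            group->rec.mgid = *mgid;
            INIT_LIST_HEAD(&group->pending_list);
            INIT_LIST_HEAD(&group->active_list);
            INIT_WORK(&group->work, sketch_work_handler);
            spin_lock_init(&group->lock);

            spin_lock_irqsave(&port->lock, flags);
            cur = sketch_insert(&port->table, group);
            if (cur) {              /* lost the race: another thread won */
                    kfree(group);
                    group = cur;
            }
            /* the driver also takes a port reference for a new group */
    found:
            atomic_inc(&group->refcount);
            spin_unlock_irqrestore(&port->lock, flags);
            return group;
    }

ib_sa_join_multicast() (lines 638-640) is the caller, mapping port_num to the driver's per-port structure before acquiring the group.
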
638 member->group = acquire_group(&dev->port[port_num - dev->start_port], in ib_sa_join_multicast()
640 if (!member->group) { in ib_sa_join_multicast()
665 struct mcast_group *group; in ib_sa_free_multicast() local
668 group = member->group; in ib_sa_free_multicast()
670 spin_lock_irq(&group->lock); in ib_sa_free_multicast()
672 adjust_membership(group, multicast->rec.join_state, -1); in ib_sa_free_multicast()
676 if (group->state == MCAST_IDLE) { in ib_sa_free_multicast()
677 group->state = MCAST_BUSY; in ib_sa_free_multicast()
678 spin_unlock_irq(&group->lock); in ib_sa_free_multicast()
680 queue_work(mcast_wq, &group->work); in ib_sa_free_multicast()
682 spin_unlock_irq(&group->lock); in ib_sa_free_multicast()
683 release_group(group); in ib_sa_free_multicast()
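
ib_sa_free_multicast() reverses a join: the member comes off its list and its bits come off the counters; if the state machine is idle, the caller's group reference is handed to a queued work item, which will notice the zeroed counters and send any needed leave, and otherwise the reference is dropped and the already-running handler picks the change up. Sketched:

    static void sketch_free_member(struct sketch_group *group,
                                   struct sketch_member *member)
    {
            spin_lock_irq(&group->lock);
            list_del_init(&member->list);
            sketch_adjust_membership(group->members,
                                     member->rec.join_state, -1);

            if (group->state == MCAST_IDLE) {
                    group->state = MCAST_BUSY;
                    spin_unlock_irq(&group->lock);
                    /* the caller's group reference transfers to the work,
                     * which sends any leave and then releases it */
                    queue_work(mcast_wq, &group->work);
            } else {
                    spin_unlock_irq(&group->lock);
                    sketch_release(group);  /* drop the join's reference */
            }
    }
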
698 struct mcast_group *group; in ib_sa_get_mcmember_rec() local
708 group = mcast_find(port, mgid); in ib_sa_get_mcmember_rec()
709 if (group) in ib_sa_get_mcmember_rec()
710 *rec = group->rec; in ib_sa_get_mcmember_rec()
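
ib_sa_get_mcmember_rec() is the read side of the cache: under the port lock it looks the MGID up in the same rb-tree and copies out the stored record, with no SA round trip. A hedged usage sketch; device, port_num and mgid are assumed to be in scope, and the miss case is assumed to return a negative errno as the lines above imply:

    /* fetch the locally cached SA record for an attached group */
    struct ib_sa_mcmember_rec rec;
    int ret;

    ret = ib_sa_get_mcmember_rec(device, port_num, &mgid, &rec);
    if (!ret)
            pr_info("cached mlid 0x%x\n", be16_to_cpu(rec.mlid));
    /* nonzero: no such group is attached through this module */
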
774 struct mcast_group *group; in mcast_groups_event() local
780 group = rb_entry(node, struct mcast_group, node); in mcast_groups_event()
781 spin_lock(&group->lock); in mcast_groups_event()
782 if (group->state == MCAST_IDLE) { in mcast_groups_event()
783 atomic_inc(&group->refcount); in mcast_groups_event()
784 queue_work(mcast_wq, &group->work); in mcast_groups_event()
786 if (group->state != MCAST_GROUP_ERROR) in mcast_groups_event()
787 group->state = state; in mcast_groups_event()
788 spin_unlock(&group->lock); in mcast_groups_event()
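
mcast_groups_event() sweeps every group on the port when an event arrives: rb_first()/rb_next() under port->lock, and for each group the same IDLE-to-queued transition as queue_join(), plus a state overwrite that deliberately never downgrades MCAST_GROUP_ERROR to a lesser event. The inner lock can be the plain spin_lock() variant because the outer spin_lock_irqsave() already disabled interrupts. Sketch (MCAST_GROUP_ERROR and MCAST_PKEY_EVENT are assumed added to the sketch_state enum):

    static void sketch_groups_event(struct sketch_port *port,
                                    enum sketch_state state)
    {
            struct rb_node *node;
            unsigned long flags;

            spin_lock_irqsave(&port->lock, flags);
            for (node = rb_first(&port->table); node; node = rb_next(node)) {
                    struct sketch_group *group;

                    group = rb_entry(node, struct sketch_group, node);
                    spin_lock(&group->lock);        /* irqs already off */
                    if (group->state == MCAST_IDLE) {
                            atomic_inc(&group->refcount);
                            queue_work(mcast_wq, &group->work);
                    }
                    if (group->state != MCAST_GROUP_ERROR)
                            group->state = state;   /* never mask an error */
                    spin_unlock(&group->lock);
            }
            spin_unlock_irqrestore(&port->lock, flags);
    }
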