Lines Matching +full:part +full:- +full:number
6 * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
10 * Cross Partition Communication (XPC) uv-based functions.
34 #include "../sgi-gru/gru.h"
35 #include "../sgi-gru/grukservices.h"
81 mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex); in xpc_setup_partitions_uv()
82 spin_lock_init(&part_uv->flags_lock); in xpc_setup_partitions_uv()
83 part_uv->remote_act_state = XPC_P_AS_INACTIVE; in xpc_setup_partitions_uv()
98 if (part_uv->cached_activate_gru_mq_desc != NULL) { in xpc_teardown_partitions_uv()
99 mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); in xpc_teardown_partitions_uv()
100 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); in xpc_teardown_partitions_uv()
101 part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; in xpc_teardown_partitions_uv()
102 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); in xpc_teardown_partitions_uv()
103 kfree(part_uv->cached_activate_gru_mq_desc); in xpc_teardown_partitions_uv()
104 part_uv->cached_activate_gru_mq_desc = NULL; in xpc_teardown_partitions_uv()
105 mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex); in xpc_teardown_partitions_uv()
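
The teardown above shows a recurring idiom in this file: clear the "cached" bit under flags_lock so lock-free readers of part_uv->flags stop trusting the pointer, then free the descriptor while still holding the mutex that serializes anyone caching a new one. A minimal userspace sketch of the same shape, with pthreads standing in for the kernel locks and every name invented for illustration:

    #include <pthread.h>
    #include <stdlib.h>

    #define CACHED_DESC 0x1             /* stands in for XPC_P_CACHED_..._UV */

    struct part_state {
        pthread_mutex_t    cache_mutex; /* serializes descriptor caching */
        pthread_spinlock_t flags_lock;  /* guards flags */
        unsigned int       flags;
        void              *cached_desc;
    };

    void teardown_cached_desc(struct part_state *p)
    {
        if (p->cached_desc == NULL)
            return;
        pthread_mutex_lock(&p->cache_mutex);
        pthread_spin_lock(&p->flags_lock);
        p->flags &= ~CACHED_DESC;        /* readers stop trusting the pointer */
        pthread_spin_unlock(&p->flags_lock);
        free(p->cached_desc);            /* safe: flag cleared, mutex held */
        p->cached_desc = NULL;
        pthread_mutex_unlock(&p->cache_mutex);
    }
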
114 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); in xpc_get_gru_mq_irq_uv()
117 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, in xpc_get_gru_mq_irq_uv()
119 if (mq->irq < 0) in xpc_get_gru_mq_irq_uv()
120 return mq->irq; in xpc_get_gru_mq_irq_uv()
122 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); in xpc_get_gru_mq_irq_uv()
126 mq->irq = SGI_XPC_ACTIVATE; in xpc_get_gru_mq_irq_uv()
128 mq->irq = SGI_XPC_NOTIFY; in xpc_get_gru_mq_irq_uv()
130 return -EINVAL; in xpc_get_gru_mq_irq_uv()
132 mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq; in xpc_get_gru_mq_irq_uv()
133 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value); in xpc_get_gru_mq_irq_uv()
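Line 132 packs the interrupt routing into one 64-bit MMR value: the target CPU's physical APIC id in the upper 32 bits, the vector (SGI_XPC_ACTIVATE or SGI_XPC_NOTIFY) in the low bits. A small standalone illustration of that encoding, with made-up id and vector values:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack the routing the way line 132 does: APIC id high, vector low. */
    uint64_t pack_mmr(uint32_t apicid, uint32_t vector)
    {
        return ((uint64_t)apicid << 32) | vector;
    }

    int main(void)
    {
        uint64_t mmr = pack_mmr(0x1f, 0xb0);   /* hypothetical id and vector */

        printf("mmr_value = 0x%016llx\n", (unsigned long long)mmr);
        printf("apicid    = 0x%x\n", (uint32_t)(mmr >> 32));
        printf("vector    = 0x%x\n", (uint32_t)(mmr & 0xffffffffu));
        return 0;
    }
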
145 uv_teardown_irq(mq->irq); in xpc_release_gru_mq_irq_uv()
151 mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); in xpc_release_gru_mq_irq_uv()
154 uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value); in xpc_release_gru_mq_irq_uv()
166 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); in xpc_gru_mq_watchlist_alloc_uv()
168 ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address), in xpc_gru_mq_watchlist_alloc_uv()
169 mq->order, &mq->mmr_offset); in xpc_gru_mq_watchlist_alloc_uv()
173 return -EBUSY; in xpc_gru_mq_watchlist_alloc_uv()
176 ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address), in xpc_gru_mq_watchlist_alloc_uv()
177 mq->order, &mq->mmr_offset); in xpc_gru_mq_watchlist_alloc_uv()
187 mq->watchlist_num = ret; in xpc_gru_mq_watchlist_alloc_uv()
195 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); in xpc_gru_mq_watchlist_free_uv()
198 ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num); in xpc_gru_mq_watchlist_free_uv()
201 ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num); in xpc_gru_mq_watchlist_free_uv()
225 ret = -ENOMEM; in xpc_create_gru_mq_uv()
229 mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc), in xpc_create_gru_mq_uv()
231 if (mq->gru_mq_desc == NULL) { in xpc_create_gru_mq_uv()
234 ret = -ENOMEM; in xpc_create_gru_mq_uv()
239 mq->order = pg_order + PAGE_SHIFT; in xpc_create_gru_mq_uv()
240 mq_size = 1UL << mq->order; in xpc_create_gru_mq_uv()
242 mq->mmr_blade = uv_cpu_to_blade_id(cpu); in xpc_create_gru_mq_uv()
251 ret = -ENOMEM; in xpc_create_gru_mq_uv()
254 mq->address = page_address(page); in xpc_create_gru_mq_uv()
265 ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL); in xpc_create_gru_mq_uv()
268 mq->irq, -ret); in xpc_create_gru_mq_uv()
274 mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value; in xpc_create_gru_mq_uv()
275 ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size, in xpc_create_gru_mq_uv()
276 nasid, mmr_value->vector, mmr_value->dest); in xpc_create_gru_mq_uv()
280 ret = -EINVAL; in xpc_create_gru_mq_uv()
285 xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size); in xpc_create_gru_mq_uv()
287 ret = -EACCES; in xpc_create_gru_mq_uv()
295 free_irq(mq->irq, NULL); in xpc_create_gru_mq_uv()
301 free_pages((unsigned long)mq->address, pg_order); in xpc_create_gru_mq_uv()
303 kfree(mq->gru_mq_desc); in xpc_create_gru_mq_uv()
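xpc_create_gru_mq_uv() acquires its resources (descriptor, pages, IRQ, GRU queue, memory protections) in order and, on any failure, unwinds them in reverse through a goto ladder: each label frees exactly what was obtained before the failing step. A compilable sketch of the idiom with placeholder resources, not the driver's:

    #include <stdlib.h>

    struct mq {
        void *desc;
        void *pages;
        int   irq;
    };

    static int fake_request_irq(void) { return 0; }   /* assumed to succeed */

    struct mq *create_mq(size_t size)
    {
        struct mq *mq = calloc(1, sizeof(*mq));

        if (mq == NULL)
            goto out_0;
        mq->desc = calloc(1, 64);                 /* queue descriptor */
        if (mq->desc == NULL)
            goto out_1;
        mq->pages = malloc(size);                 /* queue memory */
        if (mq->pages == NULL)
            goto out_2;
        mq->irq = fake_request_irq();             /* interrupt */
        if (mq->irq < 0)
            goto out_3;
        return mq;          /* success: nothing is unwound */

    out_3:
        free(mq->pages);
    out_2:
        free(mq->desc);
    out_1:
        free(mq);
    out_0:
        return NULL;
    }
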
318 mq_size = 1UL << mq->order; in xpc_destroy_gru_mq_uv()
319 ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size); in xpc_destroy_gru_mq_uv()
323 free_irq(mq->irq, NULL); in xpc_destroy_gru_mq_uv()
329 pg_order = mq->order - PAGE_SHIFT; in xpc_destroy_gru_mq_uv()
330 free_pages((unsigned long)mq->address, pg_order); in xpc_destroy_gru_mq_uv()
353 /* ??? Do we add a limit to the number of retries? */ in xpc_send_gru_msg()
359 /* ??? Do we add a limit to the number of retries? */ in xpc_send_gru_msg()
376 struct xpc_partition *part; in xpc_process_activate_IRQ_rcvd_uv() local
383 part = &xpc_partitions[partid]; in xpc_process_activate_IRQ_rcvd_uv()
385 if (part->sn.uv.act_state_req == 0) in xpc_process_activate_IRQ_rcvd_uv()
388 xpc_activate_IRQ_rcvd--; in xpc_process_activate_IRQ_rcvd_uv()
391 act_state_req = part->sn.uv.act_state_req; in xpc_process_activate_IRQ_rcvd_uv()
392 part->sn.uv.act_state_req = 0; in xpc_process_activate_IRQ_rcvd_uv()
396 if (part->act_state == XPC_P_AS_INACTIVE) in xpc_process_activate_IRQ_rcvd_uv()
397 xpc_activate_partition(part); in xpc_process_activate_IRQ_rcvd_uv()
398 else if (part->act_state == XPC_P_AS_DEACTIVATING) in xpc_process_activate_IRQ_rcvd_uv()
399 XPC_DEACTIVATE_PARTITION(part, xpReactivating); in xpc_process_activate_IRQ_rcvd_uv()
402 if (part->act_state == XPC_P_AS_INACTIVE) in xpc_process_activate_IRQ_rcvd_uv()
403 xpc_activate_partition(part); in xpc_process_activate_IRQ_rcvd_uv()
405 XPC_DEACTIVATE_PARTITION(part, xpReactivating); in xpc_process_activate_IRQ_rcvd_uv()
408 XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason); in xpc_process_activate_IRQ_rcvd_uv()
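The loop above consumes act_state_req with a snapshot-and-clear under the IRQ-received lock, then dispatches on the snapshot after unlocking, so the slow activate/deactivate work never runs with the lock held. A hedged userspace rendering, with a mutex in place of the kernel spinlock and invented names:

    #include <pthread.h>

    enum asr { ASR_NONE = 0, ASR_ACTIVATE, ASR_DEACTIVATE };

    struct part {
        pthread_mutex_t lock;
        enum asr act_state_req;
    };

    static void activate(struct part *p)   { (void)p; /* slow work */ }
    static void deactivate(struct part *p) { (void)p; /* slow work */ }

    void process_request(struct part *p)
    {
        enum asr req;

        pthread_mutex_lock(&p->lock);
        req = p->act_state_req;          /* snapshot the request ... */
        p->act_state_req = ASR_NONE;     /* ... and consume it */
        pthread_mutex_unlock(&p->lock);

        switch (req) {                   /* act on it lock-free */
        case ASR_ACTIVATE:   activate(p);   break;
        case ASR_DEACTIVATE: deactivate(p); break;
        case ASR_NONE:       break;
        }
    }
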
423 xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, in xpc_handle_activate_mq_msg_uv() argument
429 struct xpc_partition_uv *part_uv = &part->sn.uv; in xpc_handle_activate_mq_msg_uv()
432 part_uv->remote_act_state = msg_hdr->act_state; in xpc_handle_activate_mq_msg_uv()
434 switch (msg_hdr->type) { in xpc_handle_activate_mq_msg_uv()
451 if (part_uv->act_state_req == 0) in xpc_handle_activate_mq_msg_uv()
453 part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; in xpc_handle_activate_mq_msg_uv()
454 part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ in xpc_handle_activate_mq_msg_uv()
455 part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; in xpc_handle_activate_mq_msg_uv()
456 part_uv->heartbeat_gpa = msg->heartbeat_gpa; in xpc_handle_activate_mq_msg_uv()
458 if (msg->activate_gru_mq_desc_gpa != in xpc_handle_activate_mq_msg_uv()
459 part_uv->activate_gru_mq_desc_gpa) { in xpc_handle_activate_mq_msg_uv()
460 spin_lock(&part_uv->flags_lock); in xpc_handle_activate_mq_msg_uv()
461 part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; in xpc_handle_activate_mq_msg_uv()
462 spin_unlock(&part_uv->flags_lock); in xpc_handle_activate_mq_msg_uv()
463 part_uv->activate_gru_mq_desc_gpa = in xpc_handle_activate_mq_msg_uv()
464 msg->activate_gru_mq_desc_gpa; in xpc_handle_activate_mq_msg_uv()
478 if (part_uv->act_state_req == 0) in xpc_handle_activate_mq_msg_uv()
480 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; in xpc_handle_activate_mq_msg_uv()
481 part_uv->reason = msg->reason; in xpc_handle_activate_mq_msg_uv()
496 args = &part->remote_openclose_args[msg->ch_number]; in xpc_handle_activate_mq_msg_uv()
497 args->reason = msg->reason; in xpc_handle_activate_mq_msg_uv()
499 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
500 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST; in xpc_handle_activate_mq_msg_uv()
501 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
503 xpc_wakeup_channel_mgr(part); in xpc_handle_activate_mq_msg_uv()
516 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
517 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY; in xpc_handle_activate_mq_msg_uv()
518 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
520 xpc_wakeup_channel_mgr(part); in xpc_handle_activate_mq_msg_uv()
532 args = &part->remote_openclose_args[msg->ch_number]; in xpc_handle_activate_mq_msg_uv()
533 args->entry_size = msg->entry_size; in xpc_handle_activate_mq_msg_uv()
534 args->local_nentries = msg->local_nentries; in xpc_handle_activate_mq_msg_uv()
536 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
537 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST; in xpc_handle_activate_mq_msg_uv()
538 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
540 xpc_wakeup_channel_mgr(part); in xpc_handle_activate_mq_msg_uv()
551 args = &part->remote_openclose_args[msg->ch_number]; in xpc_handle_activate_mq_msg_uv()
552 args->remote_nentries = msg->remote_nentries; in xpc_handle_activate_mq_msg_uv()
553 args->local_nentries = msg->local_nentries; in xpc_handle_activate_mq_msg_uv()
554 args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa; in xpc_handle_activate_mq_msg_uv()
556 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
557 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; in xpc_handle_activate_mq_msg_uv()
558 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
560 xpc_wakeup_channel_mgr(part); in xpc_handle_activate_mq_msg_uv()
571 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
572 part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE; in xpc_handle_activate_mq_msg_uv()
573 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
575 xpc_wakeup_channel_mgr(part); in xpc_handle_activate_mq_msg_uv()
579 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
580 part_uv->flags |= XPC_P_ENGAGED_UV; in xpc_handle_activate_mq_msg_uv()
581 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
585 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
586 part_uv->flags &= ~XPC_P_ENGAGED_UV; in xpc_handle_activate_mq_msg_uv()
587 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); in xpc_handle_activate_mq_msg_uv()
592 "from partition=%d\n", msg_hdr->type, XPC_PARTID(part)); in xpc_handle_activate_mq_msg_uv()
596 if (part_uv->act_state_req == 0) in xpc_handle_activate_mq_msg_uv()
598 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; in xpc_handle_activate_mq_msg_uv()
599 part_uv->reason = xpBadMsgType; in xpc_handle_activate_mq_msg_uv()
606 if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies && in xpc_handle_activate_mq_msg_uv()
607 part->remote_rp_ts_jiffies != 0) { in xpc_handle_activate_mq_msg_uv()
613 if (part_uv->act_state_req == 0) in xpc_handle_activate_mq_msg_uv()
615 part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV; in xpc_handle_activate_mq_msg_uv()
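Every *REQUEST/*REPLY case above follows one shape: copy any arguments out of the message, set the matching bit in part->chctl.flags[ch_number] under chctl_lock, then wake the channel manager to act on it. A minimal sketch of that producer side, with hypothetical flag values and a condition variable standing in for the manager's wait queue:

    #include <pthread.h>

    #define MAX_CHANNELS       8
    #define CHCTL_OPENREQUEST  0x01     /* hypothetical bit values */
    #define CHCTL_CLOSEREQUEST 0x02

    struct partition {
        pthread_mutex_t chctl_lock;
        unsigned char   chctl_flags[MAX_CHANNELS];
        pthread_cond_t  mgr_wakeup;     /* channel manager waits here */
    };

    void post_chctl_flag(struct partition *p, int ch, unsigned char flag)
    {
        pthread_mutex_lock(&p->chctl_lock);
        p->chctl_flags[ch] |= flag;           /* record the request */
        pthread_mutex_unlock(&p->chctl_lock);
        pthread_cond_signal(&p->mgr_wakeup);  /* wake the channel manager */
    }
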
627 struct xpc_partition *part; in xpc_handle_activate_IRQ_uv() local
632 msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc); in xpc_handle_activate_IRQ_uv()
636 partid = msg_hdr->partid; in xpc_handle_activate_IRQ_uv()
642 part = &xpc_partitions[partid]; in xpc_handle_activate_IRQ_uv()
644 part_referenced = xpc_part_ref(part); in xpc_handle_activate_IRQ_uv()
645 xpc_handle_activate_mq_msg_uv(part, msg_hdr, in xpc_handle_activate_IRQ_uv()
649 xpc_part_deref(part); in xpc_handle_activate_IRQ_uv()
652 gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr); in xpc_handle_activate_IRQ_uv()
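The IRQ handler drains the GRU message queue in a get/handle/free loop; the slot may only be released after the message is fully processed, since gru_free_message() makes it reusable by the sender. A stub-based sketch of the same loop shape (the mq_* calls are stand-ins, not the GRU API):

    #include <stddef.h>
    #include <stdio.h>

    struct mq_msg { short partid; };

    /* Stand-ins for gru_get_next_message()/gru_free_message(): */
    static struct mq_msg *mq_next(void)      { return NULL; }  /* empty queue */
    static void mq_release(struct mq_msg *m) { (void)m; }
    static void handle_msg(struct mq_msg *m) { printf("partid %d\n", m->partid); }

    void drain_mq(void)
    {
        struct mq_msg *msg;

        /* Handle first, free last: the slot may be reused by the sender
         * as soon as it is released back to the queue. */
        while ((msg = mq_next()) != NULL) {
            handle_msg(msg);
            mq_release(msg);
        }
    }

    int main(void) { drain_mq(); return 0; }
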
670 gru_mq_desc->mq = NULL; in xpc_cache_remote_gru_mq_desc_uv()
676 xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, in xpc_send_activate_IRQ_uv() argument
680 struct xpc_partition_uv *part_uv = &part->sn.uv; in xpc_send_activate_IRQ_uv()
687 msg_hdr->type = msg_type; in xpc_send_activate_IRQ_uv()
688 msg_hdr->partid = xp_partition_id; in xpc_send_activate_IRQ_uv()
689 msg_hdr->act_state = part->act_state; in xpc_send_activate_IRQ_uv()
690 msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies; in xpc_send_activate_IRQ_uv()
692 mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex); in xpc_send_activate_IRQ_uv()
694 if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) { in xpc_send_activate_IRQ_uv()
695 gru_mq_desc = part_uv->cached_activate_gru_mq_desc; in xpc_send_activate_IRQ_uv()
704 part_uv->cached_activate_gru_mq_desc = gru_mq_desc; in xpc_send_activate_IRQ_uv()
708 part_uv->activate_gru_mq_desc_gpa); in xpc_send_activate_IRQ_uv()

713 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); in xpc_send_activate_IRQ_uv()
714 part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV; in xpc_send_activate_IRQ_uv()
715 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); in xpc_send_activate_IRQ_uv()
718 /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */ in xpc_send_activate_IRQ_uv()
719 ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg, in xpc_send_activate_IRQ_uv()
722 smp_rmb(); /* ensure a fresh copy of part_uv->flags */ in xpc_send_activate_IRQ_uv()
723 if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) in xpc_send_activate_IRQ_uv()
727 mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex); in xpc_send_activate_IRQ_uv()
732 xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg, in xpc_send_activate_IRQ_part_uv() argument
737 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); in xpc_send_activate_IRQ_part_uv()
739 XPC_DEACTIVATE_PARTITION(part, ret); in xpc_send_activate_IRQ_part_uv()
746 struct xpc_partition *part = &xpc_partitions[ch->partid]; in xpc_send_activate_IRQ_ch_uv() local
749 ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); in xpc_send_activate_IRQ_ch_uv()
752 spin_unlock_irqrestore(&ch->lock, *irq_flags); in xpc_send_activate_IRQ_ch_uv()
754 XPC_DEACTIVATE_PARTITION(part, ret); in xpc_send_activate_IRQ_ch_uv()
757 spin_lock_irqsave(&ch->lock, *irq_flags); in xpc_send_activate_IRQ_ch_uv()
762 xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) in xpc_send_local_activate_IRQ_uv() argument
765 struct xpc_partition_uv *part_uv = &part->sn.uv; in xpc_send_local_activate_IRQ_uv()
774 if (part_uv->act_state_req == 0) in xpc_send_local_activate_IRQ_uv()
776 part_uv->act_state_req = act_state_req; in xpc_send_local_activate_IRQ_uv()
820 rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv); in xpc_setup_rsvd_page_uv()
821 rp->sn.uv.activate_gru_mq_desc_gpa = in xpc_setup_rsvd_page_uv()
822 uv_gpa(xpc_activate_mq_uv->gru_mq_desc); in xpc_setup_rsvd_page_uv()
844 xpc_heartbeat_uv->value++; in xpc_increment_heartbeat_uv()
851 xpc_heartbeat_uv->offline = 1; in xpc_offline_heartbeat_uv()
858 xpc_heartbeat_uv->offline = 0; in xpc_online_heartbeat_uv()
864 xpc_heartbeat_uv->value = 1; in xpc_heartbeat_init_uv()
865 xpc_heartbeat_uv->offline = 0; in xpc_heartbeat_init_uv()
875 xpc_get_remote_heartbeat_uv(struct xpc_partition *part) in xpc_get_remote_heartbeat_uv() argument
877 struct xpc_partition_uv *part_uv = &part->sn.uv; in xpc_get_remote_heartbeat_uv()
880 ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat), in xpc_get_remote_heartbeat_uv()
881 part_uv->heartbeat_gpa, in xpc_get_remote_heartbeat_uv()
886 if (part_uv->cached_heartbeat.value == part->last_heartbeat && in xpc_get_remote_heartbeat_uv()
887 !part_uv->cached_heartbeat.offline) { in xpc_get_remote_heartbeat_uv()
891 part->last_heartbeat = part_uv->cached_heartbeat.value; in xpc_get_remote_heartbeat_uv()
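The heartbeat check above copies the remote counter into a local cache and declares the peer dead only when the counter has not advanced since the last poll and the offline flag is clear. A minimal model of that decision, with invented names:

    #include <stdint.h>

    struct heartbeat {
        uint64_t value;     /* peer increments this periodically */
        int      offline;   /* peer sets this around planned outages */
    };

    /* Returns 0 if the peer looks alive, -1 if the heartbeat stalled. */
    int check_heartbeat(const struct heartbeat *hb, uint64_t *last_seen)
    {
        if (hb->value == *last_seen && !hb->offline)
            return -1;      /* no progress and not a planned outage */
        *last_seen = hb->value;
        return 0;
    }
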
900 short partid = remote_rp->SAL_partid; in xpc_request_partition_activation_uv()
901 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_request_partition_activation_uv() local
904 part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ in xpc_request_partition_activation_uv()
905 part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; in xpc_request_partition_activation_uv()
906 part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa; in xpc_request_partition_activation_uv()
907 part->sn.uv.activate_gru_mq_desc_gpa = in xpc_request_partition_activation_uv()
908 remote_rp->sn.uv.activate_gru_mq_desc_gpa; in xpc_request_partition_activation_uv()
914 if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { in xpc_request_partition_activation_uv()
916 msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa; in xpc_request_partition_activation_uv()
918 xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa; in xpc_request_partition_activation_uv()
919 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), in xpc_request_partition_activation_uv()
923 if (part->act_state == XPC_P_AS_INACTIVE) in xpc_request_partition_activation_uv()
924 xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); in xpc_request_partition_activation_uv()
928 xpc_request_partition_reactivation_uv(struct xpc_partition *part) in xpc_request_partition_reactivation_uv() argument
930 xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); in xpc_request_partition_reactivation_uv()
934 xpc_request_partition_deactivation_uv(struct xpc_partition *part) in xpc_request_partition_deactivation_uv() argument
942 if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING && in xpc_request_partition_deactivation_uv()
943 part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) { in xpc_request_partition_deactivation_uv()
945 msg.reason = part->reason; in xpc_request_partition_deactivation_uv()
946 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), in xpc_request_partition_deactivation_uv()
952 xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part) in xpc_cancel_partition_deactivation_request_uv() argument
961 head->first = NULL; in xpc_init_fifo_uv()
962 head->last = NULL; in xpc_init_fifo_uv()
963 spin_lock_init(&head->lock); in xpc_init_fifo_uv()
964 head->n_entries = 0; in xpc_init_fifo_uv()
973 spin_lock_irqsave(&head->lock, irq_flags); in xpc_get_fifo_entry_uv()
974 first = head->first; in xpc_get_fifo_entry_uv()
975 if (head->first != NULL) { in xpc_get_fifo_entry_uv()
976 head->first = first->next; in xpc_get_fifo_entry_uv()
977 if (head->first == NULL) in xpc_get_fifo_entry_uv()
978 head->last = NULL; in xpc_get_fifo_entry_uv()
980 head->n_entries--; in xpc_get_fifo_entry_uv()
981 BUG_ON(head->n_entries < 0); in xpc_get_fifo_entry_uv()
983 first->next = NULL; in xpc_get_fifo_entry_uv()
985 spin_unlock_irqrestore(&head->lock, irq_flags); in xpc_get_fifo_entry_uv()
995 last->next = NULL; in xpc_put_fifo_entry_uv()
996 spin_lock_irqsave(&head->lock, irq_flags); in xpc_put_fifo_entry_uv()
997 if (head->last != NULL) in xpc_put_fifo_entry_uv()
998 head->last->next = last; in xpc_put_fifo_entry_uv()
1000 head->first = last; in xpc_put_fifo_entry_uv()
1001 head->last = last; in xpc_put_fifo_entry_uv()
1002 head->n_entries++; in xpc_put_fifo_entry_uv()
1003 spin_unlock_irqrestore(&head->lock, irq_flags); in xpc_put_fifo_entry_uv()
1009 return head->n_entries; in xpc_n_of_fifo_entries_uv()
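The xpc_*_fifo_uv() helpers above implement a spinlock-protected singly linked FIFO with first/last pointers and an entry count. For reference, the same structure as one self-contained userspace unit, with a pthread spinlock assumed in place of the kernel one:

    #include <pthread.h>
    #include <stddef.h>

    struct fifo_entry { struct fifo_entry *next; };

    struct fifo_head {
        struct fifo_entry *first;
        struct fifo_entry *last;
        pthread_spinlock_t lock;
        int n_entries;
    };

    void fifo_init(struct fifo_head *h)
    {
        h->first = h->last = NULL;
        h->n_entries = 0;
        pthread_spin_init(&h->lock, PTHREAD_PROCESS_PRIVATE);
    }

    struct fifo_entry *fifo_get(struct fifo_head *h)
    {
        struct fifo_entry *e;

        pthread_spin_lock(&h->lock);
        e = h->first;
        if (e != NULL) {
            h->first = e->next;
            if (h->first == NULL)
                h->last = NULL;      /* list is now empty */
            h->n_entries--;
            e->next = NULL;
        }
        pthread_spin_unlock(&h->lock);
        return e;
    }

    void fifo_put(struct fifo_head *h, struct fifo_entry *e)
    {
        e->next = NULL;
        pthread_spin_lock(&h->lock);
        if (h->last != NULL)
            h->last->next = e;
        else
            h->first = e;            /* queue was empty */
        h->last = e;
        h->n_entries++;
        pthread_spin_unlock(&h->lock);
    }
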
1016 xpc_setup_ch_structures_uv(struct xpc_partition *part) in xpc_setup_ch_structures_uv() argument
1021 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { in xpc_setup_ch_structures_uv()
1022 ch_uv = &part->channels[ch_number].sn.uv; in xpc_setup_ch_structures_uv()
1024 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); in xpc_setup_ch_structures_uv()
1025 xpc_init_fifo_uv(&ch_uv->recv_msg_list); in xpc_setup_ch_structures_uv()
1035 xpc_teardown_ch_structures_uv(struct xpc_partition *part) in xpc_teardown_ch_structures_uv() argument
1042 xpc_make_first_contact_uv(struct xpc_partition *part) in xpc_make_first_contact_uv() argument
1051 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), in xpc_make_first_contact_uv()
1054 while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) || in xpc_make_first_contact_uv()
1055 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) { in xpc_make_first_contact_uv()
1058 "partition %d\n", XPC_PARTID(part)); in xpc_make_first_contact_uv()
1063 if (part->act_state == XPC_P_AS_DEACTIVATING) in xpc_make_first_contact_uv()
1064 return part->reason; in xpc_make_first_contact_uv()
1071 xpc_get_chctl_all_flags_uv(struct xpc_partition *part) in xpc_get_chctl_all_flags_uv() argument
1076 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_get_chctl_all_flags_uv()
1077 chctl = part->chctl; in xpc_get_chctl_all_flags_uv()
1079 part->chctl.all_flags = 0; in xpc_get_chctl_all_flags_uv()
1081 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_get_chctl_all_flags_uv()
1088 struct xpc_channel_uv *ch_uv = &ch->sn.uv; in xpc_allocate_send_msg_slot_uv()
1095 for (nentries = ch->local_nentries; nentries > 0; nentries--) { in xpc_allocate_send_msg_slot_uv()
1097 ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL); in xpc_allocate_send_msg_slot_uv()
1098 if (ch_uv->send_msg_slots == NULL) in xpc_allocate_send_msg_slot_uv()
1102 msg_slot = &ch_uv->send_msg_slots[entry]; in xpc_allocate_send_msg_slot_uv()
1104 msg_slot->msg_slot_number = entry; in xpc_allocate_send_msg_slot_uv()
1105 xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list, in xpc_allocate_send_msg_slot_uv()
1106 &msg_slot->next); in xpc_allocate_send_msg_slot_uv()
1109 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_allocate_send_msg_slot_uv()
1110 if (nentries < ch->local_nentries) in xpc_allocate_send_msg_slot_uv()
1111 ch->local_nentries = nentries; in xpc_allocate_send_msg_slot_uv()
1112 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_allocate_send_msg_slot_uv()
1122 struct xpc_channel_uv *ch_uv = &ch->sn.uv; in xpc_allocate_recv_msg_slot_uv()
1129 for (nentries = ch->remote_nentries; nentries > 0; nentries--) { in xpc_allocate_recv_msg_slot_uv()
1130 nbytes = nentries * ch->entry_size; in xpc_allocate_recv_msg_slot_uv()
1131 ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL); in xpc_allocate_recv_msg_slot_uv()
1132 if (ch_uv->recv_msg_slots == NULL) in xpc_allocate_recv_msg_slot_uv()
1136 msg_slot = ch_uv->recv_msg_slots + in xpc_allocate_recv_msg_slot_uv()
1137 entry * ch->entry_size; in xpc_allocate_recv_msg_slot_uv()
1139 msg_slot->hdr.msg_slot_number = entry; in xpc_allocate_recv_msg_slot_uv()
1142 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_allocate_recv_msg_slot_uv()
1143 if (nentries < ch->remote_nentries) in xpc_allocate_recv_msg_slot_uv()
1144 ch->remote_nentries = nentries; in xpc_allocate_recv_msg_slot_uv()
1145 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_allocate_recv_msg_slot_uv()
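Both slot allocators above retry with progressively fewer entries when kzalloc() fails, and the channel then runs with the smaller ring (local_nentries or remote_nentries is shrunk to match). A sketch of that fallback loop, names illustrative:

    #include <stdlib.h>

    struct slot { int number; };

    struct slot *alloc_slots(int *nentries_io, size_t entry_size)
    {
        struct slot *slots;
        int n;

        for (n = *nentries_io; n > 0; n--) {
            slots = calloc((size_t)n, entry_size);
            if (slots != NULL) {
                *nentries_io = n;   /* caller shrinks its ring to match */
                return slots;
            }
        }
        return NULL;                /* even a single entry would not fit */
    }
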
1159 struct xpc_channel_uv *ch_uv = &ch->sn.uv; in xpc_setup_msg_structures_uv()
1161 DBUG_ON(ch->flags & XPC_C_SETUP); in xpc_setup_msg_structures_uv()
1163 ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct in xpc_setup_msg_structures_uv()
1166 if (ch_uv->cached_notify_gru_mq_desc == NULL) in xpc_setup_msg_structures_uv()
1174 kfree(ch_uv->send_msg_slots); in xpc_setup_msg_structures_uv()
1175 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); in xpc_setup_msg_structures_uv()
1188 struct xpc_channel_uv *ch_uv = &ch->sn.uv; in xpc_teardown_msg_structures_uv()
1190 lockdep_assert_held(&ch->lock); in xpc_teardown_msg_structures_uv()
1192 kfree(ch_uv->cached_notify_gru_mq_desc); in xpc_teardown_msg_structures_uv()
1193 ch_uv->cached_notify_gru_mq_desc = NULL; in xpc_teardown_msg_structures_uv()
1195 if (ch->flags & XPC_C_SETUP) { in xpc_teardown_msg_structures_uv()
1196 xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); in xpc_teardown_msg_structures_uv()
1197 kfree(ch_uv->send_msg_slots); in xpc_teardown_msg_structures_uv()
1198 xpc_init_fifo_uv(&ch_uv->recv_msg_list); in xpc_teardown_msg_structures_uv()
1199 kfree(ch_uv->recv_msg_slots); in xpc_teardown_msg_structures_uv()
1208 msg.ch_number = ch->number; in xpc_send_chctl_closerequest_uv()
1209 msg.reason = ch->reason; in xpc_send_chctl_closerequest_uv()
1219 msg.ch_number = ch->number; in xpc_send_chctl_closereply_uv()
1229 msg.ch_number = ch->number; in xpc_send_chctl_openrequest_uv()
1230 msg.entry_size = ch->entry_size; in xpc_send_chctl_openrequest_uv()
1231 msg.local_nentries = ch->local_nentries; in xpc_send_chctl_openrequest_uv()
1241 msg.ch_number = ch->number; in xpc_send_chctl_openreply_uv()
1242 msg.local_nentries = ch->local_nentries; in xpc_send_chctl_openreply_uv()
1243 msg.remote_nentries = ch->remote_nentries; in xpc_send_chctl_openreply_uv()
1244 msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc); in xpc_send_chctl_openreply_uv()
1254 msg.ch_number = ch->number; in xpc_send_chctl_opencomplete_uv()
1260 xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) in xpc_send_chctl_local_msgrequest_uv() argument
1264 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_send_chctl_local_msgrequest_uv()
1265 part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST; in xpc_send_chctl_local_msgrequest_uv()
1266 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_send_chctl_local_msgrequest_uv()
1268 xpc_wakeup_channel_mgr(part); in xpc_send_chctl_local_msgrequest_uv()
1275 struct xpc_channel_uv *ch_uv = &ch->sn.uv; in xpc_save_remote_msgqueue_pa_uv()
1277 DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL); in xpc_save_remote_msgqueue_pa_uv()
1278 return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc, in xpc_save_remote_msgqueue_pa_uv()
1283 xpc_indicate_partition_engaged_uv(struct xpc_partition *part) in xpc_indicate_partition_engaged_uv() argument
1287 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), in xpc_indicate_partition_engaged_uv()
1292 xpc_indicate_partition_disengaged_uv(struct xpc_partition *part) in xpc_indicate_partition_disengaged_uv() argument
1296 xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), in xpc_indicate_partition_disengaged_uv()
1306 spin_lock_irqsave(&part_uv->flags_lock, irq_flags); in xpc_assume_partition_disengaged_uv()
1307 part_uv->flags &= ~XPC_P_ENGAGED_UV; in xpc_assume_partition_disengaged_uv()
1308 spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); in xpc_assume_partition_disengaged_uv()
1325 if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0) in xpc_any_partition_engaged_uv()
1340 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list); in xpc_allocate_msg_slot_uv()
1361 xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next); in xpc_free_msg_slot_uv()
1364 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) in xpc_free_msg_slot_uv()
1365 wake_up(&ch->msg_allocate_wq); in xpc_free_msg_slot_uv()
1373 xpc_notify_func func = msg_slot->func; in xpc_notify_sender_uv()
1375 if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) { in xpc_notify_sender_uv()
1377 atomic_dec(&ch->n_to_notify); in xpc_notify_sender_uv()
1379 dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p " in xpc_notify_sender_uv()
1381 msg_slot->msg_slot_number, ch->partid, ch->number); in xpc_notify_sender_uv()
1383 func(reason, ch->partid, ch->number, msg_slot->key); in xpc_notify_sender_uv()
1385 dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p " in xpc_notify_sender_uv()
1387 msg_slot->msg_slot_number, ch->partid, ch->number); in xpc_notify_sender_uv()
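The cmpxchg() on msg_slot->func guarantees the completion callback fires exactly once even when an ACK and a disconnect race to deliver it: only the path that swings func from non-NULL to NULL may call it. A C11-atomics sketch of the same claim-once pattern:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef void (*notify_fn)(int reason, void *key);

    struct msg_slot {
        _Atomic(notify_fn) func;    /* NULL once someone has claimed it */
        void *key;
    };

    void notify_once(struct msg_slot *slot, int reason)
    {
        notify_fn fn = atomic_load(&slot->func);

        /* Only the caller that swings func from fn to NULL runs it. */
        if (fn != NULL &&
            atomic_compare_exchange_strong(&slot->func, &fn, (notify_fn)0))
            fn(reason, slot->key);
    }
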
1396 int entry = msg->hdr.msg_slot_number % ch->local_nentries; in xpc_handle_notify_mq_ack_uv()
1398 msg_slot = &ch->sn.uv.send_msg_slots[entry]; in xpc_handle_notify_mq_ack_uv()
1400 BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number); in xpc_handle_notify_mq_ack_uv()
1401 msg_slot->msg_slot_number += ch->local_nentries; in xpc_handle_notify_mq_ack_uv()
1403 if (msg_slot->func != NULL) in xpc_handle_notify_mq_ack_uv()
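The ACK handler maps a monotonically growing slot number back to a physical ring entry with `% local_nentries`, then re-arms the slot by advancing its number one full lap. A tiny worked example with a hypothetical ring of 4:

    #include <stdio.h>

    int main(void)
    {
        int nentries = 4;              /* hypothetical ring size */
        int msg_slot_number = 6;       /* arrives in an ACK */
        int entry = msg_slot_number % nentries;

        printf("ACK for slot #%d -> physical entry %d\n",
               msg_slot_number, entry);
        printf("slot re-armed as #%d for the next lap\n",
               msg_slot_number + nentries);
        return 0;
    }
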
1410 xpc_handle_notify_mq_msg_uv(struct xpc_partition *part, in xpc_handle_notify_mq_msg_uv() argument
1413 struct xpc_partition_uv *part_uv = &part->sn.uv; in xpc_handle_notify_mq_msg_uv()
1418 int ch_number = msg->hdr.ch_number; in xpc_handle_notify_mq_msg_uv()
1420 if (unlikely(ch_number >= part->nchannels)) { in xpc_handle_notify_mq_msg_uv()
1422 "channel number=0x%x in message from partid=%d\n", in xpc_handle_notify_mq_msg_uv()
1423 ch_number, XPC_PARTID(part)); in xpc_handle_notify_mq_msg_uv()
1427 if (part_uv->act_state_req == 0) in xpc_handle_notify_mq_msg_uv()
1429 part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; in xpc_handle_notify_mq_msg_uv()
1430 part_uv->reason = xpBadChannelNumber; in xpc_handle_notify_mq_msg_uv()
1437 ch = &part->channels[ch_number]; in xpc_handle_notify_mq_msg_uv()
1440 if (!(ch->flags & XPC_C_CONNECTED)) { in xpc_handle_notify_mq_msg_uv()
1446 if (msg->hdr.size == 0) { in xpc_handle_notify_mq_msg_uv()
1453 ch_uv = &ch->sn.uv; in xpc_handle_notify_mq_msg_uv()
1455 msg_slot = ch_uv->recv_msg_slots + in xpc_handle_notify_mq_msg_uv()
1456 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; in xpc_handle_notify_mq_msg_uv()
1458 BUG_ON(msg_slot->hdr.size != 0); in xpc_handle_notify_mq_msg_uv()
1460 memcpy(msg_slot, msg, msg->hdr.size); in xpc_handle_notify_mq_msg_uv()
1462 xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next); in xpc_handle_notify_mq_msg_uv()
1464 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { in xpc_handle_notify_mq_msg_uv()
1470 if (atomic_read(&ch->kthreads_idle) > 0) in xpc_handle_notify_mq_msg_uv()
1471 wake_up_nr(&ch->idle_wq, 1); in xpc_handle_notify_mq_msg_uv()
1473 xpc_send_chctl_local_msgrequest_uv(part, ch->number); in xpc_handle_notify_mq_msg_uv()
1483 struct xpc_partition *part; in xpc_handle_notify_IRQ_uv() local
1485 while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) != in xpc_handle_notify_IRQ_uv()
1488 partid = msg->hdr.partid; in xpc_handle_notify_IRQ_uv()
1493 part = &xpc_partitions[partid]; in xpc_handle_notify_IRQ_uv()
1495 if (xpc_part_ref(part)) { in xpc_handle_notify_IRQ_uv()
1496 xpc_handle_notify_mq_msg_uv(part, msg); in xpc_handle_notify_IRQ_uv()
1497 xpc_part_deref(part); in xpc_handle_notify_IRQ_uv()
1501 gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg); in xpc_handle_notify_IRQ_uv()
1510 return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list); in xpc_n_of_deliverable_payloads_uv()
1514 xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number) in xpc_process_msg_chctl_flags_uv() argument
1516 struct xpc_channel *ch = &part->channels[ch_number]; in xpc_process_msg_chctl_flags_uv()
1524 (ch->flags & XPC_C_CONNECTED) && in xpc_process_msg_chctl_flags_uv()
1525 (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) { in xpc_process_msg_chctl_flags_uv()
1547 if (msg_size > ch->entry_size) in xpc_send_payload_uv()
1552 if (ch->flags & XPC_C_DISCONNECTING) { in xpc_send_payload_uv()
1553 ret = ch->reason; in xpc_send_payload_uv()
1556 if (!(ch->flags & XPC_C_CONNECTED)) { in xpc_send_payload_uv()
1566 atomic_inc(&ch->n_to_notify); in xpc_send_payload_uv()
1568 msg_slot->key = key; in xpc_send_payload_uv()
1569 smp_wmb(); /* a non-NULL func must hit memory after the key */ in xpc_send_payload_uv()
1570 msg_slot->func = func; in xpc_send_payload_uv()
1572 if (ch->flags & XPC_C_DISCONNECTING) { in xpc_send_payload_uv()
1573 ret = ch->reason; in xpc_send_payload_uv()
1579 msg->hdr.partid = xp_partition_id; in xpc_send_payload_uv()
1580 msg->hdr.ch_number = ch->number; in xpc_send_payload_uv()
1581 msg->hdr.size = msg_size; in xpc_send_payload_uv()
1582 msg->hdr.msg_slot_number = msg_slot->msg_slot_number; in xpc_send_payload_uv()
1583 memcpy(&msg->payload, payload, payload_size); in xpc_send_payload_uv()
1585 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, in xpc_send_payload_uv()
1590 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); in xpc_send_payload_uv()
1603 if (cmpxchg(&msg_slot->func, func, NULL) != func) { in xpc_send_payload_uv()
1608 msg_slot->key = NULL; in xpc_send_payload_uv()
1609 atomic_dec(&ch->n_to_notify); in xpc_send_payload_uv()
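The smp_wmb() at line 1569 orders the store of msg_slot->key before the store of a non-NULL msg_slot->func, because any observer of func != NULL will dereference key; the consumer side needs the matching read ordering. The same guarantee expressed with C11 release/acquire, which folds the barriers into the accesses:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef void (*notify_fn)(void *key);

    struct msg_slot {
        void *key;
        _Atomic(notify_fn) func;
    };

    void publish(struct msg_slot *s, notify_fn fn, void *key)
    {
        s->key = key;                                /* 1: plain store */
        atomic_store_explicit(&s->func, fn,
                              memory_order_release); /* 2: publish */
    }

    void consume(struct msg_slot *s)
    {
        notify_fn fn = atomic_load_explicit(&s->func,
                                            memory_order_acquire);
        if (fn != NULL)
            fn(s->key);   /* acquire pairs with release: key is visible */
    }
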
1630 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); in xpc_notify_senders_of_disconnect_uv()
1632 for (entry = 0; entry < ch->local_nentries; entry++) { in xpc_notify_senders_of_disconnect_uv()
1634 if (atomic_read(&ch->n_to_notify) == 0) in xpc_notify_senders_of_disconnect_uv()
1637 msg_slot = &ch->sn.uv.send_msg_slots[entry]; in xpc_notify_senders_of_disconnect_uv()
1638 if (msg_slot->func != NULL) in xpc_notify_senders_of_disconnect_uv()
1639 xpc_notify_sender_uv(ch, msg_slot, ch->reason); in xpc_notify_senders_of_disconnect_uv()
1653 if (!(ch->flags & XPC_C_DISCONNECTING)) { in xpc_get_deliverable_payload_uv()
1654 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list); in xpc_get_deliverable_payload_uv()
1658 payload = &msg->payload; in xpc_get_deliverable_payload_uv()
1674 msg->hdr.partid = xp_partition_id; in xpc_received_payload_uv()
1675 msg->hdr.size = 0; /* size of zero indicates this is an ACK */ in xpc_received_payload_uv()
1677 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, in xpc_received_payload_uv()
1680 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); in xpc_received_payload_uv()
1789 return -E2BIG; in xpc_init_uv()
1804 -ret); in xpc_init_uv()
1817 MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");