Lines matching refs: crq
224 entry->fmt = evt->crq.format; in ibmvfc_trc_start()
261 entry->fmt = evt->crq.format; in ibmvfc_trc_end()
822 evt->crq.valid = 0x80; in ibmvfc_init_event_pool()
823 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i)); in ibmvfc_init_event_pool()
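
A minimal user-space model of the event-pool setup visible at 822-823: each event's CRQ descriptor is pre-marked valid (0x80) and its ioba is the big-endian bus address of that event's slot in one contiguous IU buffer. Struct layouts, sizes and names below are illustrative stand-ins, not the driver's real definitions.

    #include <stdint.h>
    #include <endian.h>

    struct model_xfer_iu { uint8_t raw[1024]; };            /* stand-in for the transfer IU */
    struct model_crq     { uint8_t valid; uint64_t ioba; };
    struct model_event   { struct model_crq crq; };

    static void model_init_event_pool(struct model_event *evts, int n,
                                      uint64_t iu_token /* bus address of the IU array */)
    {
        for (int i = 0; i < n; i++) {
            evts[i].crq.valid = 0x80;                       /* "valid command" marker */
            evts[i].crq.ioba  = htobe64(iu_token +          /* wire format is big endian */
                                        sizeof(struct model_xfer_iu) * (uint64_t)i);
        }
    }
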
894 struct ibmvfc_queue *crq = &vhost->crq; in ibmvfc_release_crq_queue() local
908 ibmvfc_free_queue(vhost, crq); in ibmvfc_release_crq_queue()
937 spin_lock(vhost->crq.q_lock); in ibmvfc_reenable_crq_queue()
940 spin_unlock(vhost->crq.q_lock); in ibmvfc_reenable_crq_queue()
960 struct ibmvfc_queue *crq = &vhost->crq; in ibmvfc_reset_crq() local
972 spin_lock(vhost->crq.q_lock); in ibmvfc_reset_crq()
979 memset(crq->msgs.crq, 0, PAGE_SIZE); in ibmvfc_reset_crq()
980 crq->cur = 0; in ibmvfc_reset_crq()
984 crq->msg_token, PAGE_SIZE); in ibmvfc_reset_crq()
992 spin_unlock(vhost->crq.q_lock); in ibmvfc_reset_crq()
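
The reset path at 960-992 follows a simple shape: under the queue lock, zero the page-sized message ring, rewind the consumer cursor, and re-register the ring's DMA token with the firmware. A sketch of that flow; the mutex stands in for the q_lock spinlock and register_fn stands in for the real re-registration hcall path, both purely illustrative.

    #include <stdint.h>
    #include <string.h>
    #include <pthread.h>

    #define MODEL_PAGE_SIZE 4096

    struct model_queue {
        pthread_mutex_t q_lock;     /* models vhost->crq.q_lock */
        void *msgs;                 /* page-sized CRQ message ring */
        unsigned int cur;           /* next slot to consume */
        uint64_t msg_token;         /* bus address handed to firmware */
    };

    static int model_reset_crq(struct model_queue *crq,
                               int (*register_fn)(uint64_t token, size_t len))
    {
        int rc;

        pthread_mutex_lock(&crq->q_lock);
        memset(crq->msgs, 0, MODEL_PAGE_SIZE);   /* clean ring: all valid bits cleared */
        crq->cur = 0;                            /* start consuming at slot 0 again */
        rc = register_fn(crq->msg_token, MODEL_PAGE_SIZE);
        pthread_mutex_unlock(&crq->q_lock);
        return rc;
    }
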
1122 spin_lock_irqsave(&vhost->crq.l_lock, flags); in ibmvfc_purge_requests()
1123 list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list) in ibmvfc_purge_requests()
1125 list_splice_init(&vhost->crq.sent, &vhost->purge); in ibmvfc_purge_requests()
1126 spin_unlock_irqrestore(&vhost->crq.l_lock, flags); in ibmvfc_purge_requests()
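
The purge path at 1122-1126 marks every in-flight event on the queue's "sent" list and splices the whole list onto the host's "purge" list in one step, all under the per-queue list lock. A simplified model of that move; the list type and error value are illustrative, and the model assumes the purge list starts empty.

    #include <pthread.h>
    #include <stddef.h>

    struct model_evt {
        int error;                      /* stands in for the driver's error status */
        struct model_evt *next;
    };

    struct model_list { struct model_evt *head; };

    static void model_purge_requests(struct model_list *sent, struct model_list *purge,
                                     pthread_mutex_t *l_lock, int err)
    {
        pthread_mutex_lock(l_lock);
        for (struct model_evt *e = sent->head; e; e = e->next)
            e->error = err;             /* fail every outstanding event */
        purge->head = sent->head;       /* splice: hand the whole chain over ... */
        sent->head = NULL;              /* ... and leave the sent list empty */
        pthread_mutex_unlock(l_lock);
    }
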
1563 evt->crq.format = format; in ibmvfc_init_event()
1685 __be64 *crq_as_u64 = (__be64 *) &evt->crq; in ibmvfc_send_event()
1691 if (evt->crq.format == IBMVFC_CMD_FORMAT) in ibmvfc_send_event()
1693 else if (evt->crq.format == IBMVFC_MAD_FORMAT) in ibmvfc_send_event()
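
The send path at 1685-1693 views the 16-byte CRQ descriptor as two 64-bit big-endian words before handing it to the hypervisor send call. The cast is visible in the listing; the byte-order decode and the send helper's shape below are assumptions for illustration only.

    #include <stdint.h>
    #include <endian.h>

    /* 16-byte CRQ descriptor reinterpreted as two 64-bit big-endian words. */
    static int model_send_event(const void *crq_desc,
                                int (*send_fn)(uint64_t word1, uint64_t word2))
    {
        const uint64_t *crq_as_u64 = crq_desc;          /* same trick as line 1685 */
        return send_fn(be64toh(crq_as_u64[0]),          /* decode to host order for the call */
                       be64toh(crq_as_u64[1]));
    }
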
1911 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset); in ibmvfc_init_vfc_cmd()
1959 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_queuecommand()
2049 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_bsg_timeout()
2112 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_bsg_plogi()
2234 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_bsg_request()
2248 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + in ibmvfc_bsg_request()
2326 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_reset_device()
2471 queues = &vhost->crq; in ibmvfc_wait_for_ops()
2658 spin_lock(&vhost->crq.l_lock); in ibmvfc_cancel_all_sq()
2659 list_for_each_entry(evt, &vhost->crq.sent, queue_list) { in ibmvfc_cancel_all_sq()
2665 spin_unlock(&vhost->crq.l_lock); in ibmvfc_cancel_all_sq()
2675 evt = ibmvfc_init_tmf(&vhost->crq, sdev, type); in ibmvfc_cancel_all_sq()
2748 if (evt->crq.format == IBMVFC_CMD_FORMAT && in ibmvfc_match_key()
2794 spin_lock(&vhost->crq.l_lock); in ibmvfc_abort_task_set()
2795 list_for_each_entry(evt, &vhost->crq.sent, queue_list) { in ibmvfc_abort_task_set()
2801 spin_unlock(&vhost->crq.l_lock); in ibmvfc_abort_task_set()
2811 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_abort_task_set()
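
Both the cancel path (2658-2675) and the abort path (2794-2811) first walk the queue's "sent" list under l_lock to see whether any in-flight event belongs to the device in question before building a task-management request. A sketch of that locked scan; the match test and fields are illustrative stand-ins.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct model_evt {
        const void *device;             /* owner the command was issued for */
        struct model_evt *next;
    };

    /* Returns true if any in-flight event on 'sent_head' belongs to 'device'. */
    static bool model_found_outstanding(struct model_evt *sent_head,
                                        pthread_mutex_t *l_lock, const void *device)
    {
        bool found = false;

        pthread_mutex_lock(l_lock);
        for (struct model_evt *e = sent_head; e && !found; e = e->next)
            if (e->device == device)
                found = true;           /* a cancel/abort TMF is worth sending */
        pthread_mutex_unlock(l_lock);
        return found;
    }
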
3157 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, in ibmvfc_handle_async() argument
3160 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); in ibmvfc_handle_async()
3164 " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id), in ibmvfc_handle_async()
3165 be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name), in ibmvfc_handle_async()
3166 ibmvfc_get_link_state(crq->link_state)); in ibmvfc_handle_async()
3168 switch (be64_to_cpu(crq->event)) { in ibmvfc_handle_async()
3170 switch (crq->link_state) { in ibmvfc_handle_async()
3209 if (!crq->scsi_id && !crq->wwpn && !crq->node_name) in ibmvfc_handle_async()
3211 if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id) in ibmvfc_handle_async()
3213 if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn) in ibmvfc_handle_async()
3215 if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name) in ibmvfc_handle_async()
3217 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) in ibmvfc_handle_async()
3219 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { in ibmvfc_handle_async()
3236 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event); in ibmvfc_handle_async()
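
The async handler (3157-3236) matches targets against the event by converting the CPU-endian target fields to big endian, rather than converting every wire field, and a zero wire field simply skips that comparison (3209-3215). A minimal model of those checks; struct and field names are illustrative stand-ins.

    #include <stdint.h>
    #include <stdbool.h>
    #include <endian.h>

    struct model_async_crq { uint64_t scsi_id, wwpn, node_name; };     /* big-endian wire fields */
    struct model_target    { uint64_t scsi_id, port_name, node_name; };/* CPU byte order */

    static bool model_async_matches(const struct model_async_crq *crq,
                                    const struct model_target *tgt)
    {
        /* convert the CPU-order side, mirroring the comparisons at 3211-3215 */
        if (crq->scsi_id   && htobe64(tgt->scsi_id)   != crq->scsi_id)
            return false;
        if (crq->wwpn      && htobe64(tgt->port_name) != crq->wwpn)
            return false;
        if (crq->node_name && htobe64(tgt->node_name) != crq->node_name)
            return false;
        return true;
    }
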
3248 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, in ibmvfc_handle_crq() argument
3252 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); in ibmvfc_handle_crq()
3254 switch (crq->valid) { in ibmvfc_handle_crq()
3256 switch (crq->format) { in ibmvfc_handle_crq()
3271 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format); in ibmvfc_handle_crq()
3278 if (crq->format == IBMVFC_PARTITION_MIGRATED) { in ibmvfc_handle_crq()
3288 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { in ibmvfc_handle_crq()
3289 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); in ibmvfc_handle_crq()
3294 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format); in ibmvfc_handle_crq()
3300 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); in ibmvfc_handle_crq()
3304 if (crq->format == IBMVFC_ASYNC_EVENT) in ibmvfc_handle_crq()
3311 if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) { in ibmvfc_handle_crq()
3313 crq->ioba); in ibmvfc_handle_crq()
3319 crq->ioba); in ibmvfc_handle_crq()
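
ibmvfc_handle_crq (3248-3319) recovers the event pointer that was stashed in the response's ioba field and sanity-checks it against the queue's event pool before completing it. A minimal model of that lookup; the bounds test stands in for ibmvfc_valid_event() and the structures are illustrative.

    #include <stdint.h>
    #include <stdbool.h>
    #include <endian.h>

    struct model_event { int done; };
    struct model_pool  { struct model_event *events; int size; };
    struct model_crq   { uint8_t valid; uint64_t ioba; };   /* ioba doubles as a correlation cookie */

    static bool model_pool_owns(const struct model_pool *p, const struct model_event *e)
    {
        return e >= p->events && e < p->events + p->size;   /* cookie falls inside the pool */
    }

    static void model_handle_crq(const struct model_crq *crq, struct model_pool *pool)
    {
        struct model_event *evt =
            (struct model_event *)(uintptr_t)be64toh(crq->ioba);

        if (!model_pool_owns(pool, evt))
            return;             /* bogus correlation token: drop it (the driver logs this) */
        evt->done = 1;          /* hand the completed event to its done path */
    }
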
3688 struct ibmvfc_async_crq *crq; in ibmvfc_next_async_crq() local
3690 crq = &async_crq->msgs.async[async_crq->cur]; in ibmvfc_next_async_crq()
3691 if (crq->valid & 0x80) { in ibmvfc_next_async_crq()
3696 crq = NULL; in ibmvfc_next_async_crq()
3698 return crq; in ibmvfc_next_async_crq()
3710 struct ibmvfc_queue *queue = &vhost->crq; in ibmvfc_next_crq()
3711 struct ibmvfc_crq *crq; in ibmvfc_next_crq() local
3713 crq = &queue->msgs.crq[queue->cur]; in ibmvfc_next_crq()
3714 if (crq->valid & 0x80) { in ibmvfc_next_crq()
3719 crq = NULL; in ibmvfc_next_crq()
3721 return crq; in ibmvfc_next_crq()
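
ibmvfc_next_async_crq (3688-3698) and ibmvfc_next_crq (3710-3721) implement the same ring-consumer step: the current slot is live only if bit 0x80 of its valid byte is set; if it is, the cursor advances (with wraparound), otherwise there is nothing to process. A sketch of that step; the slot count and entry layout are illustrative.

    #include <stdint.h>
    #include <stddef.h>

    #define MODEL_CRQ_SLOTS 256

    struct model_crq_msg { uint8_t valid; uint8_t pad[15]; };   /* 16-byte wire entry */

    struct model_crq_ring {
        struct model_crq_msg msgs[MODEL_CRQ_SLOTS];
        unsigned int cur;                                       /* next slot to look at */
    };

    static struct model_crq_msg *model_next_crq(struct model_crq_ring *q)
    {
        struct model_crq_msg *crq = &q->msgs[q->cur];

        if (!(crq->valid & 0x80))
            return NULL;                    /* slot not yet written by the firmware */
        if (++q->cur == MODEL_CRQ_SLOTS)    /* advance and wrap the consumer cursor */
            q->cur = 0;
        return crq;
    }
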
3755 struct ibmvfc_crq *crq; in ibmvfc_tasklet() local
3763 spin_lock(vhost->crq.q_lock); in ibmvfc_tasklet()
3773 while ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3774 ibmvfc_handle_crq(crq, vhost, &evt_doneq); in ibmvfc_tasklet()
3775 crq->valid = 0; in ibmvfc_tasklet()
3785 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) { in ibmvfc_tasklet()
3787 ibmvfc_handle_crq(crq, vhost, &evt_doneq); in ibmvfc_tasklet()
3788 crq->valid = 0; in ibmvfc_tasklet()
3794 spin_unlock(vhost->crq.q_lock); in ibmvfc_tasklet()
3825 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, in ibmvfc_handle_scrq() argument
3828 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); in ibmvfc_handle_scrq()
3830 switch (crq->valid) { in ibmvfc_handle_scrq()
3836 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid); in ibmvfc_handle_scrq()
3846 crq->ioba); in ibmvfc_handle_scrq()
3852 crq->ioba); in ibmvfc_handle_scrq()
3863 struct ibmvfc_crq *crq; in ibmvfc_next_scrq() local
3865 crq = &scrq->msgs.scrq[scrq->cur].crq; in ibmvfc_next_scrq()
3866 if (crq->valid & 0x80) { in ibmvfc_next_scrq()
3871 crq = NULL; in ibmvfc_next_scrq()
3873 return crq; in ibmvfc_next_scrq()
3878 struct ibmvfc_crq *crq; in ibmvfc_drain_sub_crq() local
3886 while ((crq = ibmvfc_next_scrq(scrq)) != NULL) { in ibmvfc_drain_sub_crq()
3887 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); in ibmvfc_drain_sub_crq()
3888 crq->valid = 0; in ibmvfc_drain_sub_crq()
3893 if ((crq = ibmvfc_next_scrq(scrq)) != NULL) { in ibmvfc_drain_sub_crq()
3895 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq); in ibmvfc_drain_sub_crq()
3896 crq->valid = 0; in ibmvfc_drain_sub_crq()
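
The main-CRQ tasklet (3773-3775) and the sub-CRQ drain (3886-3888) share the same drain loop: under the queue lock, pull each live entry, handle it, and clear its valid byte so the slot can be reused. A self-contained model of that loop; the lock type, slot count and handler callback are illustrative stand-ins.

    #include <stdint.h>
    #include <pthread.h>
    #include <stddef.h>

    struct model_crq_msg  { uint8_t valid; uint8_t pad[15]; };

    struct model_crq_ring {
        pthread_mutex_t lock;               /* models the queue's q_lock */
        struct model_crq_msg msgs[256];
        unsigned int cur;
    };

    static struct model_crq_msg *model_next(struct model_crq_ring *q)
    {
        struct model_crq_msg *c = &q->msgs[q->cur];

        if (!(c->valid & 0x80))
            return NULL;
        if (++q->cur == 256)
            q->cur = 0;
        return c;
    }

    static void model_drain(struct model_crq_ring *q,
                            void (*handle)(struct model_crq_msg *))
    {
        struct model_crq_msg *crq;

        pthread_mutex_lock(&q->lock);
        while ((crq = model_next(q)) != NULL) {
            handle(crq);        /* complete the command or process the async event */
            crq->valid = 0;     /* give the slot back to the firmware */
        }
        pthread_mutex_unlock(&q->lock);
    }
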
4073 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_send_prli()
4186 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_send_plogi()
4268 evt = ibmvfc_get_event(&vhost->crq); in __ibmvfc_tgt_get_implicit_logout_evt()
4443 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_move_login()
4549 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
4554 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
4558 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + in ibmvfc_init_passthru()
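
The passthru setup at 4549-4558 (and likewise 1911 and 2248) addresses embedded sub-buffers by decoding the event's own bus address from its CRQ descriptor, adding the member's byte offset, and re-encoding the sum as big endian. A sketch of that arithmetic; the IU layout and member names are illustrative.

    #include <stdint.h>
    #include <stddef.h>
    #include <endian.h>

    struct model_xfer_iu {
        uint8_t header[64];         /* stand-in leading fields */
        uint8_t cmd[256];           /* embedded command buffer */
        uint8_t rsp[256];           /* embedded response buffer */
    };

    static uint64_t model_member_ioba(uint64_t evt_ioba_be, size_t member_offset)
    {
        /* decode the event's bus address, add the member's offset, re-encode */
        return htobe64(be64toh(evt_ioba_be) + member_offset);
    }

    /* e.g.: cmd_va = model_member_ioba(evt_crq_ioba, offsetof(struct model_xfer_iu, cmd)); */
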
4615 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_adisc_timeout()
4673 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_adisc()
4782 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_tgt_query_target()
4960 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_discover_targets()
5041 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_channel_setup()
5114 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_channel_enquiry()
5242 struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_npiv_login()
5285 if (list_empty(&vhost->crq.sent) && in ibmvfc_npiv_logout_done()
5313 evt = ibmvfc_get_event(&vhost->crq); in ibmvfc_npiv_logout()
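
Every MAD-based path above (PRLI, PLOGI, implicit logout, move login, ADISC, query target, discovery, channel setup/enquiry, NPIV login and logout) allocates its event from the same &vhost->crq queue via ibmvfc_get_event(). That helper's body is not part of this listing; the free-list pop below is an assumed model of what those call sites rely on, not the driver's actual implementation.

    #include <pthread.h>
    #include <stddef.h>

    struct model_event { struct model_event *next; };

    struct model_queue {
        pthread_mutex_t l_lock;         /* models the queue's list lock */
        struct model_event *free;       /* singly linked free list (assumed) */
    };

    static struct model_event *model_get_event(struct model_queue *q)
    {
        struct model_event *evt;

        pthread_mutex_lock(&q->l_lock);
        evt = q->free;                  /* may be NULL if the pool is exhausted */
        if (evt)
            q->free = evt->next;
        pthread_mutex_unlock(&q->l_lock);
        return evt;
    }
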
5775 fmt_size = sizeof(*queue->msgs.crq); in ibmvfc_alloc_queue()
5832 struct ibmvfc_queue *crq = &vhost->crq; in ibmvfc_init_crq() local
5835 if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT)) in ibmvfc_init_crq()
5839 crq->msg_token, PAGE_SIZE); in ibmvfc_init_crq()
5875 ibmvfc_free_queue(vhost, crq); in ibmvfc_init_crq()
5954 memset(scrq->msgs.crq, 0, PAGE_SIZE); in ibmvfc_deregister_scsi_channel()
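
The setup/teardown edges close the picture: ibmvfc_alloc_queue sizes the ring by the entry format (5775), ibmvfc_init_crq registers the ring's DMA token with a PAGE_SIZE length (5835-5839) and frees the queue if registration fails (5875), and the sub-CRQ deregister path zeroes the ring (5954) so no stale valid bits survive. A lifecycle sketch with illustrative callbacks standing in for the DMA mapping and firmware register calls.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define MODEL_PAGE_SIZE 4096

    struct model_queue {
        void     *msgs;         /* page-sized message ring */
        uint64_t  msg_token;    /* bus address handed to firmware */
        unsigned  cur;
    };

    static int model_init_queue(struct model_queue *q,
                                uint64_t (*map_fn)(void *buf, size_t len),
                                int (*register_fn)(uint64_t token, size_t len))
    {
        q->msgs = calloc(1, MODEL_PAGE_SIZE);       /* ring starts with all valid bits clear */
        if (!q->msgs)
            return -1;
        q->cur = 0;
        q->msg_token = map_fn(q->msgs, MODEL_PAGE_SIZE);
        if (register_fn(q->msg_token, MODEL_PAGE_SIZE)) {
            free(q->msgs);                          /* mirror the error unwind at 5875 */
            q->msgs = NULL;
            return -1;
        }
        return 0;
    }

    static void model_deregister_queue(struct model_queue *q)
    {
        memset(q->msgs, 0, MODEL_PAGE_SIZE);        /* clear stale entries, as at 5954 */
    }
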