
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/blk-mq.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
60 /* desired maximum for a single sequence - if sg list allows it */
93 struct list_head fcp_list; /* tgtport->fcp_list */
180 return (iodptr - iodptr->tgtport->iod); in nvmet_fc_iodnum()
186 return (fodptr - fodptr->queue->fod); in nvmet_fc_fodnum()
202 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
207 return (assoc->association_id | qid); in nvmet_fc_makeconnid()
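/*
 * Illustrative note (not part of the original file): the association ID is
 * generated with its low BYTES_FOR_QID_SHIFT (16) bits clear, so the
 * connection ID above is simply the association ID with the queue ID OR'd
 * into the low 16 bits, e.g. association_id 0x12345678abcd0000 and qid 3
 * give connection_id 0x12345678abcd0003. The hypothetical helper below
 * mirrors how the qid is recovered with NVMET_FC_QUEUEID_MASK.
 */
static inline u16 example_connid_to_qid(u64 connection_id)
{
	return (u16)(connection_id & NVMET_FC_QUEUEID_MASK);
}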
268 /* *********************** FC-NVME DMA Handling **************************** */
333 s->dma_address = 0L; in fc_map_sg()
335 s->dma_length = s->length; in fc_map_sg()
357 /* ********************** FC-NVME LS XMT Handling ************************* */
363 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; in __nvmet_fc_finish_ls_req()
364 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_finish_ls_req()
367 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
369 if (!lsop->req_queued) { in __nvmet_fc_finish_ls_req()
370 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
374 list_del(&lsop->lsreq_list); in __nvmet_fc_finish_ls_req()
376 lsop->req_queued = false; in __nvmet_fc_finish_ls_req()
378 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
380 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_finish_ls_req()
381 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_finish_ls_req()
385 queue_work(nvmet_wq, &tgtport->put_work); in __nvmet_fc_finish_ls_req()
393 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_send_ls_req()
397 if (!tgtport->ops->ls_req) in __nvmet_fc_send_ls_req()
398 return -EOPNOTSUPP; in __nvmet_fc_send_ls_req()
401 return -ESHUTDOWN; in __nvmet_fc_send_ls_req()
403 lsreq->done = done; in __nvmet_fc_send_ls_req()
404 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
405 INIT_LIST_HEAD(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
407 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, in __nvmet_fc_send_ls_req()
408 lsreq->rqstlen + lsreq->rsplen, in __nvmet_fc_send_ls_req()
410 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { in __nvmet_fc_send_ls_req()
411 ret = -EFAULT; in __nvmet_fc_send_ls_req()
414 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; in __nvmet_fc_send_ls_req()
416 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
418 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); in __nvmet_fc_send_ls_req()
420 lsop->req_queued = true; in __nvmet_fc_send_ls_req()
422 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
424 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, in __nvmet_fc_send_ls_req()
432 lsop->ls_error = ret; in __nvmet_fc_send_ls_req()
433 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
434 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
435 list_del(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
436 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
437 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_send_ls_req()
438 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_send_ls_req()
464 /* fc-nvme target doesn't care about success or failure of cmd */ in nvmet_fc_disconnect_assoc_done()
470 * This routine sends a FC-NVME LS to disconnect (aka terminate)
471 * the FC-NVME Association. Terminating the association also
472 * terminates the FC-NVME connections (per queue, both admin and io
474 * down, and the related FC-NVME Association ID and Connection IDs
477 * The behavior of the fc-nvme target is such that it's
480 * connectivity with the fc-nvme host, so the target may never get a
483 * continue on with terminating the association. If the fc-nvme host
489 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_xmt_disconnect_assoc()
501 if (!tgtport->ops->ls_req || !assoc->hostport || in nvmet_fc_xmt_disconnect_assoc()
502 assoc->hostport->invalid) in nvmet_fc_xmt_disconnect_assoc()
507 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvmet_fc_xmt_disconnect_assoc()
509 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
511 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_xmt_disconnect_assoc()
517 lsreq = &lsop->ls_req; in nvmet_fc_xmt_disconnect_assoc()
518 if (tgtport->ops->lsrqst_priv_sz) in nvmet_fc_xmt_disconnect_assoc()
519 lsreq->private = (void *)&discon_acc[1]; in nvmet_fc_xmt_disconnect_assoc()
521 lsreq->private = NULL; in nvmet_fc_xmt_disconnect_assoc()
523 lsop->tgtport = tgtport; in nvmet_fc_xmt_disconnect_assoc()
524 lsop->hosthandle = assoc->hostport->hosthandle; in nvmet_fc_xmt_disconnect_assoc()
527 assoc->association_id); in nvmet_fc_xmt_disconnect_assoc()
532 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
534 tgtport->fc_target_port.port_num, assoc->a_id, ret); in nvmet_fc_xmt_disconnect_assoc()
540 /* *********************** FC-NVME Port Management ************************ */
552 return -ENOMEM; in nvmet_fc_alloc_ls_iodlist()
554 tgtport->iod = iod; in nvmet_fc_alloc_ls_iodlist()
557 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); in nvmet_fc_alloc_ls_iodlist()
558 iod->tgtport = tgtport; in nvmet_fc_alloc_ls_iodlist()
559 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
561 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + in nvmet_fc_alloc_ls_iodlist()
564 if (!iod->rqstbuf) in nvmet_fc_alloc_ls_iodlist()
567 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; in nvmet_fc_alloc_ls_iodlist()
569 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, in nvmet_fc_alloc_ls_iodlist()
570 sizeof(*iod->rspbuf), in nvmet_fc_alloc_ls_iodlist()
572 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) in nvmet_fc_alloc_ls_iodlist()
579 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
580 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
581 for (iod--, i--; i >= 0; iod--, i--) { in nvmet_fc_alloc_ls_iodlist()
582 fc_dma_unmap_single(tgtport->dev, iod->rspdma, in nvmet_fc_alloc_ls_iodlist()
583 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_alloc_ls_iodlist()
584 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
585 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
590 return -EFAULT; in nvmet_fc_alloc_ls_iodlist()
596 struct nvmet_fc_ls_iod *iod = tgtport->iod; in nvmet_fc_free_ls_iodlist()
600 fc_dma_unmap_single(tgtport->dev, in nvmet_fc_free_ls_iodlist()
601 iod->rspdma, sizeof(*iod->rspbuf), in nvmet_fc_free_ls_iodlist()
603 kfree(iod->rqstbuf); in nvmet_fc_free_ls_iodlist()
604 list_del(&iod->ls_rcv_list); in nvmet_fc_free_ls_iodlist()
606 kfree(tgtport->iod); in nvmet_fc_free_ls_iodlist()
615 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
616 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, in nvmet_fc_alloc_ls_iod()
619 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); in nvmet_fc_alloc_ls_iod()
620 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
631 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
632 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_free_ls_iod()
633 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
640 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_prep_fcp_iodlist()
643 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
644 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); in nvmet_fc_prep_fcp_iodlist()
645 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
646 fod->queue = queue; in nvmet_fc_prep_fcp_iodlist()
647 fod->active = false; in nvmet_fc_prep_fcp_iodlist()
648 fod->abort = false; in nvmet_fc_prep_fcp_iodlist()
649 fod->aborted = false; in nvmet_fc_prep_fcp_iodlist()
650 fod->fcpreq = NULL; in nvmet_fc_prep_fcp_iodlist()
651 list_add_tail(&fod->fcp_list, &queue->fod_list); in nvmet_fc_prep_fcp_iodlist()
652 spin_lock_init(&fod->flock); in nvmet_fc_prep_fcp_iodlist()
654 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
655 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_iodlist()
656 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
657 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
658 for (fod--, i--; i >= 0; fod--, i--) { in nvmet_fc_prep_fcp_iodlist()
659 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
660 sizeof(fod->rspiubuf), in nvmet_fc_prep_fcp_iodlist()
662 fod->rspdma = 0L; in nvmet_fc_prep_fcp_iodlist()
663 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
675 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_destroy_fcp_iodlist()
678 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
679 if (fod->rspdma) in nvmet_fc_destroy_fcp_iodlist()
680 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
681 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_destroy_fcp_iodlist()
690 lockdep_assert_held(&queue->qlock); in nvmet_fc_alloc_fcp_iod()
692 fod = list_first_entry_or_null(&queue->fod_list, in nvmet_fc_alloc_fcp_iod()
695 list_del(&fod->fcp_list); in nvmet_fc_alloc_fcp_iod()
696 fod->active = true; in nvmet_fc_alloc_fcp_iod()
712 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_queue_fcp_req()
718 fcpreq->hwqid = queue->qid ? in nvmet_fc_queue_fcp_req()
719 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; in nvmet_fc_queue_fcp_req()
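/*
 * Illustrative note (not part of the original file): the admin queue
 * (qid 0) always maps to hardware queue 0, while I/O queues are spread
 * round-robin across the LLDD's queues, e.g. with max_hw_queues = 4,
 * qid 1..4 map to hwqid 0..3 and qid 5 wraps back to hwqid 0.
 */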
731 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
739 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_free_fcp_iod()
740 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod()
744 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
745 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_free_fcp_iod()
747 fcpreq->nvmet_fc_private = NULL; in nvmet_fc_free_fcp_iod()
749 fod->active = false; in nvmet_fc_free_fcp_iod()
750 fod->abort = false; in nvmet_fc_free_fcp_iod()
751 fod->aborted = false; in nvmet_fc_free_fcp_iod()
752 fod->writedataactive = false; in nvmet_fc_free_fcp_iod()
753 fod->fcpreq = NULL; in nvmet_fc_free_fcp_iod()
755 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
760 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
761 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_free_fcp_iod()
764 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); in nvmet_fc_free_fcp_iod()
765 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
769 /* Re-use the fod for the next pending cmd that was deferred */ in nvmet_fc_free_fcp_iod()
770 list_del(&deferfcp->req_list); in nvmet_fc_free_fcp_iod()
772 fcpreq = deferfcp->fcp_req; in nvmet_fc_free_fcp_iod()
775 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); in nvmet_fc_free_fcp_iod()
777 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
780 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); in nvmet_fc_free_fcp_iod()
783 fcpreq->rspaddr = NULL; in nvmet_fc_free_fcp_iod()
784 fcpreq->rsplen = 0; in nvmet_fc_free_fcp_iod()
785 fcpreq->nvmet_fc_private = fod; in nvmet_fc_free_fcp_iod()
786 fod->fcpreq = fcpreq; in nvmet_fc_free_fcp_iod()
787 fod->active = true; in nvmet_fc_free_fcp_iod()
790 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
797 queue_work(queue->work_q, &fod->defer_work); in nvmet_fc_free_fcp_iod()
814 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, in nvmet_fc_alloc_target_queue()
815 assoc->tgtport->fc_target_port.port_num, in nvmet_fc_alloc_target_queue()
816 assoc->a_id, qid); in nvmet_fc_alloc_target_queue()
817 if (!queue->work_q) in nvmet_fc_alloc_target_queue()
820 queue->qid = qid; in nvmet_fc_alloc_target_queue()
821 queue->sqsize = sqsize; in nvmet_fc_alloc_target_queue()
822 queue->assoc = assoc; in nvmet_fc_alloc_target_queue()
823 INIT_LIST_HEAD(&queue->fod_list); in nvmet_fc_alloc_target_queue()
824 INIT_LIST_HEAD(&queue->avail_defer_list); in nvmet_fc_alloc_target_queue()
825 INIT_LIST_HEAD(&queue->pending_cmd_list); in nvmet_fc_alloc_target_queue()
826 atomic_set(&queue->connected, 0); in nvmet_fc_alloc_target_queue()
827 atomic_set(&queue->sqtail, 0); in nvmet_fc_alloc_target_queue()
828 atomic_set(&queue->rsn, 1); in nvmet_fc_alloc_target_queue()
829 atomic_set(&queue->zrspcnt, 0); in nvmet_fc_alloc_target_queue()
830 spin_lock_init(&queue->qlock); in nvmet_fc_alloc_target_queue()
831 kref_init(&queue->ref); in nvmet_fc_alloc_target_queue()
833 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
835 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_fc_alloc_target_queue()
839 WARN_ON(assoc->queues[qid]); in nvmet_fc_alloc_target_queue()
840 assoc->queues[qid] = queue; in nvmet_fc_alloc_target_queue()
845 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
846 destroy_workqueue(queue->work_q); in nvmet_fc_alloc_target_queue()
859 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); in nvmet_fc_tgt_queue_free()
861 destroy_workqueue(queue->work_q); in nvmet_fc_tgt_queue_free()
869 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); in nvmet_fc_tgt_q_put()
875 return kref_get_unless_zero(&queue->ref); in nvmet_fc_tgt_q_get()
882 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; in nvmet_fc_delete_target_queue()
883 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_delete_target_queue()
889 disconnect = atomic_xchg(&queue->connected, 0); in nvmet_fc_delete_target_queue()
895 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
897 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
898 if (fod->active) { in nvmet_fc_delete_target_queue()
899 spin_lock(&fod->flock); in nvmet_fc_delete_target_queue()
900 fod->abort = true; in nvmet_fc_delete_target_queue()
906 if (fod->writedataactive) { in nvmet_fc_delete_target_queue()
907 fod->aborted = true; in nvmet_fc_delete_target_queue()
908 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
909 tgtport->ops->fcp_abort( in nvmet_fc_delete_target_queue()
910 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
912 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
917 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, in nvmet_fc_delete_target_queue()
919 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
924 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_delete_target_queue()
929 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
930 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
932 tgtport->ops->defer_rcv(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
933 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
935 tgtport->ops->fcp_abort(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
936 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
938 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
939 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
946 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
948 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
950 flush_workqueue(queue->work_q); in nvmet_fc_delete_target_queue()
952 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_fc_delete_target_queue()
970 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_queue()
971 if (association_id == assoc->association_id) { in nvmet_fc_find_target_queue()
972 queue = assoc->queues[qid]; in nvmet_fc_find_target_queue()
974 (!atomic_read(&queue->connected) || in nvmet_fc_find_target_queue()
990 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; in nvmet_fc_hostport_free()
993 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_hostport_free()
994 list_del(&hostport->host_list); in nvmet_fc_hostport_free()
995 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_hostport_free()
996 if (tgtport->ops->host_release && hostport->invalid) in nvmet_fc_hostport_free()
997 tgtport->ops->host_release(hostport->hosthandle); in nvmet_fc_hostport_free()
1005 kref_put(&hostport->ref, nvmet_fc_hostport_free); in nvmet_fc_hostport_put()
1011 return kref_get_unless_zero(&hostport->ref); in nvmet_fc_hostport_get()
1018 if (!hostport || !hostport->hosthandle) in nvmet_fc_free_hostport()
1029 lockdep_assert_held(&tgtport->lock); in nvmet_fc_match_hostport()
1031 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_match_hostport()
1032 if (host->hosthandle == hosthandle && !host->invalid) { in nvmet_fc_match_hostport()
1056 return ERR_PTR(-EINVAL); in nvmet_fc_alloc_hostport()
1058 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1060 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1063 /* no new allocation - release reference */ in nvmet_fc_alloc_hostport()
1070 /* no new allocation - release reference */ in nvmet_fc_alloc_hostport()
1072 return ERR_PTR(-ENOMEM); in nvmet_fc_alloc_hostport()
1075 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1082 newhost->tgtport = tgtport; in nvmet_fc_alloc_hostport()
1083 newhost->hosthandle = hosthandle; in nvmet_fc_alloc_hostport()
1084 INIT_LIST_HEAD(&newhost->host_list); in nvmet_fc_alloc_hostport()
1085 kref_init(&newhost->ref); in nvmet_fc_alloc_hostport()
1087 list_add_tail(&newhost->host_list, &tgtport->host_list); in nvmet_fc_alloc_hostport()
1089 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1106 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_assoc_work()
1115 nvmet_fc_tgtport_get(assoc->tgtport); in nvmet_fc_schedule_delete_assoc()
1116 queue_work(nvmet_wq, &assoc->del_work); in nvmet_fc_schedule_delete_assoc()
1128 if (!tgtport->pe) in nvmet_fc_alloc_target_assoc()
1135 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); in nvmet_fc_alloc_target_assoc()
1142 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); in nvmet_fc_alloc_target_assoc()
1143 if (IS_ERR(assoc->hostport)) in nvmet_fc_alloc_target_assoc()
1146 assoc->tgtport = tgtport; in nvmet_fc_alloc_target_assoc()
1147 assoc->a_id = idx; in nvmet_fc_alloc_target_assoc()
1148 INIT_LIST_HEAD(&assoc->a_list); in nvmet_fc_alloc_target_assoc()
1149 kref_init(&assoc->ref); in nvmet_fc_alloc_target_assoc()
1150 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); in nvmet_fc_alloc_target_assoc()
1151 atomic_set(&assoc->terminating, 0); in nvmet_fc_alloc_target_assoc()
1154 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); in nvmet_fc_alloc_target_assoc()
1157 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1159 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { in nvmet_fc_alloc_target_assoc()
1160 if (ran == tmpassoc->association_id) { in nvmet_fc_alloc_target_assoc()
1166 assoc->association_id = ran; in nvmet_fc_alloc_target_assoc()
1167 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); in nvmet_fc_alloc_target_assoc()
1169 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1177 ida_free(&tgtport->assoc_cnt, idx); in nvmet_fc_alloc_target_assoc()
1188 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_target_assoc_free()
1193 for (i = NVMET_NR_QUEUES; i >= 0; i--) { in nvmet_fc_target_assoc_free()
1194 if (assoc->queues[i]) in nvmet_fc_target_assoc_free()
1195 nvmet_fc_delete_target_queue(assoc->queues[i]); in nvmet_fc_target_assoc_free()
1201 nvmet_fc_free_hostport(assoc->hostport); in nvmet_fc_target_assoc_free()
1202 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1203 oldls = assoc->rcv_disconn; in nvmet_fc_target_assoc_free()
1204 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1208 ida_free(&tgtport->assoc_cnt, assoc->a_id); in nvmet_fc_target_assoc_free()
1209 dev_info(tgtport->dev, in nvmet_fc_target_assoc_free()
1211 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_target_assoc_free()
1219 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); in nvmet_fc_tgt_a_put()
1225 return kref_get_unless_zero(&assoc->ref); in nvmet_fc_tgt_a_get()
1231 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_target_assoc()
1235 terminating = atomic_xchg(&assoc->terminating, 1); in nvmet_fc_delete_target_assoc()
1241 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1242 list_del_rcu(&assoc->a_list); in nvmet_fc_delete_target_assoc()
1243 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1247 /* ensure all in-flight I/Os have been processed */ in nvmet_fc_delete_target_assoc()
1248 for (i = NVMET_NR_QUEUES; i >= 0; i--) { in nvmet_fc_delete_target_assoc()
1249 if (assoc->queues[i]) in nvmet_fc_delete_target_assoc()
1250 flush_workqueue(assoc->queues[i]->work_q); in nvmet_fc_delete_target_assoc()
1253 dev_info(tgtport->dev, in nvmet_fc_delete_target_assoc()
1255 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_delete_target_assoc()
1266 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_assoc()
1267 if (association_id == assoc->association_id) { in nvmet_fc_find_target_assoc()
1286 pe->tgtport = tgtport; in nvmet_fc_portentry_bind()
1287 tgtport->pe = pe; in nvmet_fc_portentry_bind()
1289 pe->port = port; in nvmet_fc_portentry_bind()
1290 port->priv = pe; in nvmet_fc_portentry_bind()
1292 pe->node_name = tgtport->fc_target_port.node_name; in nvmet_fc_portentry_bind()
1293 pe->port_name = tgtport->fc_target_port.port_name; in nvmet_fc_portentry_bind()
1294 INIT_LIST_HEAD(&pe->pe_list); in nvmet_fc_portentry_bind()
1296 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); in nvmet_fc_portentry_bind()
1305 if (pe->tgtport) in nvmet_fc_portentry_unbind()
1306 pe->tgtport->pe = NULL; in nvmet_fc_portentry_unbind()
1307 list_del(&pe->pe_list); in nvmet_fc_portentry_unbind()
1314 * re-registration can resume operation.
1323 pe = tgtport->pe; in nvmet_fc_portentry_unbind_tgt()
1325 pe->tgtport = NULL; in nvmet_fc_portentry_unbind_tgt()
1326 tgtport->pe = NULL; in nvmet_fc_portentry_unbind_tgt()
1346 if (tgtport->fc_target_port.node_name == pe->node_name && in nvmet_fc_portentry_rebind_tgt()
1347 tgtport->fc_target_port.port_name == pe->port_name) { in nvmet_fc_portentry_rebind_tgt()
1348 WARN_ON(pe->tgtport); in nvmet_fc_portentry_rebind_tgt()
1349 tgtport->pe = pe; in nvmet_fc_portentry_rebind_tgt()
1350 pe->tgtport = tgtport; in nvmet_fc_portentry_rebind_tgt()
1358 * nvmet_fc_register_targetport - transport entry point called by an
1372 * (ex: -ENXIO) upon failure.
1384 if (!template->xmt_ls_rsp || !template->fcp_op || in nvmet_fc_register_targetport()
1385 !template->fcp_abort || in nvmet_fc_register_targetport()
1386 !template->fcp_req_release || !template->targetport_delete || in nvmet_fc_register_targetport()
1387 !template->max_hw_queues || !template->max_sgl_segments || in nvmet_fc_register_targetport()
1388 !template->max_dif_sgl_segments || !template->dma_boundary) { in nvmet_fc_register_targetport()
1389 ret = -EINVAL; in nvmet_fc_register_targetport()
1393 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), in nvmet_fc_register_targetport()
1396 ret = -ENOMEM; in nvmet_fc_register_targetport()
1402 ret = -ENOSPC; in nvmet_fc_register_targetport()
1407 ret = -ENODEV; in nvmet_fc_register_targetport()
1411 newrec->fc_target_port.node_name = pinfo->node_name; in nvmet_fc_register_targetport()
1412 newrec->fc_target_port.port_name = pinfo->port_name; in nvmet_fc_register_targetport()
1413 if (template->target_priv_sz) in nvmet_fc_register_targetport()
1414 newrec->fc_target_port.private = &newrec[1]; in nvmet_fc_register_targetport()
1416 newrec->fc_target_port.private = NULL; in nvmet_fc_register_targetport()
1417 newrec->fc_target_port.port_id = pinfo->port_id; in nvmet_fc_register_targetport()
1418 newrec->fc_target_port.port_num = idx; in nvmet_fc_register_targetport()
1419 INIT_LIST_HEAD(&newrec->tgt_list); in nvmet_fc_register_targetport()
1420 newrec->dev = dev; in nvmet_fc_register_targetport()
1421 newrec->ops = template; in nvmet_fc_register_targetport()
1422 spin_lock_init(&newrec->lock); in nvmet_fc_register_targetport()
1423 INIT_LIST_HEAD(&newrec->ls_rcv_list); in nvmet_fc_register_targetport()
1424 INIT_LIST_HEAD(&newrec->ls_req_list); in nvmet_fc_register_targetport()
1425 INIT_LIST_HEAD(&newrec->ls_busylist); in nvmet_fc_register_targetport()
1426 INIT_LIST_HEAD(&newrec->assoc_list); in nvmet_fc_register_targetport()
1427 INIT_LIST_HEAD(&newrec->host_list); in nvmet_fc_register_targetport()
1428 kref_init(&newrec->ref); in nvmet_fc_register_targetport()
1429 ida_init(&newrec->assoc_cnt); in nvmet_fc_register_targetport()
1430 newrec->max_sg_cnt = template->max_sgl_segments; in nvmet_fc_register_targetport()
1431 INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); in nvmet_fc_register_targetport()
1435 ret = -ENOMEM; in nvmet_fc_register_targetport()
1442 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); in nvmet_fc_register_targetport()
1445 *portptr = &newrec->fc_target_port; in nvmet_fc_register_targetport()
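/*
 * Illustrative sketch (not part of the original file): how an LLDD might
 * register one of its local FC ports as an NVME target port. The function
 * name and parameters are hypothetical; "tmpl" is the LLDD's
 * nvmet_fc_target_template and must supply the mandatory handlers and
 * limits checked above (xmt_ls_rsp, fcp_op, fcp_abort, fcp_req_release,
 * targetport_delete, max_hw_queues, max_sgl_segments,
 * max_dif_sgl_segments, dma_boundary).
 */
static int example_lldd_register(struct device *dev,
				 struct nvmet_fc_target_template *tmpl,
				 u64 wwnn, u64 wwpn, u32 d_id,
				 struct nvmet_fc_target_port **tgtport)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name = wwnn,
		.port_name = wwpn,
		.port_id   = d_id,
	};

	return nvmet_fc_register_targetport(&pinfo, tmpl, dev, tgtport);
}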
1466 struct device *dev = tgtport->dev; in nvmet_fc_free_tgtport()
1470 list_del(&tgtport->tgt_list); in nvmet_fc_free_tgtport()
1476 tgtport->ops->targetport_delete(&tgtport->fc_target_port); in nvmet_fc_free_tgtport()
1479 tgtport->fc_target_port.port_num); in nvmet_fc_free_tgtport()
1481 ida_destroy(&tgtport->assoc_cnt); in nvmet_fc_free_tgtport()
1491 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); in nvmet_fc_tgtport_put()
1497 return kref_get_unless_zero(&tgtport->ref); in nvmet_fc_tgtport_get()
1506 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in __nvmet_fc_free_assocs()
1516 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1519 * The nvmet-fc layer ensures that any references to the hosthandle
1529 * retries by the nvmet-fc transport. The nvmet-fc transport may
1531 * NVME associations. The nvmet-fc transport will call the
1532 * ops->host_release() callback to notify the LLDD that all references
1553 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1555 &tgtport->assoc_list, a_list) { in nvmet_fc_invalidate_host()
1556 if (!assoc->hostport || in nvmet_fc_invalidate_host()
1557 assoc->hostport->hosthandle != hosthandle) in nvmet_fc_invalidate_host()
1561 assoc->hostport->invalid = 1; in nvmet_fc_invalidate_host()
1566 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1568 /* if there's nothing to wait for - call the callback */ in nvmet_fc_invalidate_host()
1569 if (noassoc && tgtport->ops->host_release) in nvmet_fc_invalidate_host()
1570 tgtport->ops->host_release(hosthandle); in nvmet_fc_invalidate_host()
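/*
 * Illustrative sketch (not part of the original file): an LLDD that loses
 * connectivity to a remote (host) port invalidates the hosthandle it
 * passed on earlier LS/FCP receives; nvmet-fc then tears down the
 * affected associations and, once all references are gone, calls
 * ops->host_release(). The function name is hypothetical.
 */
static void example_lldd_rport_gone(struct nvmet_fc_target_port *targetport,
				    void *hosthandle)
{
	nvmet_fc_invalidate_host(targetport, hosthandle);
}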
1595 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_delete_ctrl()
1596 queue = assoc->queues[0]; in nvmet_fc_delete_ctrl()
1597 if (queue && queue->nvme_sq.ctrl == ctrl) { in nvmet_fc_delete_ctrl()
1619 * nvmet_fc_unregister_targetport - transport entry point called by an
1627 * (ex: -ENXIO) upon failure.
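/*
 * Illustrative sketch (not part of the original file): an LLDD tears down
 * its binding by unregistering the target port handle it was given at
 * registration time. The function name is hypothetical.
 */
static void example_lldd_unregister(struct nvmet_fc_target_port *targetport)
{
	int ret = nvmet_fc_unregister_targetport(targetport);

	if (ret)
		pr_err("nvmet_fc_unregister_targetport failed: %d\n", ret);
}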
1655 /* ********************** FC-NVME LS RCV Handling ************************* */
1662 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; in nvmet_fc_ls_create_association()
1663 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; in nvmet_fc_ls_create_association()
1670 * FC-NVME spec changes. There are initiators sending different in nvmet_fc_ls_create_association()
1677 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) in nvmet_fc_ls_create_association()
1679 else if (be32_to_cpu(rqst->desc_list_len) < in nvmet_fc_ls_create_association()
1682 else if (rqst->assoc_cmd.desc_tag != in nvmet_fc_ls_create_association()
1685 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < in nvmet_fc_ls_create_association()
1688 else if (!rqst->assoc_cmd.ersp_ratio || in nvmet_fc_ls_create_association()
1689 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= in nvmet_fc_ls_create_association()
1690 be16_to_cpu(rqst->assoc_cmd.sqsize))) in nvmet_fc_ls_create_association()
1695 iod->assoc = nvmet_fc_alloc_target_assoc( in nvmet_fc_ls_create_association()
1696 tgtport, iod->hosthandle); in nvmet_fc_ls_create_association()
1697 if (!iod->assoc) in nvmet_fc_ls_create_association()
1700 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, in nvmet_fc_ls_create_association()
1701 be16_to_cpu(rqst->assoc_cmd.sqsize)); in nvmet_fc_ls_create_association()
1704 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_association()
1710 dev_err(tgtport->dev, in nvmet_fc_ls_create_association()
1713 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_association()
1714 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_association()
1720 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); in nvmet_fc_ls_create_association()
1721 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_association()
1722 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_association()
1724 dev_info(tgtport->dev, in nvmet_fc_ls_create_association()
1726 tgtport->fc_target_port.port_num, iod->assoc->a_id); in nvmet_fc_ls_create_association()
1730 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_association()
1736 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); in nvmet_fc_ls_create_association()
1737 acc->associd.desc_len = in nvmet_fc_ls_create_association()
1740 acc->associd.association_id = in nvmet_fc_ls_create_association()
1741 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); in nvmet_fc_ls_create_association()
1742 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_association()
1743 acc->connectid.desc_len = in nvmet_fc_ls_create_association()
1746 acc->connectid.connection_id = acc->associd.association_id; in nvmet_fc_ls_create_association()
1753 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; in nvmet_fc_ls_create_connection()
1754 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; in nvmet_fc_ls_create_connection()
1760 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) in nvmet_fc_ls_create_connection()
1762 else if (rqst->desc_list_len != in nvmet_fc_ls_create_connection()
1766 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) in nvmet_fc_ls_create_connection()
1768 else if (rqst->associd.desc_len != in nvmet_fc_ls_create_connection()
1772 else if (rqst->connect_cmd.desc_tag != in nvmet_fc_ls_create_connection()
1775 else if (rqst->connect_cmd.desc_len != in nvmet_fc_ls_create_connection()
1779 else if (!rqst->connect_cmd.ersp_ratio || in nvmet_fc_ls_create_connection()
1780 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= in nvmet_fc_ls_create_connection()
1781 be16_to_cpu(rqst->connect_cmd.sqsize))) in nvmet_fc_ls_create_connection()
1786 iod->assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_create_connection()
1787 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_create_connection()
1788 if (!iod->assoc) in nvmet_fc_ls_create_connection()
1791 queue = nvmet_fc_alloc_target_queue(iod->assoc, in nvmet_fc_ls_create_connection()
1792 be16_to_cpu(rqst->connect_cmd.qid), in nvmet_fc_ls_create_connection()
1793 be16_to_cpu(rqst->connect_cmd.sqsize)); in nvmet_fc_ls_create_connection()
1798 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_connection()
1803 dev_err(tgtport->dev, in nvmet_fc_ls_create_connection()
1806 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_connection()
1807 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_connection()
1815 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); in nvmet_fc_ls_create_connection()
1816 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_connection()
1817 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_connection()
1821 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_connection()
1826 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_connection()
1827 acc->connectid.desc_len = in nvmet_fc_ls_create_connection()
1830 acc->connectid.connection_id = in nvmet_fc_ls_create_connection()
1831 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, in nvmet_fc_ls_create_connection()
1832 be16_to_cpu(rqst->connect_cmd.qid))); in nvmet_fc_ls_create_connection()
1844 &iod->rqstbuf->rq_dis_assoc; in nvmet_fc_ls_disconnect()
1846 &iod->rspbuf->rsp_dis_assoc; in nvmet_fc_ls_disconnect()
1854 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); in nvmet_fc_ls_disconnect()
1856 /* match an active association - takes an assoc ref if !NULL */ in nvmet_fc_ls_disconnect()
1858 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_disconnect()
1859 iod->assoc = assoc; in nvmet_fc_ls_disconnect()
1865 dev_err(tgtport->dev, in nvmet_fc_ls_disconnect()
1868 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_disconnect()
1869 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1879 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_disconnect()
1895 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1896 oldls = assoc->rcv_disconn; in nvmet_fc_ls_disconnect()
1897 assoc->rcv_disconn = iod; in nvmet_fc_ls_disconnect()
1898 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1901 dev_info(tgtport->dev, in nvmet_fc_ls_disconnect()
1904 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_ls_disconnect()
1906 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, in nvmet_fc_ls_disconnect()
1907 sizeof(*iod->rspbuf), in nvmet_fc_ls_disconnect()
1909 rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1932 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; in nvmet_fc_xmt_ls_rsp_done()
1933 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_xmt_ls_rsp_done()
1935 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp_done()
1936 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp_done()
1947 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp()
1948 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp()
1950 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1952 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1956 * Actual processing routine for received FC-NVME LS Requests from the LLD
1962 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; in nvmet_fc_handle_ls_rqst()
1965 iod->lsrsp->nvme_fc_private = iod; in nvmet_fc_handle_ls_rqst()
1966 iod->lsrsp->rspbuf = iod->rspbuf; in nvmet_fc_handle_ls_rqst()
1967 iod->lsrsp->rspdma = iod->rspdma; in nvmet_fc_handle_ls_rqst()
1968 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; in nvmet_fc_handle_ls_rqst()
1970 iod->lsrsp->rsplen = 0; in nvmet_fc_handle_ls_rqst()
1972 iod->assoc = NULL; in nvmet_fc_handle_ls_rqst()
1979 switch (w0->ls_cmd) { in nvmet_fc_handle_ls_rqst()
1993 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, in nvmet_fc_handle_ls_rqst()
1994 sizeof(*iod->rspbuf), w0->ls_cmd, in nvmet_fc_handle_ls_rqst()
2003 * Actual processing routine for received FC-NVME LS Requests from the LLD
2010 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_handle_ls_rqst_work()
2017 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
2020 * The nvmet-fc layer will copy payload to an internal structure for
2045 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2047 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2048 nvmefc_ls_names[w0->ls_cmd] : "", in nvmet_fc_rcv_ls_req()
2050 return -E2BIG; in nvmet_fc_rcv_ls_req()
2054 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2056 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2057 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2058 return -ESHUTDOWN; in nvmet_fc_rcv_ls_req()
2063 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2065 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2066 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2068 return -ENOENT; in nvmet_fc_rcv_ls_req()
2071 iod->lsrsp = lsrsp; in nvmet_fc_rcv_ls_req()
2072 iod->fcpreq = NULL; in nvmet_fc_rcv_ls_req()
2073 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); in nvmet_fc_rcv_ls_req()
2074 iod->rqstdatalen = lsreqbuf_len; in nvmet_fc_rcv_ls_req()
2075 iod->hosthandle = hosthandle; in nvmet_fc_rcv_ls_req()
2077 queue_work(nvmet_wq, &iod->work); in nvmet_fc_rcv_ls_req()
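/*
 * Illustrative sketch (not part of the original file): an LLDD hands a
 * received NVME LS frame to nvmet-fc. The payload is copied internally,
 * so the LLDD's receive buffer may be reused once this returns; a
 * non-zero return means the LS was not accepted and no ->xmt_ls_rsp()
 * callback will follow. The function name and parameter names are
 * hypothetical.
 */
static int example_lldd_handle_ls(struct nvmet_fc_target_port *targetport,
				  void *rport_handle,
				  struct nvmefc_ls_rsp *lsrsp,
				  void *lsbuf, u32 lslen)
{
	return nvmet_fc_rcv_ls_req(targetport, rport_handle, lsrsp,
				   lsbuf, lslen);
}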
2096 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); in nvmet_fc_alloc_tgt_pgs()
2100 fod->data_sg = sg; in nvmet_fc_alloc_tgt_pgs()
2101 fod->data_sg_cnt = nent; in nvmet_fc_alloc_tgt_pgs()
2102 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2103 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_alloc_tgt_pgs()
2106 fod->next_sg = fod->data_sg; in nvmet_fc_alloc_tgt_pgs()
2117 if (!fod->data_sg || !fod->data_sg_cnt) in nvmet_fc_free_tgt_pgs()
2120 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2121 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_free_tgt_pgs()
2123 sgl_free(fod->data_sg); in nvmet_fc_free_tgt_pgs()
2124 fod->data_sg = NULL; in nvmet_fc_free_tgt_pgs()
2125 fod->data_sg_cnt = 0; in nvmet_fc_free_tgt_pgs()
2135 sqtail = atomic_read(&q->sqtail) % q->sqsize; in queue_90percent_full()
2137 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); in queue_90percent_full()
2138 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); in queue_90percent_full()
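/*
 * Illustrative worked example (not part of the original file): with
 * sqsize = 32 the threshold is (32 - 1) * 9 = 279, so the queue is
 * reported as "90% full" once 28 or more entries are in use
 * (28 * 10 = 280 >= 279), e.g. sqhd = 2 and sqtail = 30 give used = 28.
 */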
2149 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; in nvmet_fc_prep_fcp_rsp()
2150 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in nvmet_fc_prep_fcp_rsp()
2151 struct nvme_completion *cqe = &ersp->cqe; in nvmet_fc_prep_fcp_rsp()
2152 u32 *cqewd = (u32 *)cqe; in nvmet_fc_prep_fcp_rsp()
2156 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) in nvmet_fc_prep_fcp_rsp()
2157 xfr_length = fod->req.transfer_len; in nvmet_fc_prep_fcp_rsp()
2159 xfr_length = fod->offset; in nvmet_fc_prep_fcp_rsp()
2163 * Note: to send a 0's response, the NVME-FC host transport will in nvmet_fc_prep_fcp_rsp()
2164 * recreate the CQE. The host transport knows: sq id, SQHD (last in nvmet_fc_prep_fcp_rsp()
2166 * zero-filled CQE with those known fields filled in. Transport in nvmet_fc_prep_fcp_rsp()
2167 * must send an ersp for any condition where the cqe won't match in nvmet_fc_prep_fcp_rsp()
2170 * Here are the FC-NVME mandated cases where we must send an ersp: in nvmet_fc_prep_fcp_rsp()
2172 * force fabric commands to send ersp's (not in FC-NVME but good in nvmet_fc_prep_fcp_rsp()
2174 * normal cmds: any time status is non-zero, or status is zero in nvmet_fc_prep_fcp_rsp()
2175 * but words 0 or 1 are non-zero. in nvmet_fc_prep_fcp_rsp()
2180 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); in nvmet_fc_prep_fcp_rsp()
2181 if (!(rspcnt % fod->queue->ersp_ratio) || in nvmet_fc_prep_fcp_rsp()
2183 xfr_length != fod->req.transfer_len || in nvmet_fc_prep_fcp_rsp()
2184 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || in nvmet_fc_prep_fcp_rsp()
2185 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || in nvmet_fc_prep_fcp_rsp()
2186 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) in nvmet_fc_prep_fcp_rsp()
2189 /* re-set the fields */ in nvmet_fc_prep_fcp_rsp()
2190 fod->fcpreq->rspaddr = ersp; in nvmet_fc_prep_fcp_rsp()
2191 fod->fcpreq->rspdma = fod->rspdma; in nvmet_fc_prep_fcp_rsp()
2195 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; in nvmet_fc_prep_fcp_rsp()
2197 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); in nvmet_fc_prep_fcp_rsp()
2198 rsn = atomic_inc_return(&fod->queue->rsn); in nvmet_fc_prep_fcp_rsp()
2199 ersp->rsn = cpu_to_be32(rsn); in nvmet_fc_prep_fcp_rsp()
2200 ersp->xfrd_len = cpu_to_be32(xfr_length); in nvmet_fc_prep_fcp_rsp()
2201 fod->fcpreq->rsplen = sizeof(*ersp); in nvmet_fc_prep_fcp_rsp()
2204 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2205 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_rsp()
2214 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_abort_op()
2223 /* no need to take lock - lock was taken earlier to get here */ in nvmet_fc_abort_op()
2224 if (!fod->aborted) in nvmet_fc_abort_op()
2225 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); in nvmet_fc_abort_op()
2227 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_abort_op()
2236 fod->fcpreq->op = NVMET_FCOP_RSP; in nvmet_fc_xmt_fcp_rsp()
2237 fod->fcpreq->timeout = 0; in nvmet_fc_xmt_fcp_rsp()
2241 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2250 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_transfer_fcp_data()
2251 struct scatterlist *sg = fod->next_sg; in nvmet_fc_transfer_fcp_data()
2253 u32 remaininglen = fod->req.transfer_len - fod->offset; in nvmet_fc_transfer_fcp_data()
2257 fcpreq->op = op; in nvmet_fc_transfer_fcp_data()
2258 fcpreq->offset = fod->offset; in nvmet_fc_transfer_fcp_data()
2259 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; in nvmet_fc_transfer_fcp_data()
2270 fcpreq->sg = sg; in nvmet_fc_transfer_fcp_data()
2271 fcpreq->sg_cnt = 0; in nvmet_fc_transfer_fcp_data()
2273 fcpreq->sg_cnt < tgtport->max_sg_cnt && in nvmet_fc_transfer_fcp_data()
2275 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2279 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { in nvmet_fc_transfer_fcp_data()
2280 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2285 fod->next_sg = sg; in nvmet_fc_transfer_fcp_data()
2287 fod->next_sg = NULL; in nvmet_fc_transfer_fcp_data()
2289 fcpreq->transfer_length = tlen; in nvmet_fc_transfer_fcp_data()
2290 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2291 fcpreq->fcp_error = 0; in nvmet_fc_transfer_fcp_data()
2292 fcpreq->rsplen = 0; in nvmet_fc_transfer_fcp_data()
2295 * If the last READDATA request: check if LLDD supports in nvmet_fc_transfer_fcp_data()
2299 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && in nvmet_fc_transfer_fcp_data()
2300 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { in nvmet_fc_transfer_fcp_data()
2301 fcpreq->op = NVMET_FCOP_READDATA_RSP; in nvmet_fc_transfer_fcp_data()
2305 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2312 fod->abort = true; in nvmet_fc_transfer_fcp_data()
2315 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2316 fod->writedataactive = false; in nvmet_fc_transfer_fcp_data()
2317 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2318 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_transfer_fcp_data()
2320 fcpreq->fcp_error = ret; in nvmet_fc_transfer_fcp_data()
2321 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2322 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); in nvmet_fc_transfer_fcp_data()
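/*
 * Illustrative note (not part of the original file): an LLDD opts into the
 * combined READDATA+RSP operation above by setting
 * NVMET_FCTGTFEAT_READDATA_RSP in the target_features field of its
 * nvmet_fc_target_template; otherwise the transport issues a separate
 * NVMET_FCOP_RSP once the last read data transfer completes.
 */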
2330 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in __nvmet_fc_fod_op_abort()
2331 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort()
2335 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { in __nvmet_fc_fod_op_abort()
2336 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in __nvmet_fc_fod_op_abort()
2353 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_fod_op_done()
2354 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done()
2358 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2359 abort = fod->abort; in nvmet_fc_fod_op_done()
2360 fod->writedataactive = false; in nvmet_fc_fod_op_done()
2361 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2363 switch (fcpreq->op) { in nvmet_fc_fod_op_done()
2368 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2369 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2370 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2371 fod->abort = true; in nvmet_fc_fod_op_done()
2372 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2374 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_fod_op_done()
2378 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2379 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2380 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2381 fod->writedataactive = true; in nvmet_fc_fod_op_done()
2382 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2391 fod->req.execute(&fod->req); in nvmet_fc_fod_op_done()
2398 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2399 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2406 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { in nvmet_fc_fod_op_done()
2409 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2413 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2414 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2433 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2444 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_xmt_fcp_op_done()
2456 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in __nvmet_fc_fcp_nvme_cmd_done()
2457 struct nvme_completion *cqe = &fod->rspiubuf.cqe; in __nvmet_fc_fcp_nvme_cmd_done()
2461 spin_lock_irqsave(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2462 abort = fod->abort; in __nvmet_fc_fcp_nvme_cmd_done()
2463 spin_unlock_irqrestore(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2465 /* if we have a CQE, snoop the last sq_head value */ in __nvmet_fc_fcp_nvme_cmd_done()
2467 fod->queue->sqhd = cqe->sq_head; in __nvmet_fc_fcp_nvme_cmd_done()
2476 /* fudge up a failed CQE status for our transport error */ in __nvmet_fc_fcp_nvme_cmd_done()
2477 memset(cqe, 0, sizeof(*cqe)); in __nvmet_fc_fcp_nvme_cmd_done()
2478 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ in __nvmet_fc_fcp_nvme_cmd_done()
2479 cqe->sq_id = cpu_to_le16(fod->queue->qid); in __nvmet_fc_fcp_nvme_cmd_done()
2480 cqe->command_id = sqe->command_id; in __nvmet_fc_fcp_nvme_cmd_done()
2481 cqe->status = cpu_to_le16(status); in __nvmet_fc_fcp_nvme_cmd_done()
2485 * try to push the data even if the SQE status is non-zero. in __nvmet_fc_fcp_nvme_cmd_done()
2489 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { in __nvmet_fc_fcp_nvme_cmd_done()
2496 /* writes & no data - fall thru */ in __nvmet_fc_fcp_nvme_cmd_done()
2510 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done()
2517 * Actual processing routine for received FC-NVME I/O Requests from the LLD
2523 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; in nvmet_fc_handle_fcp_rqst()
2524 u32 xfrlen = be32_to_cpu(cmdiu->data_len); in nvmet_fc_handle_fcp_rqst()
2536 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; in nvmet_fc_handle_fcp_rqst()
2538 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { in nvmet_fc_handle_fcp_rqst()
2539 fod->io_dir = NVMET_FCP_WRITE; in nvmet_fc_handle_fcp_rqst()
2540 if (!nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2542 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { in nvmet_fc_handle_fcp_rqst()
2543 fod->io_dir = NVMET_FCP_READ; in nvmet_fc_handle_fcp_rqst()
2544 if (nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2547 fod->io_dir = NVMET_FCP_NODATA; in nvmet_fc_handle_fcp_rqst()
2552 fod->req.cmd = &fod->cmdiubuf.sqe; in nvmet_fc_handle_fcp_rqst()
2553 fod->req.cqe = &fod->rspiubuf.cqe; in nvmet_fc_handle_fcp_rqst()
2554 if (!tgtport->pe) in nvmet_fc_handle_fcp_rqst()
2556 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2559 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); in nvmet_fc_handle_fcp_rqst()
2561 fod->data_sg = NULL; in nvmet_fc_handle_fcp_rqst()
2562 fod->data_sg_cnt = 0; in nvmet_fc_handle_fcp_rqst()
2564 ret = nvmet_req_init(&fod->req, in nvmet_fc_handle_fcp_rqst()
2565 &fod->queue->nvme_cq, in nvmet_fc_handle_fcp_rqst()
2566 &fod->queue->nvme_sq, in nvmet_fc_handle_fcp_rqst()
2574 fod->req.transfer_len = xfrlen; in nvmet_fc_handle_fcp_rqst()
2577 atomic_inc(&fod->queue->sqtail); in nvmet_fc_handle_fcp_rqst()
2579 if (fod->req.transfer_len) { in nvmet_fc_handle_fcp_rqst()
2582 nvmet_req_complete(&fod->req, ret); in nvmet_fc_handle_fcp_rqst()
2586 fod->req.sg = fod->data_sg; in nvmet_fc_handle_fcp_rqst()
2587 fod->req.sg_cnt = fod->data_sg_cnt; in nvmet_fc_handle_fcp_rqst()
2588 fod->offset = 0; in nvmet_fc_handle_fcp_rqst()
2590 if (fod->io_dir == NVMET_FCP_WRITE) { in nvmet_fc_handle_fcp_rqst()
2602 fod->req.execute(&fod->req); in nvmet_fc_handle_fcp_rqst()
2610 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2613 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2626 * asynchronously received - it's possible for a command to be received
2633 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2639 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2645 * transport will return a non-zero status indicating the error.
2646 * In all cases other than -EOVERFLOW, the transport has not accepted the
2670 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || in nvmet_fc_rcv_fcp_req()
2671 (cmdiu->fc_id != NVME_CMD_FC_ID) || in nvmet_fc_rcv_fcp_req()
2672 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) in nvmet_fc_rcv_fcp_req()
2673 return -EIO; in nvmet_fc_rcv_fcp_req()
2676 be64_to_cpu(cmdiu->connection_id)); in nvmet_fc_rcv_fcp_req()
2678 return -ENOTCONN; in nvmet_fc_rcv_fcp_req()
2687 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2691 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2693 fcpreq->nvmet_fc_private = fod; in nvmet_fc_rcv_fcp_req()
2694 fod->fcpreq = fcpreq; in nvmet_fc_rcv_fcp_req()
2696 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); in nvmet_fc_rcv_fcp_req()
2703 if (!tgtport->ops->defer_rcv) { in nvmet_fc_rcv_fcp_req()
2704 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2707 return -ENOENT; in nvmet_fc_rcv_fcp_req()
2710 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, in nvmet_fc_rcv_fcp_req()
2713 /* Just re-use one that was previously allocated */ in nvmet_fc_rcv_fcp_req()
2714 list_del(&deferfcp->req_list); in nvmet_fc_rcv_fcp_req()
2716 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2723 return -ENOMEM; in nvmet_fc_rcv_fcp_req()
2725 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2729 fcpreq->rspaddr = cmdiubuf; in nvmet_fc_rcv_fcp_req()
2730 fcpreq->rsplen = cmdiubuf_len; in nvmet_fc_rcv_fcp_req()
2731 deferfcp->fcp_req = fcpreq; in nvmet_fc_rcv_fcp_req()
2734 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); in nvmet_fc_rcv_fcp_req()
2738 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2740 return -EOVERFLOW; in nvmet_fc_rcv_fcp_req()
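/*
 * Illustrative sketch (not part of the original file): an LLDD passes a
 * received FCP CMD IU to nvmet-fc. A -EOVERFLOW return means the command
 * was queued for deferred handling and cmdiu/fcpreq must be preserved
 * until the ->defer_rcv() callback; any other non-zero return means the
 * command was not accepted. The function name and parameter names are
 * hypothetical.
 */
static int example_lldd_handle_fcp_cmd(struct nvmet_fc_target_port *targetport,
				       struct nvmefc_tgt_fcp_req *fcpreq,
				       void *cmdiu, u32 cmdiu_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(targetport, fcpreq, cmdiu, cmdiu_len);
	if (ret == -EOVERFLOW) {
		/* deferred: keep cmdiu and fcpreq untouched for ->defer_rcv() */
		return 0;
	}
	return ret;
}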
2745 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2751 * (template_ops->fcp_req_release() has not been called).
2771 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_rcv_fcp_abort()
2775 if (!fod || fod->fcpreq != fcpreq) in nvmet_fc_rcv_fcp_abort()
2779 queue = fod->queue; in nvmet_fc_rcv_fcp_abort()
2781 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
2782 if (fod->active) { in nvmet_fc_rcv_fcp_abort()
2788 spin_lock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2789 fod->abort = true; in nvmet_fc_rcv_fcp_abort()
2790 fod->aborted = true; in nvmet_fc_rcv_fcp_abort()
2791 spin_unlock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2793 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
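/*
 * Illustrative sketch (not part of the original file): when the initiator
 * aborts (ABTS) an exchange the LLDD still owns (fcp_req_release() has not
 * been called yet), the LLDD notifies nvmet-fc so the outstanding job is
 * terminated cleanly. The function name is hypothetical.
 */
static void example_lldd_handle_abts(struct nvmet_fc_target_port *targetport,
				     struct nvmefc_tgt_fcp_req *fcpreq)
{
	nvmet_fc_rcv_fcp_abort(targetport, fcpreq);
}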
2809 return -EINVAL; in __nvme_fc_parse_u64()
2824 substring_t wwn = { name, &name[sizeof(name)-1] }; in nvme_fc_parse_traddr()
2829 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && in nvme_fc_parse_traddr()
2831 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { in nvme_fc_parse_traddr()
2836 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && in nvme_fc_parse_traddr()
2838 "pn-", NVME_FC_TRADDR_NNLEN))) { in nvme_fc_parse_traddr()
2849 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) in nvme_fc_parse_traddr()
2853 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) in nvme_fc_parse_traddr()
2860 return -EINVAL; in nvme_fc_parse_traddr()
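/*
 * Illustrative note (not part of the original file): the parser accepts a
 * configfs traddr of the form "nn-0x<16 hex digits>:pn-0x<16 hex digits>"
 * or the shorter "nn-<16 hex digits>:pn-<16 hex digits>", e.g.
 * "nn-0x20000090fa942779:pn-0x10000090fa942779", yielding the WWNN/WWPN
 * that nvmet_fc_add_port() matches against registered target ports.
 */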
2873 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || in nvmet_fc_add_port()
2874 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) in nvmet_fc_add_port()
2875 return -EINVAL; in nvmet_fc_add_port()
2879 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, in nvmet_fc_add_port()
2880 sizeof(port->disc_addr.traddr)); in nvmet_fc_add_port()
2886 return -ENOMEM; in nvmet_fc_add_port()
2888 ret = -ENXIO; in nvmet_fc_add_port()
2891 if ((tgtport->fc_target_port.node_name == traddr.nn) && in nvmet_fc_add_port()
2892 (tgtport->fc_target_port.port_name == traddr.pn)) { in nvmet_fc_add_port()
2894 if (!tgtport->pe) { in nvmet_fc_add_port()
2898 ret = -EALREADY; in nvmet_fc_add_port()
2913 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_remove_port()
2918 __nvmet_fc_free_assocs(pe->tgtport); in nvmet_fc_remove_port()
2926 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_discovery_chg()
2927 struct nvmet_fc_tgtport *tgtport = pe->tgtport; in nvmet_fc_discovery_chg()
2929 if (tgtport && tgtport->ops->discovery_event) in nvmet_fc_discovery_chg()
2930 tgtport->ops->discovery_event(&tgtport->fc_target_port); in nvmet_fc_discovery_chg()
2954 /* sanity check - all lports should be removed */ in nvmet_fc_exit_module()