Lines matching refs:fod (references to the struct nvmet_fc_fcp_iod pointer "fod" in the NVMe FC target code)

151 	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */  member
186 return (fodptr - fodptr->queue->fod); in nvmet_fc_fodnum()
262 struct nvmet_fc_fcp_iod *fod);
640 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_prep_fcp_iodlist() local
643 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
644 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); in nvmet_fc_prep_fcp_iodlist()
645 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
646 fod->queue = queue; in nvmet_fc_prep_fcp_iodlist()
647 fod->active = false; in nvmet_fc_prep_fcp_iodlist()
648 fod->abort = false; in nvmet_fc_prep_fcp_iodlist()
649 fod->aborted = false; in nvmet_fc_prep_fcp_iodlist()
650 fod->fcpreq = NULL; in nvmet_fc_prep_fcp_iodlist()
651 list_add_tail(&fod->fcp_list, &queue->fod_list); in nvmet_fc_prep_fcp_iodlist()
652 spin_lock_init(&fod->flock); in nvmet_fc_prep_fcp_iodlist()
654 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
655 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_iodlist()
656 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
657 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
658 for (fod--, i--; i >= 0; fod--, i--) { in nvmet_fc_prep_fcp_iodlist()
659 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
660 sizeof(fod->rspiubuf), in nvmet_fc_prep_fcp_iodlist()
662 fod->rspdma = 0L; in nvmet_fc_prep_fcp_iodlist()
663 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
675 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_destroy_fcp_iodlist() local
678 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
679 if (fod->rspdma) in nvmet_fc_destroy_fcp_iodlist()
680 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
681 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_destroy_fcp_iodlist()
688 struct nvmet_fc_fcp_iod *fod; in nvmet_fc_alloc_fcp_iod() local
692 fod = list_first_entry_or_null(&queue->fod_list, in nvmet_fc_alloc_fcp_iod()
694 if (fod) { in nvmet_fc_alloc_fcp_iod()
695 list_del(&fod->fcp_list); in nvmet_fc_alloc_fcp_iod()
696 fod->active = true; in nvmet_fc_alloc_fcp_iod()
703 return fod; in nvmet_fc_alloc_fcp_iod()
712 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_queue_fcp_req() local
721 nvmet_fc_handle_fcp_rqst(tgtport, fod); in nvmet_fc_queue_fcp_req()
727 struct nvmet_fc_fcp_iod *fod = in nvmet_fc_fcp_rqst_op_defer_work() local
731 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
737 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_free_fcp_iod() argument
739 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_free_fcp_iod()
740 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod()
744 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
745 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_free_fcp_iod()
749 fod->active = false; in nvmet_fc_free_fcp_iod()
750 fod->abort = false; in nvmet_fc_free_fcp_iod()
751 fod->aborted = false; in nvmet_fc_free_fcp_iod()
752 fod->writedataactive = false; in nvmet_fc_free_fcp_iod()
753 fod->fcpreq = NULL; in nvmet_fc_free_fcp_iod()
764 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); in nvmet_fc_free_fcp_iod()
780 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); in nvmet_fc_free_fcp_iod()
785 fcpreq->nvmet_fc_private = fod; in nvmet_fc_free_fcp_iod()
786 fod->fcpreq = fcpreq; in nvmet_fc_free_fcp_iod()
787 fod->active = true; in nvmet_fc_free_fcp_iod()
797 queue_work(queue->work_q, &fod->defer_work); in nvmet_fc_free_fcp_iod()
810 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); in nvmet_fc_alloc_target_queue()
883 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_delete_target_queue() local
897 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
898 if (fod->active) { in nvmet_fc_delete_target_queue()
899 spin_lock(&fod->flock); in nvmet_fc_delete_target_queue()
900 fod->abort = true; in nvmet_fc_delete_target_queue()
906 if (fod->writedataactive) { in nvmet_fc_delete_target_queue()
907 fod->aborted = true; in nvmet_fc_delete_target_queue()
908 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
910 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
912 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
2091 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_alloc_tgt_pgs() argument
2096 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); in nvmet_fc_alloc_tgt_pgs()
2100 fod->data_sg = sg; in nvmet_fc_alloc_tgt_pgs()
2101 fod->data_sg_cnt = nent; in nvmet_fc_alloc_tgt_pgs()
2102 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2103 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_alloc_tgt_pgs()
2106 fod->next_sg = fod->data_sg; in nvmet_fc_alloc_tgt_pgs()
2115 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_free_tgt_pgs() argument
2117 if (!fod->data_sg || !fod->data_sg_cnt) in nvmet_fc_free_tgt_pgs()
2120 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2121 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_free_tgt_pgs()
2123 sgl_free(fod->data_sg); in nvmet_fc_free_tgt_pgs()
2124 fod->data_sg = NULL; in nvmet_fc_free_tgt_pgs()
2125 fod->data_sg_cnt = 0; in nvmet_fc_free_tgt_pgs()
2147 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_prep_fcp_rsp() argument
2149 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; in nvmet_fc_prep_fcp_rsp()
2150 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in nvmet_fc_prep_fcp_rsp()
2156 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) in nvmet_fc_prep_fcp_rsp()
2157 xfr_length = fod->req.transfer_len; in nvmet_fc_prep_fcp_rsp()
2159 xfr_length = fod->offset; in nvmet_fc_prep_fcp_rsp()
2180 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); in nvmet_fc_prep_fcp_rsp()
2181 if (!(rspcnt % fod->queue->ersp_ratio) || in nvmet_fc_prep_fcp_rsp()
2183 xfr_length != fod->req.transfer_len || in nvmet_fc_prep_fcp_rsp()
2186 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) in nvmet_fc_prep_fcp_rsp()
2190 fod->fcpreq->rspaddr = ersp; in nvmet_fc_prep_fcp_rsp()
2191 fod->fcpreq->rspdma = fod->rspdma; in nvmet_fc_prep_fcp_rsp()
2195 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; in nvmet_fc_prep_fcp_rsp()
2198 rsn = atomic_inc_return(&fod->queue->rsn); in nvmet_fc_prep_fcp_rsp()
2201 fod->fcpreq->rsplen = sizeof(*ersp); in nvmet_fc_prep_fcp_rsp()
2204 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2205 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_rsp()
2212 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_abort_op() argument
2214 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_abort_op()
2217 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_abort_op()
2224 if (!fod->aborted) in nvmet_fc_abort_op()
2227 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_abort_op()
2232 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_xmt_fcp_rsp() argument
2236 fod->fcpreq->op = NVMET_FCOP_RSP; in nvmet_fc_xmt_fcp_rsp()
2237 fod->fcpreq->timeout = 0; in nvmet_fc_xmt_fcp_rsp()
2239 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2241 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2243 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_xmt_fcp_rsp()
2248 struct nvmet_fc_fcp_iod *fod, u8 op) in nvmet_fc_transfer_fcp_data() argument
2250 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_transfer_fcp_data()
2251 struct scatterlist *sg = fod->next_sg; in nvmet_fc_transfer_fcp_data()
2253 u32 remaininglen = fod->req.transfer_len - fod->offset; in nvmet_fc_transfer_fcp_data()
2258 fcpreq->offset = fod->offset; in nvmet_fc_transfer_fcp_data()
2285 fod->next_sg = sg; in nvmet_fc_transfer_fcp_data()
2287 fod->next_sg = NULL; in nvmet_fc_transfer_fcp_data()
2299 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && in nvmet_fc_transfer_fcp_data()
2302 nvmet_fc_prep_fcp_rsp(tgtport, fod); in nvmet_fc_transfer_fcp_data()
2305 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2312 fod->abort = true; in nvmet_fc_transfer_fcp_data()
2315 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2316 fod->writedataactive = false; in nvmet_fc_transfer_fcp_data()
2317 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2318 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_transfer_fcp_data()
2322 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2328 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort) in __nvmet_fc_fod_op_abort() argument
2330 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in __nvmet_fc_fod_op_abort()
2331 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort()
2336 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in __nvmet_fc_fod_op_abort()
2340 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fod_op_abort()
2351 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) in nvmet_fc_fod_op_done() argument
2353 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_fod_op_done()
2354 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done()
2358 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2359 abort = fod->abort; in nvmet_fc_fod_op_done()
2360 fod->writedataactive = false; in nvmet_fc_fod_op_done()
2361 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2366 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2370 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2371 fod->abort = true; in nvmet_fc_fod_op_done()
2372 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2374 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_fod_op_done()
2378 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2379 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2380 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2381 fod->writedataactive = true; in nvmet_fc_fod_op_done()
2382 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2385 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2391 fod->req.execute(&fod->req); in nvmet_fc_fod_op_done()
2396 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2400 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_fod_op_done()
2408 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_fod_op_done()
2409 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2413 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2414 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2416 nvmet_fc_transfer_fcp_data(tgtport, fod, in nvmet_fc_fod_op_done()
2424 nvmet_fc_free_tgt_pgs(fod); in nvmet_fc_fod_op_done()
2426 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in nvmet_fc_fod_op_done()
2431 if (__nvmet_fc_fod_op_abort(fod, abort)) in nvmet_fc_fod_op_done()
2433 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2444 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_xmt_fcp_op_done() local
2446 nvmet_fc_fod_op_done(fod); in nvmet_fc_xmt_fcp_op_done()
2454 struct nvmet_fc_fcp_iod *fod, int status) in __nvmet_fc_fcp_nvme_cmd_done() argument
2456 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in __nvmet_fc_fcp_nvme_cmd_done()
2457 struct nvme_completion *cqe = &fod->rspiubuf.cqe; in __nvmet_fc_fcp_nvme_cmd_done()
2461 spin_lock_irqsave(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2462 abort = fod->abort; in __nvmet_fc_fcp_nvme_cmd_done()
2463 spin_unlock_irqrestore(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2467 fod->queue->sqhd = cqe->sq_head; in __nvmet_fc_fcp_nvme_cmd_done()
2470 nvmet_fc_abort_op(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2478 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ in __nvmet_fc_fcp_nvme_cmd_done()
2479 cqe->sq_id = cpu_to_le16(fod->queue->qid); in __nvmet_fc_fcp_nvme_cmd_done()
2489 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { in __nvmet_fc_fcp_nvme_cmd_done()
2491 nvmet_fc_transfer_fcp_data(tgtport, fod, in __nvmet_fc_fcp_nvme_cmd_done()
2500 nvmet_fc_free_tgt_pgs(fod); in __nvmet_fc_fcp_nvme_cmd_done()
2502 nvmet_fc_xmt_fcp_rsp(tgtport, fod); in __nvmet_fc_fcp_nvme_cmd_done()
2509 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req); in nvmet_fc_fcp_nvme_cmd_done() local
2510 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done()
2512 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0); in nvmet_fc_fcp_nvme_cmd_done()
2521 struct nvmet_fc_fcp_iod *fod) in nvmet_fc_handle_fcp_rqst() argument
2523 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; in nvmet_fc_handle_fcp_rqst()
2536 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; in nvmet_fc_handle_fcp_rqst()
2539 fod->io_dir = NVMET_FCP_WRITE; in nvmet_fc_handle_fcp_rqst()
2543 fod->io_dir = NVMET_FCP_READ; in nvmet_fc_handle_fcp_rqst()
2547 fod->io_dir = NVMET_FCP_NODATA; in nvmet_fc_handle_fcp_rqst()
2552 fod->req.cmd = &fod->cmdiubuf.sqe; in nvmet_fc_handle_fcp_rqst()
2553 fod->req.cqe = &fod->rspiubuf.cqe; in nvmet_fc_handle_fcp_rqst()
2556 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2559 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); in nvmet_fc_handle_fcp_rqst()
2561 fod->data_sg = NULL; in nvmet_fc_handle_fcp_rqst()
2562 fod->data_sg_cnt = 0; in nvmet_fc_handle_fcp_rqst()
2564 ret = nvmet_req_init(&fod->req, in nvmet_fc_handle_fcp_rqst()
2565 &fod->queue->nvme_cq, in nvmet_fc_handle_fcp_rqst()
2566 &fod->queue->nvme_sq, in nvmet_fc_handle_fcp_rqst()
2574 fod->req.transfer_len = xfrlen; in nvmet_fc_handle_fcp_rqst()
2577 atomic_inc(&fod->queue->sqtail); in nvmet_fc_handle_fcp_rqst()
2579 if (fod->req.transfer_len) { in nvmet_fc_handle_fcp_rqst()
2580 ret = nvmet_fc_alloc_tgt_pgs(fod); in nvmet_fc_handle_fcp_rqst()
2582 nvmet_req_complete(&fod->req, ret); in nvmet_fc_handle_fcp_rqst()
2586 fod->req.sg = fod->data_sg; in nvmet_fc_handle_fcp_rqst()
2587 fod->req.sg_cnt = fod->data_sg_cnt; in nvmet_fc_handle_fcp_rqst()
2588 fod->offset = 0; in nvmet_fc_handle_fcp_rqst()
2590 if (fod->io_dir == NVMET_FCP_WRITE) { in nvmet_fc_handle_fcp_rqst()
2592 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA); in nvmet_fc_handle_fcp_rqst()
2602 fod->req.execute(&fod->req); in nvmet_fc_handle_fcp_rqst()
2606 nvmet_fc_abort_op(tgtport, fod); in nvmet_fc_handle_fcp_rqst()
2664 struct nvmet_fc_fcp_iod *fod; in nvmet_fc_rcv_fcp_req() local
2689 fod = nvmet_fc_alloc_fcp_iod(queue); in nvmet_fc_rcv_fcp_req()
2690 if (fod) { in nvmet_fc_rcv_fcp_req()
2693 fcpreq->nvmet_fc_private = fod; in nvmet_fc_rcv_fcp_req()
2694 fod->fcpreq = fcpreq; in nvmet_fc_rcv_fcp_req()
2696 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); in nvmet_fc_rcv_fcp_req()
2771 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_rcv_fcp_abort() local
2775 if (!fod || fod->fcpreq != fcpreq) in nvmet_fc_rcv_fcp_abort()
2779 queue = fod->queue; in nvmet_fc_rcv_fcp_abort()
2782 if (fod->active) { in nvmet_fc_rcv_fcp_abort()
2788 spin_lock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2789 fod->abort = true; in nvmet_fc_rcv_fcp_abort()
2790 fod->aborted = true; in nvmet_fc_rcv_fcp_abort()
2791 spin_unlock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
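
The references above trace the fod lifecycle: each target queue is allocated with struct_size(queue, fod, sqsize) so the fcp_iod descriptors sit in a flexible array at the queue's tail, nvmet_fc_prep_fcp_iodlist() initializes them and strings them onto queue->fod_list, nvmet_fc_alloc_fcp_iod()/nvmet_fc_free_fcp_iod() pop and push that free list, and nvmet_fc_fodnum() recovers a descriptor's index by pointer arithmetic. What follows is a minimal, self-contained user-space sketch of that pool pattern only, not the driver code; the demo_* names are hypothetical stand-ins for the nvmet_fc_* structures and helpers.

/*
 * Minimal sketch of the fod pool pattern shown by the references above.
 * Hypothetical demo_* names; compiles as plain C.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct demo_queue;

struct demo_iod {
	struct demo_queue *queue;	/* back-pointer, like fod->queue */
	struct demo_iod *next_free;	/* stand-in for the fcp_list linkage */
	bool active;
};

struct demo_queue {
	unsigned int sqsize;
	struct demo_iod *free_list;	/* stand-in for queue->fod_list */
	struct demo_iod fod[];		/* flexible array, like queue->fod[] */
};

/* Recover a descriptor's index by pointer arithmetic, as nvmet_fc_fodnum() does. */
static ptrdiff_t demo_iodnum(const struct demo_iod *iod)
{
	return iod - iod->queue->fod;
}

/* Allocate the queue plus sqsize descriptors in one block (struct_size()-style sizing). */
static struct demo_queue *demo_alloc_queue(unsigned int sqsize)
{
	struct demo_queue *q = calloc(1, sizeof(*q) + sqsize * sizeof(struct demo_iod));
	unsigned int i;

	if (!q)
		return NULL;
	q->sqsize = sqsize;
	for (i = 0; i < sqsize; i++) {		/* prep-iodlist-style init */
		q->fod[i].queue = q;
		q->fod[i].next_free = q->free_list;
		q->free_list = &q->fod[i];
	}
	return q;
}

/* Pop a free descriptor, as nvmet_fc_alloc_fcp_iod() does with list_first_entry_or_null(). */
static struct demo_iod *demo_alloc_iod(struct demo_queue *q)
{
	struct demo_iod *iod = q->free_list;

	if (iod) {
		q->free_list = iod->next_free;
		iod->active = true;
	}
	return iod;
}

/* Return a descriptor to the pool, as nvmet_fc_free_fcp_iod() does when nothing is deferred. */
static void demo_free_iod(struct demo_queue *q, struct demo_iod *iod)
{
	iod->active = false;
	iod->next_free = q->free_list;
	q->free_list = iod;
}

int main(void)
{
	struct demo_queue *q = demo_alloc_queue(4);
	struct demo_iod *iod = q ? demo_alloc_iod(q) : NULL;

	if (iod) {
		printf("allocated descriptor #%td\n", demo_iodnum(iod));
		demo_free_iod(q, iod);
	}
	free(q);
	return 0;
}

The point of the fixed pool, as the references suggest, is that nothing is allocated per command on the FCP fast path: the descriptors are carved out once per queue, their response IU buffers are DMA-mapped a single time in nvmet_fc_prep_fcp_iodlist() and unmapped only in nvmet_fc_destroy_fcp_iodlist(), and a completed descriptor is either pushed back on fod_list or reused directly for a deferred command, as the memcpy/defer_work references in nvmet_fc_free_fcp_iod() show.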