Lines matching refs: iod

424 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
427 nvme_req(req)->cmd = &iod->cmd;
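The two matches above (424, 427) are the per-request setup: the iod lives in the blk-mq PDU and carries the NVMe command itself. Reading the fields used across the rest of this listing, the structure behind it looks roughly like the sketch below; this is a reconstruction from these references only, so exact types, ordering and the descriptor-array bound are version-dependent assumptions.

    /* Sketch reconstructed from the iod fields referenced in this listing. */
    union nvme_descriptor {
        struct nvme_sgl_desc *sg_list;   /* SGL descriptor page, lines 556/559/719 */
        __le64               *prp_list;  /* PRP list page, lines 533/628/637 */
    };

    struct nvme_iod {
        struct nvme_request   req;             /* kept first so nvme_req() can cast the PDU */
        struct nvme_command   cmd;             /* lines 427, 853/859, 900/914 */
        bool                  aborted;         /* timeout bookkeeping, lines 844/1348/1360 */
        s8                    nr_allocations;  /* -1 none, 0 small pool, >=1 full pages */
        unsigned int          dma_len;         /* non-zero only on the single-bvec fast paths */
        dma_addr_t            first_dma;       /* first mapping or descriptor-page DMA address */
        dma_addr_t            meta_dma;        /* metadata mapping, lines 832-835/967 */
        struct sg_table       sgt;             /* scatterlist built by blk_rq_map_sg(), line 799 */
        union nvme_descriptor list[5];         /* descriptor pages; bound is an assumption */
    };
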
528 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
529 dma_addr_t dma_addr = iod->first_dma;
532 for (i = 0; i < iod->nr_allocations; i++) {
533 __le64 *prp_list = iod->list[i].prp_list;
543 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
545 if (iod->dma_len) {
546 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
551 WARN_ON_ONCE(!iod->sgt.nents);
553 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
555 if (iod->nr_allocations == 0)
556 dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
557 iod->first_dma);
558 else if (iod->nr_allocations == 1)
559 dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
560 iod->first_dma);
563 mempool_free(iod->sgt.sgl, dev->iod_mempool);
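Lines 528-563 are the teardown side. A non-zero dma_len marks the single-bvec fast paths, which only need dma_unmap_page(); everything else unmaps the sg_table and then returns descriptor pages to the DMA pools according to nr_allocations. A condensed sketch of that flow, using the names as they appear above (error-path details are version-specific, and the nvme_free_prps helper name is assumed):

    static void nvme_unmap_data_sketch(struct nvme_dev *dev, struct request *req)
    {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

        if (iod->dma_len) {
            /* single-bvec fast path: one page mapping, no scatterlist */
            dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
                           rq_dma_dir(req));
            return;
        }

        WARN_ON_ONCE(!iod->sgt.nents);
        dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

        if (iod->nr_allocations == 0)          /* one block from the small pool */
            dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
                          iod->first_dma);
        else if (iod->nr_allocations == 1)     /* one full descriptor page */
            dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
                          iod->first_dma);
        else                                   /* chained PRP lists, loop at 528-533 */
            nvme_free_prps(dev, req);
        mempool_free(iod->sgt.sgl, dev->iod_mempool);
    }
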
583 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
586 struct scatterlist *sg = iod->sgt.sgl;
596 iod->first_dma = 0;
610 iod->first_dma = dma_addr;
617 iod->nr_allocations = 0;
620 iod->nr_allocations = 1;
625 iod->nr_allocations = -1;
628 iod->list[0].prp_list = prp_list;
629 iod->first_dma = prp_dma;
637 iod->list[iod->nr_allocations++].prp_list = prp_list;
657 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
658 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
664 WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
666 blk_rq_payload_bytes(req), iod->sgt.nents);
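Lines 583-666 build the PRP data pointer: prp1 takes the DMA address of the first segment (657), prp2 is either the second page or the DMA address of a chained PRP list (629/658), and nr_allocations (617-637) records which pool the list pages came from. A sketch of that pool-selection step; the small-pool threshold of one 256-byte block of 64-bit entries is an assumption, and the length accounting is simplified (the part covered by prp1 is not subtracted here):

    /* Inside the PRP setup helper, after the scatterlist has been DMA-mapped. */
    int nprps = DIV_ROUND_UP(blk_rq_payload_bytes(req), NVME_CTRL_PAGE_SIZE);
    struct dma_pool *pool;

    if (nprps <= (256 / 8)) {          /* fits one small-pool block (size assumed) */
        pool = dev->prp_small_pool;
        iod->nr_allocations = 0;
    } else {
        pool = dev->prp_page_pool;
        iod->nr_allocations = 1;
    }

    prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
    if (!prp_list) {
        iod->nr_allocations = -1;      /* tells the unwind path nothing was allocated */
        return BLK_STS_RESOURCE;
    }
    iod->list[0].prp_list = prp_list;
    iod->first_dma = prp_dma;
    /* when a list page fills up, another page is chained in: line 637 */
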
689 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
692 struct scatterlist *sg = iod->sgt.sgl;
693 unsigned int entries = iod->sgt.nents;
707 iod->nr_allocations = 0;
710 iod->nr_allocations = 1;
715 iod->nr_allocations = -1;
719 iod->list[0].sg_list = sg_list;
720 iod->first_dma = sgl_dma;
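Lines 689-720 are the SGL equivalent: with more than one mapped segment, a descriptor list comes from the same two pools (707/710 mirror 617/620) and the command's dptr.sgl becomes a segment descriptor pointing at it. Each list entry is then a data-block descriptor filled from the mapped scatterlist, roughly as below; the iteration helper and the NVME_SGL_FMT_DATA_DESC type value are taken from the generic kernel and NVMe headers rather than from this listing:

    struct scatterlist *sg;
    int i;

    /* one data-block descriptor per DMA-mapped segment */
    for_each_sgtable_dma_sg(&iod->sgt, sg, i) {
        sg_list[i].addr   = cpu_to_le64(sg_dma_address(sg));
        sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
        sg_list[i].type   = NVME_SGL_FMT_DATA_DESC << 4;
    }
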
735 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
739 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
740 if (dma_mapping_error(dev->dev, iod->first_dma))
742 iod->dma_len = bv->bv_len;
744 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
746 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
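Lines 735-746 are the single-bvec PRP fast path: the bvec is mapped directly, dma_len is set so the unmap path at 545-546 knows there is no scatterlist, and prp2 is only filled in when the bvec crosses a controller-page boundary. The boundary arithmetic behind line 746, as a sketch:

    unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
    unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

    cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
    if (bv->bv_len > first_prp_len)        /* spills into a second controller page */
        cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
    else
        cmnd->dptr.prp2 = 0;
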
756 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
758 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
759 if (dma_mapping_error(dev->dev, iod->first_dma))
761 iod->dma_len = bv->bv_len;
764 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
765 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
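Lines 756-765 are the matching SGL fast path: one inline data-block descriptor, no pool allocation, so nr_allocations stays at -1 and only dma_unmap_page() is needed on completion. Beyond the two assignments above, the descriptor type and the command's SGL flag also have to be set, roughly as follows; the two constant names are assumptions from the shared NVMe headers:

    cmnd->flags = NVME_CMD_SGL_METABUF;    /* data pointer carries an SGL */
    cmnd->dptr.sgl.addr   = cpu_to_le64(iod->first_dma);
    cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
    cmnd->dptr.sgl.type   = NVME_SGL_FMT_DATA_DESC << 4;
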
773 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
794 iod->dma_len = 0;
795 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
796 if (!iod->sgt.sgl)
798 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
799 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
800 if (!iod->sgt.orig_nents)
803 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
811 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
820 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
822 mempool_free(iod->sgt.sgl, dev->iod_mempool);
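Lines 773-822 tie the paths together: small single-segment requests are steered to the two fast paths, everything else allocates a scatterlist from iod_mempool, maps it, and then picks SGL or PRP via nvme_pci_use_sgls() (811). A condensed sketch of that flow; the fast-path eligibility check and error-code translation are omitted, and the two setup helper names are assumptions:

    static blk_status_t nvme_map_data_sketch(struct nvme_dev *dev,
                                             struct request *req,
                                             struct nvme_command *cmnd)
    {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret = BLK_STS_RESOURCE;

        /* (fast-path check elided: a single small bvec takes the simple PRP/SGL setup) */

        iod->dma_len = 0;
        iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
        if (!iod->sgt.sgl)
            return BLK_STS_RESOURCE;
        sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
        iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
        if (!iod->sgt.orig_nents)
            goto out_free_sg;

        if (dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), DMA_ATTR_NO_WARN))
            goto out_free_sg;

        if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
            ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);   /* helper names assumed */
        else
            ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
        if (ret != BLK_STS_OK)
            goto out_unmap_sg;
        return BLK_STS_OK;

    out_unmap_sg:
        dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
    out_free_sg:
        mempool_free(iod->sgt.sgl, dev->iod_mempool);
        return ret;
    }
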
829 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
832 iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
833 if (dma_mapping_error(dev->dev, iod->meta_dma))
835 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
841 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
844 iod->aborted = false;
845 iod->nr_allocations = -1;
846 iod->sgt.nents = 0;
853 ret = nvme_map_data(dev, req, &iod->cmd);
859 ret = nvme_map_metadata(dev, req, &iod->cmd);
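Lines 841-859 show the per-command initialisation that keeps the unwind paths honest: aborted, nr_allocations and sgt.nents are reset before anything is mapped, so the completion and timeout code can always interpret them. A short sketch of the ordering; the unwind comments describe what the real error paths do rather than showing them:

    iod->aborted = false;
    iod->nr_allocations = -1;    /* nothing to free until a pool allocation succeeds */
    iod->sgt.nents = 0;          /* nothing to unmap until dma_map_sgtable() succeeds */

    /* ... the core driver fills iod->cmd from the request here ... */

    if (blk_rq_nr_phys_segments(req)) {
        ret = nvme_map_data(dev, req, &iod->cmd);
        if (ret)
            return ret;          /* the driver also cleans up the command on this path */
    }
    if (blk_integrity_rq(req)) {
        ret = nvme_map_metadata(dev, req, &iod->cmd);
        if (ret)
            return ret;          /* the driver unmaps the data mapping first */
    }
    return BLK_STS_OK;
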
883 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
900 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
912 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
914 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
965 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
967 dma_unmap_page(dev->dev, iod->meta_dma,
1280 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1348 if (!nvmeq->qid || iod->aborted) {
1360 iod->aborted = true;
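Lines 1280-1360 are the timeout handler's use of iod->aborted: an admin-queue command, or one whose abort already timed out, escalates to a controller reset; otherwise a single Abort command is sent and the flag is set so a second timeout escalates. Roughly, with the reset and abort submission reduced to comments:

    if (!nvmeq->qid || iod->aborted) {
        /* admin command, or abort already tried: disable and reset the controller */
        nvme_reset_ctrl(&dev->ctrl);
        return BLK_EH_DONE;
    }
    iod->aborted = true;
    /* ... build and submit an NVMe Abort command for this cid/sqid ... */
    return BLK_EH_RESET_TIMER;
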
3020 * Limit the max command size to prevent iod->sg allocations going
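The final match (3020) is from controller setup: the transfer-size and segment-count limits are chosen so that the per-request scatterlist and descriptor allocations above stay within a single page. A sketch of that sizing; the constant names and values are assumptions for illustration and do not come from this listing:

    /* Assumed for illustration: NVME_MAX_KB_SZ = 8192 KiB, NVME_MAX_SEGS = 128. */
    dev->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
                                     dma_opt_mapping_size(dev->dev) >> 9);
    dev->ctrl.max_segments = NVME_MAX_SEGS;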