// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
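/*
 * Typical usage (illustrative, not part of this file): export a subsystem
 * through an nvmet port configured with trtype "loop" via configfs, then
 * connect to it from the host side with nvme-cli, e.g.:
 *
 *	nvme connect --transport=loop --nqn=<subsysnqn>
 *
 * Commands then flow from the host core straight into the nvmet core
 * within the same kernel, with no wire transport involved.
 */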
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

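/*
 * Per-request context: one nvme_loop_iod carries both the host-side
 * nvme_request and the target-side nvmet_req for the same command, so
 * the loop "transport" never copies the command or completion between
 * the two stacks.  first_sgl is the inline scatterlist tail used for
 * small transfers.
 */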
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

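/*
 * Each loop queue pairs a host-side hardware context with a target-side
 * completion/submission queue pair; NVME_LOOP_Q_LIVE tracks whether the
 * queue has completed the fabrics connect.
 */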
struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

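/*
 * Queue index 0 is the admin queue and lives in its own tag set; all
 * I/O queues share the controller's I/O tag set, shifted down by one.
 */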
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

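/*
 * Target-side completion path: called by the nvmet core when a looped-back
 * request finishes.  The matching host request is looked up by command_id
 * and completed in place; AEN completions have no struct request and are
 * handed to the host AEN handler directly.
 */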
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

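/*
 * Host-side submission path: translate the blk-mq request into an nvmet
 * request, map its payload into a (possibly chained) scatterlist, and
 * punt execution to nvmet_wq so the target code runs in process context.
 */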
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	nvme_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	queue_work(nvmet_wq, &iod->work);
	return BLK_STS_OK;
}

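/*
 * AERs bypass blk-mq entirely: the command is built in the preallocated
 * async_event_iod and fed straight into the target via nvmet_req_init().
 */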
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	queue_work(nvmet_wq, &iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &ctrl->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	/*
	 * flush_end_io() can be called recursively for us, so use our own
	 * lock class key to avoid a false-positive lockdep report about
	 * recursive locking.  This also lets us avoid the dynamically
	 * allocated lock class for each flush queue, which could cause a
	 * horrible boot delay.
	 */
	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

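/*
 * Tear down the admin queue: unquiesce first so anything queued after the
 * quiesce gets flushed, then destroy the target SQ and the admin tag set.
 */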
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	/*
	 * It's possible that some requests might have been added
	 * after the admin queue was stopped/quiesced, so unquiesce
	 * the queue now to flush those requests through to completion.
	 */
	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset)
		nvme_remove_io_tag_set(nctrl);
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
	/*
	 * It's possible that some requests might have been added
	 * after the I/O queues were stopped/quiesced, so unquiesce
	 * the queues now to flush those requests through to completion.
	 */
	nvme_unquiesce_io_queues(&ctrl->ctrl);
}

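/*
 * Negotiate the I/O queue count with the (local) controller and set up a
 * target-side submission queue for each; connecting them happens later in
 * nvme_loop_connect_io_queues().
 */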
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

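/*
 * Bring up the admin queue end to end: target SQ, admin tag set, fabrics
 * connect, controller enable, and generic controller initialization.
 * Unwinding on failure happens in reverse order of setup.
 */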
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
			&nvme_loop_admin_mq_ops,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (error)
		goto out_free_sq;

	/* reset stopped state for the fresh admin queue */
	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_tagset;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT;

	nvme_unquiesce_admin_queue(&ctrl->ctrl);

	error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
	if (error)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

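/*
 * Full teardown: quiesce and cancel outstanding I/O, shut the controller
 * down if it is still live, then destroy the I/O and admin queues.
 */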
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_quiesce_io_queues(&ctrl->ctrl);
		nvme_cancel_tagset(&ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	nvme_quiesce_admin_queue(&ctrl->ctrl);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_disable_ctrl(&ctrl->ctrl, true);

	nvme_cancel_admin_tagset(&ctrl->ctrl);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

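/*
 * Controller reset: tear everything down and rebuild the admin and I/O
 * queues from scratch.  A failure along the way removes the controller
 * rather than leaving it half-initialized.
 */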
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&nvme_loop_mq_ops, 1,
			sizeof(struct nvme_loop_iod) +
			NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
	if (ret)
		goto out_destroy_queues;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_tagset;

	return 0;

out_cleanup_tagset:
	nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

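/*
 * Pick the nvmet port this controller should attach to: match on traddr
 * when one was given, otherwise fall back to the first registered port.
 */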
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

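/*
 * Fabrics create_ctrl entry point: allocate and initialize the controller,
 * bring up the admin queue, clamp queue_size to the controller's MAXCMD,
 * create the I/O queues, and register the controller on the global list.
 */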
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}
	ctrl->ctrl.sqsize = opts->queue_size - 1;

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port.  This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */