--- loop.c (c69f203df3e61414fbf1a66d130abfd7c3bf3fd0)
+++ loop.c (f363b089be0a39fe4282c688118a51d21f952bc7)
 /*
  * NVMe over Fabrics loopback device.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
  *

--- 209 unchanged lines hidden ---

         }

         schedule_work(&iod->work);
 }

 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                 struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
+        BUG_ON(queue_idx >= ctrl->queue_count);
+
         iod->req.cmd = &iod->cmd;
         iod->req.rsp = &iod->rsp;
         iod->queue = &ctrl->queues[queue_idx];
         INIT_WORK(&iod->work, nvme_loop_execute_work);
         return 0;
 }

 static int nvme_loop_init_request(

--- 29 unchanged lines hidden ---

         struct nvme_loop_queue *queue = &ctrl->queues[0];

         BUG_ON(hctx_idx != 0);

         hctx->driver_data = queue;
         return 0;
 }

-static struct blk_mq_ops nvme_loop_mq_ops = {
+static const struct blk_mq_ops nvme_loop_mq_ops = {
         .queue_rq       = nvme_loop_queue_rq,
         .complete       = nvme_loop_complete_rq,
         .init_request   = nvme_loop_init_request,
         .init_hctx      = nvme_loop_init_hctx,
         .timeout        = nvme_loop_timeout,
 };

-static struct blk_mq_ops nvme_loop_admin_mq_ops = {
+static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
         .queue_rq       = nvme_loop_queue_rq,
         .complete       = nvme_loop_complete_rq,
         .init_request   = nvme_loop_init_admin_request,
         .init_hctx      = nvme_loop_init_admin_hctx,
         .timeout        = nvme_loop_timeout,
 };

 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
         blk_cleanup_queue(ctrl->ctrl.admin_q);
         blk_mq_free_tag_set(&ctrl->admin_tag_set);
+        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }

 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
 {
         struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

         if (list_empty(&ctrl->list))
                 goto free_ctrl;

--- 7 unchanged lines hidden ---

                 blk_mq_free_tag_set(&ctrl->tag_set);
         }
         kfree(ctrl->queues);
         nvmf_free_options(nctrl->opts);
 free_ctrl:
         kfree(ctrl);
 }

-static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
-{
-        int i;
-
-        for (i = 1; i < ctrl->queue_count; i++)
-                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
-}
-
-static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
-{
-        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-        unsigned int nr_io_queues;
-        int ret, i;
-
-        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
-        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
-        if (ret || !nr_io_queues)
-                return ret;
-
-        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
-
-        for (i = 1; i <= nr_io_queues; i++) {
-                ctrl->queues[i].ctrl = ctrl;
-                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-                if (ret)
-                        goto out_destroy_queues;
-
-                ctrl->queue_count++;
-        }
-
-        return 0;
-
-out_destroy_queues:
-        nvme_loop_destroy_io_queues(ctrl);
-        return ret;
-}
-
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
         int error;

         memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
         ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
         ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
         ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */

--- 55 unchanged lines hidden ---

         blk_mq_free_tag_set(&ctrl->admin_tag_set);
 out_free_sq:
         nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
         return error;
 }

 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
+        int i;
+
         nvme_stop_keep_alive(&ctrl->ctrl);

         if (ctrl->queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                 nvme_cancel_request, &ctrl->ctrl);
-                nvme_loop_destroy_io_queues(ctrl);
+
+                for (i = 1; i < ctrl->queue_count; i++)
+                        nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
         }

         if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                 nvme_shutdown_ctrl(&ctrl->ctrl);

         blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
         blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                         nvme_cancel_request, &ctrl->ctrl);

--- 55 unchanged lines hidden ---

         int i, ret;

         nvme_loop_shutdown_ctrl(ctrl);

         ret = nvme_loop_configure_admin_queue(ctrl);
         if (ret)
                 goto out_disable;

-        ret = nvme_loop_init_io_queues(ctrl);
-        if (ret)
-                goto out_destroy_admin;
+        for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+                ctrl->queues[i].ctrl = ctrl;
+                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+                if (ret)
+                        goto out_free_queues;
+
+                ctrl->queue_count++;
+        }

-        for (i = 1; i < ctrl->queue_count; i++) {
+        for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret)
-                        goto out_destroy_io;
+                        goto out_free_queues;
         }

         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
         WARN_ON_ONCE(!changed);

         nvme_queue_scan(&ctrl->ctrl);
         nvme_queue_async_events(&ctrl->ctrl);

         nvme_start_queues(&ctrl->ctrl);

         return;

-out_destroy_io:
-        nvme_loop_destroy_io_queues(ctrl);
-out_destroy_admin:
+out_free_queues:
+        for (i = 1; i < ctrl->queue_count; i++)
+                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
         nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
         dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
         nvme_uninit_ctrl(&ctrl->ctrl);
         nvme_put_ctrl(&ctrl->ctrl);
 }

 static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)

--- 22 unchanged lines hidden ---

         .free_ctrl              = nvme_loop_free_ctrl,
         .submit_async_event     = nvme_loop_submit_async_event,
         .delete_ctrl            = nvme_loop_del_ctrl,
         .get_subsysnqn          = nvmf_get_subsysnqn,
 };

 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
+        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
         int ret, i;

-        ret = nvme_loop_init_io_queues(ctrl);
-        if (ret)
+        ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+        if (ret || !opts->nr_io_queues)
                 return ret;

+        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+                        opts->nr_io_queues);
+
+        for (i = 1; i <= opts->nr_io_queues; i++) {
+                ctrl->queues[i].ctrl = ctrl;
+                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+                if (ret)
+                        goto out_destroy_queues;
+
+                ctrl->queue_count++;
+        }
+
         memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
         ctrl->tag_set.ops = &nvme_loop_mq_ops;
         ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
         ctrl->tag_set.reserved_tags = 1; /* fabric connect */
         ctrl->tag_set.numa_node = NUMA_NO_NODE;
         ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
         ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                 SG_CHUNK_SIZE * sizeof(struct scatterlist);

--- 7 unchanged lines hidden ---

                 goto out_destroy_queues;

         ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
         if (IS_ERR(ctrl->ctrl.connect_q)) {
                 ret = PTR_ERR(ctrl->ctrl.connect_q);
                 goto out_free_tagset;
         }

-        for (i = 1; i < ctrl->queue_count; i++) {
+        for (i = 1; i <= opts->nr_io_queues; i++) {
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret)
                         goto out_cleanup_connect_q;
         }

         return 0;

 out_cleanup_connect_q:
         blk_cleanup_queue(ctrl->ctrl.connect_q);
 out_free_tagset:
         blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-        nvme_loop_destroy_io_queues(ctrl);
+        for (i = 1; i < ctrl->queue_count; i++)
+                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
         return ret;
 }

 static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
                 struct nvmf_ctrl_options *opts)
 {
         struct nvme_loop_ctrl *ctrl;
         bool changed;

--- 149 unchanged lines hidden ---