--- rdma.c (c69f203df3e61414fbf1a66d130abfd7c3bf3fd0)
+++ rdma.c (f363b089be0a39fe4282c688118a51d21f952bc7)
 /*
  * NVMe over Fabrics RDMA host code.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
  *

--- 329 unchanged lines hidden ---

@@ -338,16 +338,18 @@
 		struct request *rq, unsigned int queue_idx)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
 	struct nvme_rdma_device *dev = queue->device;
 	struct ib_device *ibdev = dev->dev;
 	int ret;
 
+	BUG_ON(queue_idx >= ctrl->queue_count);
+
 	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
 			DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
 	req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
 			ctrl->max_fr_pages);
 	if (IS_ERR(req->mr)) {
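
The only change in this hunk is the BUG_ON() bounds check that the f363b089 side carries and the c69f203 side does not. The sketch below shows the same defensive pattern in plain userspace C; assert() stands in for the kernel's BUG_ON(), and QUEUE_COUNT and init_request() are hypothetical stand-ins, not the driver's own symbols.

#include <assert.h>
#include <stdio.h>

#define QUEUE_COUNT 4	/* stand-in: queue 0 is admin, 1..3 are I/O queues */

static void init_request(unsigned int queue_idx)
{
	/* Fail loudly on an out-of-range index instead of silently
	 * indexing past the end of the queue array, as BUG_ON() does
	 * in the kernel. */
	assert(queue_idx < QUEUE_COUNT);
	printf("initializing request on queue %u\n", queue_idx);
}

int main(void)
{
	init_request(1);
	return 0;
}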

--- 291 unchanged lines hidden ---

@@ -645,32 +647,18 @@
 
 out_free_queues:
 	nvme_rdma_free_io_queues(ctrl);
 	return ret;
 }
 
 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
-	unsigned int nr_io_queues;
 	int i, ret;
 
-	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
-	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
-	if (ret)
-		return ret;
-
-	ctrl->queue_count = nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
-		return 0;
-
-	dev_info(ctrl->ctrl.device,
-		"creating %d I/O queues.\n", nr_io_queues);
-
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvme_rdma_init_queue(ctrl, i,
 				ctrl->ctrl.opts->queue_size);
 		if (ret) {
 			dev_info(ctrl->ctrl.device,
 				"failed to initialize i/o queue: %d\n", ret);
 			goto out_free_queues;
 		}
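
On the c69f203 side of this hunk, nvme_rdma_init_io_queues() negotiates the queue count itself: it caps the requested number of I/O queues at the number of online CPUs, lets the controller reduce it further via nvme_set_queue_count(), and reserves slot 0 for the admin queue (hence queue_count = nr_io_queues + 1). A minimal userspace sketch of that arithmetic follows; the option values and the set_queue_count() stub are assumptions for illustration, not the driver's code.

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Stand-in for nvme_set_queue_count(): the controller may grant fewer
 * queues than requested; this stub grants at most two. */
static int set_queue_count(unsigned int *count)
{
	if (*count > 2)
		*count = 2;
	return 0;
}

int main(void)
{
	unsigned int opts_nr_io_queues = 8;	/* requested at connect time */
	unsigned int online_cpus = 4;		/* stand-in for num_online_cpus() */
	unsigned int nr_io_queues, queue_count;

	/* Cap the request by the online CPU count, then let the
	 * controller reduce it further. */
	nr_io_queues = min_u(opts_nr_io_queues, online_cpus);
	if (set_queue_count(&nr_io_queues))
		return 1;

	/* Queue 0 is the admin queue, so the total is nr_io_queues + 1. */
	queue_count = nr_io_queues + 1;
	if (queue_count < 2) {
		printf("no I/O queues granted\n");
		return 0;
	}

	printf("creating %u I/O queues.\n", nr_io_queues);
	return 0;
}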

--- 847 unchanged lines hidden ---

@@ -1524,28 +1512,28 @@
 			error = rq->errors;
 		else
 			error = nvme_error_status(rq->errors);
 	}
 
 	blk_mq_end_request(rq, error);
 }
 
-static struct blk_mq_ops nvme_rdma_mq_ops = {
+static const struct blk_mq_ops nvme_rdma_mq_ops = {
 	.queue_rq = nvme_rdma_queue_rq,
 	.complete = nvme_rdma_complete_rq,
 	.init_request = nvme_rdma_init_request,
 	.exit_request = nvme_rdma_exit_request,
 	.reinit_request = nvme_rdma_reinit_request,
 	.init_hctx = nvme_rdma_init_hctx,
 	.poll = nvme_rdma_poll,
 	.timeout = nvme_rdma_timeout,
 };
 
-static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
+static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 	.queue_rq = nvme_rdma_queue_rq,
 	.complete = nvme_rdma_complete_rq,
 	.init_request = nvme_rdma_init_admin_request,
 	.exit_request = nvme_rdma_exit_admin_request,
 	.reinit_request = nvme_rdma_reinit_request,
 	.init_hctx = nvme_rdma_init_admin_hctx,
 	.timeout = nvme_rdma_timeout,
 };
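
The only difference between the two sides of this hunk is the const qualifier on the ops tables. Marking a table of function pointers const lets the compiler place it in read-only data, so the pointers cannot be redirected at run time, whether by a stray write or an attacker. A minimal sketch of the pattern, with a hypothetical struct my_ops rather than blk_mq_ops:

#include <stdio.h>

struct my_ops {
	void (*handle)(void);
};

static void default_handle(void)
{
	printf("handled\n");
}

/* const puts the table in read-only data: a later assignment such as
 * "ops.handle = evil_handle;" no longer compiles, and a forced write
 * would fault at run time. */
static const struct my_ops ops = {
	.handle = default_handle,
};

int main(void)
{
	ops.handle();
	return 0;
}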

--- 246 unchanged lines hidden ---

@@ -1798,18 +1786,30 @@
 	.submit_async_event = nvme_rdma_submit_async_event,
 	.delete_ctrl = nvme_rdma_del_ctrl,
 	.get_subsysnqn = nvmf_get_subsysnqn,
 	.get_address = nvmf_get_address,
 };
 
 static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret;
 
+	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+	if (ret)
+		return ret;
+
+	ctrl->queue_count = opts->nr_io_queues + 1;
+	if (ctrl->queue_count < 2)
+		return 0;
+
+	dev_info(ctrl->ctrl.device,
+		"creating %d I/O queues.\n", opts->nr_io_queues);
+
 	ret = nvme_rdma_init_io_queues(ctrl);
 	if (ret)
 		return ret;
 
 	/*
 	 * We need a reference on the device as long as the tag_set is alive,
 	 * as the MRs in the request structures need a valid ib_device.
 	 */
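
The comment that closes this hunk states the lifetime rule both revisions rely on: the device must stay valid for as long as the tag set, whose requests hold MRs on that device, is alive. A userspace sketch of that get/put pattern follows; the types and functions here are illustrative stand-ins, not the driver's API.

#include <stdio.h>

struct device {
	int refcount;
};

static void dev_get(struct device *d) { d->refcount++; }
static void dev_put(struct device *d) { d->refcount--; }

struct tag_set {
	struct device *dev;	/* must stay valid while the tag set lives */
};

static void tag_set_init(struct tag_set *ts, struct device *d)
{
	dev_get(d);		/* pin the device for the tag set's lifetime */
	ts->dev = d;
}

static void tag_set_destroy(struct tag_set *ts)
{
	dev_put(ts->dev);	/* unpin only after the last request is gone */
	ts->dev = NULL;
}

int main(void)
{
	struct device dev = { .refcount = 1 };
	struct tag_set ts;

	tag_set_init(&ts, &dev);
	printf("refcount while tag set is alive: %d\n", dev.refcount);
	tag_set_destroy(&ts);
	printf("refcount after destroy: %d\n", dev.refcount);
	return 0;
}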

--- 261 unchanged lines hidden ---