--- rdma.c (1e05a7e6ebc4a5a5c53dce32e7e6d0ff5e7e08d1)
+++ rdma.c (c0f2f45be2976abe973c8cd544f38e2d928771b0)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * NVMe over Fabrics RDMA host code.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/init.h>

--- 548 unchanged lines hidden ---

@@ -557,27 +557,23 @@
         return 0;

 out_destroy_cm_id:
         rdma_destroy_id(queue->cm_id);
         nvme_rdma_destroy_queue_ib(queue);
         return ret;
 }

-static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
-{
-        rdma_disconnect(queue->cm_id);
-        ib_drain_qp(queue->qp);
-}
-
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
         if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
                 return;
-        __nvme_rdma_stop_queue(queue);
+
+        rdma_disconnect(queue->cm_id);
+        ib_drain_qp(queue->qp);
 }

 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 {
         if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                 return;

         nvme_rdma_destroy_queue_ib(queue);

--- 22 unchanged lines hidden ---

@@ -606,23 +602,21 @@
         bool poll = nvme_rdma_poll_queue(queue);
         int ret;

         if (idx)
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
         else
                 ret = nvmf_connect_admin_queue(&ctrl->ctrl);

-        if (!ret) {
+        if (!ret)
                 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
-        } else {
-                __nvme_rdma_stop_queue(queue);
+        else
                 dev_info(ctrl->ctrl.device,
                         "failed to connect queue: %d ret=%d\n", idx, ret);
-        }
         return ret;
 }

 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
         int i, ret = 0;

         for (i = 1; i < ctrl->ctrl.queue_count; i++) {

--- 175 unchanged lines hidden ---

@@ -804,28 +798,17 @@
                         goto out_free_tagset;
                 }
         }

         error = nvme_rdma_start_queue(ctrl, 0);
         if (error)
                 goto out_cleanup_queue;

-        error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP,
-                        &ctrl->ctrl.cap);
-        if (error) {
-                dev_err(ctrl->ctrl.device,
-                        "prop_get NVME_REG_CAP failed\n");
-                goto out_stop_queue;
-        }
-
-        ctrl->ctrl.sqsize =
-                min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
-
-        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+        error = nvme_enable_ctrl(&ctrl->ctrl);
         if (error)
                 goto out_stop_queue;

         ctrl->ctrl.max_hw_sectors =
                 (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);

         error = nvme_init_identify(&ctrl->ctrl);
         if (error)

--- 70 unchanged lines hidden ---

@@ -902,32 +885,36 @@
         return ret;
 }

 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
                 bool remove)
 {
         blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
         nvme_rdma_stop_queue(&ctrl->queues[0]);
-        if (ctrl->ctrl.admin_tagset)
+        if (ctrl->ctrl.admin_tagset) {
                 blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
                         nvme_cancel_request, &ctrl->ctrl);
+                blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
+        }
         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
         nvme_rdma_destroy_admin_queue(ctrl, remove);
 }

 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
                 bool remove)
 {
         if (ctrl->ctrl.queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
                 nvme_rdma_stop_io_queues(ctrl);
-                if (ctrl->ctrl.tagset)
+                if (ctrl->ctrl.tagset) {
                         blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
                                 nvme_cancel_request, &ctrl->ctrl);
+                        blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
+                }
                 if (remove)
                         nvme_start_queues(&ctrl->ctrl);
                 nvme_rdma_destroy_io_queues(ctrl, remove);
         }
 }

 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {

--- 1188 unchanged lines hidden ---