Lines matching full:ctrl (whole-identifier search) in the NVMe over RDMA host driver, drivers/nvme/host/rdma.c; the number leading each match is its line in that file.
89 struct nvme_rdma_ctrl *ctrl; member
125 struct nvme_ctrl ctrl; member
130 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) in to_rdma_ctrl() argument
132 return container_of(ctrl, struct nvme_rdma_ctrl, ctrl); in to_rdma_ctrl()
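The first matches show the standard Linux embedding pattern: the transport-private struct nvme_rdma_ctrl (the member at line 89) embeds the generic struct nvme_ctrl (line 125), and to_rdma_ctrl() recovers the outer struct from a pointer to the embedded one. A minimal sketch of the pattern, with field names taken from the matches above:

    struct nvme_rdma_ctrl {
        /* ...transport-private state elided... */
        struct nvme_ctrl ctrl;      /* embedded generic controller */
    };

    static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
    {
        /* container_of() subtracts offsetof(struct nvme_rdma_ctrl, ctrl) */
        return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
    }

This is why so many matches below read ctrl->ctrl: the outer RDMA controller dereferencing its embedded generic core.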
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
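Lines 161-168 reassemble into two small helpers: the queue index is plain pointer arithmetic against the ctrl->queues array, and a queue counts as a poll queue when its index lies past the default and read queue ranges. A sketch reassembled from the fragments above:

    static int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
    {
        /* queues[] is a flat array, so the index is a pointer difference */
        return queue - queue->ctrl->queues;
    }

    static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
    {
        /* poll queues are laid out after the default and read queues */
        return nvme_rdma_queue_idx(queue) >
            queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
            queue->ctrl->io_queues[HCTX_TYPE_READ];
    }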
297 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); in nvme_rdma_init_request() local
299 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_rdma_init_request()
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request()
302 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_rdma_init_request()
322 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data); in nvme_rdma_init_hctx() local
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx()
325 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); in nvme_rdma_init_hctx()
334 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data); in nvme_rdma_init_admin_hctx() local
335 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx()
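Lines 297-335 encode the tag-set-to-queue mapping: requests from the admin tag set always target queues[0], while hardware context i of the I/O tag set targets queues[i + 1]. Condensed from the fragments above:

    /* admin set -> queues[0]; I/O set hctx i -> queues[i + 1] */
    int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
    struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];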
533 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
544 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
569 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_alloc_queue() argument
576 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
578 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
579 if (idx && ctrl->ctrl.max_integrity_segments) in nvme_rdma_alloc_queue()
586 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
595 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
601 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) in nvme_rdma_alloc_queue()
602 src_addr = (struct sockaddr *)&ctrl->src_addr; in nvme_rdma_alloc_queue()
606 (struct sockaddr *)&ctrl->addr, in nvme_rdma_alloc_queue()
609 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
616 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
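nvme_rdma_alloc_queue() (lines 569-616) sizes the command capsule from ioccsz, which is in 16-byte units (line 586), and then kicks off RDMA connection-manager address resolution, passing a source address only when the user supplied host_traddr. A hedged sketch of the resolution step, presumably rdma_resolve_addr(); the timeout constant name stands in for whatever the file defines:

    struct sockaddr *src_addr = NULL;

    if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
        src_addr = (struct sockaddr *)&ctrl->src_addr;

    /* asynchronous: the result arrives via nvme_rdma_cm_handler() */
    ret = rdma_resolve_addr(queue->cm_id, src_addr,
                (struct sockaddr *)&ctrl->addr,
                NVME_RDMA_CM_TIMEOUT_MS);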
660 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_free_io_queues() argument
664 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_free_io_queues()
665 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_free_io_queues()
668 static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_stop_io_queues() argument
672 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_stop_io_queues()
673 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_stop_io_queues()
676 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) in nvme_rdma_start_queue() argument
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue()
682 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx); in nvme_rdma_start_queue()
684 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_rdma_start_queue()
691 dev_info(ctrl->ctrl.device, in nvme_rdma_start_queue()
697 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_start_io_queues() argument
703 ret = nvme_rdma_start_queue(ctrl, i); in nvme_rdma_start_io_queues()
712 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_start_io_queues()
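nvme_rdma_start_io_queues() (lines 697-712) uses the standard partial-failure unwind: on error it stops, in reverse order, only the queues it managed to start in this call. Reassembled from the fragments above (label name assumed):

    static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
                         int first, int last)
    {
        int i, ret = 0;

        for (i = first; i < last; i++) {
            ret = nvme_rdma_start_queue(ctrl, i);
            if (ret)
                goto out_stop_queues;
        }
        return 0;

    out_stop_queues:
        /* unwind only the queues started above */
        for (i--; i >= first; i--)
            nvme_rdma_stop_queue(&ctrl->queues[i]);
        return ret;
    }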
716 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_alloc_io_queues() argument
718 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_alloc_io_queues()
723 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_rdma_alloc_io_queues()
728 dev_err(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
733 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_rdma_alloc_io_queues()
734 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
737 nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues); in nvme_rdma_alloc_io_queues()
738 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_alloc_io_queues()
739 ret = nvme_rdma_alloc_queue(ctrl, i, in nvme_rdma_alloc_io_queues()
740 ctrl->ctrl.sqsize + 1); in nvme_rdma_alloc_io_queues()
749 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_alloc_io_queues()
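One subtlety in nvme_rdma_alloc_io_queues() (lines 716-749): nvme_set_queue_count() negotiates with the target and may grant fewer queues than requested, and ctrl->ctrl.queue_count includes the admin queue. A worked example, numbers assumed:

    /*
     * Target grants nr_io_queues = 4:
     *   ctrl->ctrl.queue_count = 4 + 1 = 5   (queues[0] = admin queue)
     *   the allocation loop runs i = 1..4, each at ctrl->ctrl.sqsize + 1 depth
     */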
754 static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl) in nvme_rdma_alloc_tag_set() argument
759 if (ctrl->max_integrity_segments) in nvme_rdma_alloc_tag_set()
763 return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set, in nvme_rdma_alloc_tag_set()
765 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, in nvme_rdma_alloc_tag_set()
769 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_destroy_admin_queue() argument
771 if (ctrl->async_event_sqe.data) { in nvme_rdma_destroy_admin_queue()
772 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_rdma_destroy_admin_queue()
773 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_destroy_admin_queue()
775 ctrl->async_event_sqe.data = NULL; in nvme_rdma_destroy_admin_queue()
777 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_destroy_admin_queue()
780 static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_configure_admin_queue() argument
786 error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); in nvme_rdma_configure_admin_queue()
790 ctrl->device = ctrl->queues[0].device; in nvme_rdma_configure_admin_queue()
791 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); in nvme_rdma_configure_admin_queue()
794 if (ctrl->device->dev->attrs.kernel_cap_flags & in nvme_rdma_configure_admin_queue()
798 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, in nvme_rdma_configure_admin_queue()
806 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
812 error = nvme_alloc_admin_tag_set(&ctrl->ctrl, in nvme_rdma_configure_admin_queue()
813 &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops, in nvme_rdma_configure_admin_queue()
821 error = nvme_rdma_start_queue(ctrl, 0); in nvme_rdma_configure_admin_queue()
825 error = nvme_enable_ctrl(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
829 ctrl->ctrl.max_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
830 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); in nvme_rdma_configure_admin_queue()
832 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
834 ctrl->ctrl.max_integrity_segments = 0; in nvme_rdma_configure_admin_queue()
836 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
838 error = nvme_init_ctrl_finish(&ctrl->ctrl, false); in nvme_rdma_configure_admin_queue()
845 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
846 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
848 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
849 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
852 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
854 if (ctrl->async_event_sqe.data) { in nvme_rdma_configure_admin_queue()
855 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
857 ctrl->async_event_sqe.data = NULL; in nvme_rdma_configure_admin_queue()
860 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
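nvme_rdma_configure_admin_queue() (lines 780-860) is a textbook goto-unwind ladder: allocate queue 0, size the MR limits from the device, allocate the async-event SQE and (when new) the admin tag set, start the queue, enable the controller, and on any failure release exactly what was acquired so far, in reverse. A condensed sketch of the shape; label names are assumed, not the file's own:

    error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
    if (error)
        return error;

    error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
                   sizeof(struct nvme_command), DMA_TO_DEVICE);
    if (error)
        goto out_free_queue;

    if (new) {
        error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
                         &nvme_rdma_admin_mq_ops,
                         /* ...cmd_size elided... */ 0);
        if (error)
            goto out_free_async_qe;
    }

    error = nvme_rdma_start_queue(ctrl, 0);
    if (error)
        goto out_remove_admin_tag_set;

    error = nvme_enable_ctrl(&ctrl->ctrl);
    if (error)
        goto out_stop_queue;

    /* ...set segment limits, unquiesce, nvme_init_ctrl_finish()... */
    return 0;

    /* each label below undoes one step, then falls through to the earlier ones */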
864 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) in nvme_rdma_configure_io_queues() argument
868 ret = nvme_rdma_alloc_io_queues(ctrl); in nvme_rdma_configure_io_queues()
873 ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
883 nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count); in nvme_rdma_configure_io_queues()
884 ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues); in nvme_rdma_configure_io_queues()
889 nvme_start_freeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
890 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
891 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { in nvme_rdma_configure_io_queues()
898 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
901 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, in nvme_rdma_configure_io_queues()
902 ctrl->ctrl.queue_count - 1); in nvme_rdma_configure_io_queues()
903 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
910 ret = nvme_rdma_start_io_queues(ctrl, nr_queues, in nvme_rdma_configure_io_queues()
911 ctrl->tag_set.nr_hw_queues + 1); in nvme_rdma_configure_io_queues()
918 nvme_quiesce_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
919 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
920 nvme_rdma_stop_io_queues(ctrl); in nvme_rdma_configure_io_queues()
922 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
924 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
926 nvme_rdma_free_io_queues(ctrl); in nvme_rdma_configure_io_queues()
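On a reconnect (new == false), nvme_rdma_configure_io_queues() (lines 864-926) must cope with the target granting a different queue count than before: it first starts only the queues the existing tag set covers (nr_queues, line 883), freezes I/O with a bound of NVME_IO_TIMEOUT, resizes the tag set, unfreezes, and only then starts any additional queues (lines 910-911). Condensed from the fragments above:

    if (!new) {
        nvme_start_freeze(&ctrl->ctrl);
        nvme_unquiesce_io_queues(&ctrl->ctrl);
        if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
            /* frozen state never cleared: controller never became ready */
            ret = -ENODEV;
            nvme_unfreeze(&ctrl->ctrl);
            goto out_wait_freeze_timed_out;   /* label assumed */
        }
        blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
                       ctrl->ctrl.queue_count - 1);
        nvme_unfreeze(&ctrl->ctrl);
    }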
930 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_teardown_admin_queue() argument
933 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
934 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
935 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_teardown_admin_queue()
936 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
938 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
939 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
941 nvme_rdma_destroy_admin_queue(ctrl); in nvme_rdma_teardown_admin_queue()
944 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_teardown_io_queues() argument
947 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_teardown_io_queues()
948 nvme_quiesce_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
949 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
950 nvme_rdma_stop_io_queues(ctrl); in nvme_rdma_teardown_io_queues()
951 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
953 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
954 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
956 nvme_rdma_free_io_queues(ctrl); in nvme_rdma_teardown_io_queues()
962 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); in nvme_rdma_stop_ctrl() local
964 flush_work(&ctrl->err_work); in nvme_rdma_stop_ctrl()
965 cancel_delayed_work_sync(&ctrl->reconnect_work); in nvme_rdma_stop_ctrl()
970 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); in nvme_rdma_free_ctrl() local
972 if (list_empty(&ctrl->list)) in nvme_rdma_free_ctrl()
976 list_del(&ctrl->list); in nvme_rdma_free_ctrl()
981 kfree(ctrl->queues); in nvme_rdma_free_ctrl()
982 kfree(ctrl); in nvme_rdma_free_ctrl()
985 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_reconnect_or_remove() argument
987 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); in nvme_rdma_reconnect_or_remove()
995 if (nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_rdma_reconnect_or_remove()
996 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", in nvme_rdma_reconnect_or_remove()
997 ctrl->ctrl.opts->reconnect_delay); in nvme_rdma_reconnect_or_remove()
998 queue_delayed_work(nvme_wq, &ctrl->reconnect_work, in nvme_rdma_reconnect_or_remove()
999 ctrl->ctrl.opts->reconnect_delay * HZ); in nvme_rdma_reconnect_or_remove()
1001 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_reconnect_or_remove()
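nvme_rdma_reconnect_or_remove() (lines 985-1001) is the retry policy in miniature: if the fabrics layer says another attempt is allowed, re-arm the delayed work; otherwise delete the controller. Reassembled from the fragments above; the real function also returns early unless the state captured at line 987 warrants a reconnect:

    if (nvmf_should_reconnect(&ctrl->ctrl)) {
        dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
             ctrl->ctrl.opts->reconnect_delay);
        queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
                   ctrl->ctrl.opts->reconnect_delay * HZ);
    } else {
        nvme_delete_ctrl(&ctrl->ctrl);
    }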
1005 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) in nvme_rdma_setup_ctrl() argument
1010 ret = nvme_rdma_configure_admin_queue(ctrl, new); in nvme_rdma_setup_ctrl()
1014 if (ctrl->ctrl.icdoff) { in nvme_rdma_setup_ctrl()
1016 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); in nvme_rdma_setup_ctrl()
1020 if (!(ctrl->ctrl.sgls & (1 << 2))) { in nvme_rdma_setup_ctrl()
1022 dev_err(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1027 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_rdma_setup_ctrl()
1028 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1029 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_rdma_setup_ctrl()
1030 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_rdma_setup_ctrl()
1033 if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) { in nvme_rdma_setup_ctrl()
1034 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1035 "ctrl sqsize %u > max queue size %u, clamping down\n", in nvme_rdma_setup_ctrl()
1036 ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE); in nvme_rdma_setup_ctrl()
1037 ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1; in nvme_rdma_setup_ctrl()
1040 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { in nvme_rdma_setup_ctrl()
1041 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1042 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_rdma_setup_ctrl()
1043 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); in nvme_rdma_setup_ctrl()
1044 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; in nvme_rdma_setup_ctrl()
1047 if (ctrl->ctrl.sgls & (1 << 20)) in nvme_rdma_setup_ctrl()
1048 ctrl->use_inline_data = true; in nvme_rdma_setup_ctrl()
1050 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1051 ret = nvme_rdma_configure_io_queues(ctrl, new); in nvme_rdma_setup_ctrl()
1056 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_rdma_setup_ctrl()
1059 * state change failure is ok if we started ctrl delete, in nvme_rdma_setup_ctrl()
1063 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1072 nvme_start_ctrl(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1076 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1077 nvme_quiesce_io_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1078 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1079 nvme_rdma_stop_io_queues(ctrl); in nvme_rdma_setup_ctrl()
1080 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1082 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1083 nvme_rdma_free_io_queues(ctrl); in nvme_rdma_setup_ctrl()
1086 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1087 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_setup_ctrl()
1088 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_setup_ctrl()
1089 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1091 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1092 nvme_rdma_destroy_admin_queue(ctrl); in nvme_rdma_setup_ctrl()
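All three clamps in nvme_rdma_setup_ctrl() (lines 1027-1044) revolve around sqsize being a 0's-based value (queue depth minus one). A worked example with assumed numbers:

    /*
     * opts->queue_size = 256          -> sqsize = 255 at create time
     * NVME_RDMA_MAX_QUEUE_SIZE = 128  -> sqsize clamped to 127 (line 1037)
     * ctrl maxcmd = 64                -> sqsize clamped to 63  (line 1044)
     * Effective queue depth: sqsize + 1 = 64 commands.
     */

The two sgls checks bracket the same setup: bit 2 (line 1020) is the keyed SGL data block descriptor capability that RDMA transports require, and bit 20 (line 1047) advertises SGL addresses carrying offsets, which is what permits in-capsule inline data (use_inline_data, line 1048).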
1098 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), in nvme_rdma_reconnect_ctrl_work() local
1101 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reconnect_ctrl_work()
1103 if (nvme_rdma_setup_ctrl(ctrl, false)) in nvme_rdma_reconnect_ctrl_work()
1106 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", in nvme_rdma_reconnect_ctrl_work()
1107 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1109 ctrl->ctrl.nr_reconnects = 0; in nvme_rdma_reconnect_ctrl_work()
1114 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", in nvme_rdma_reconnect_ctrl_work()
1115 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1116 nvme_rdma_reconnect_or_remove(ctrl); in nvme_rdma_reconnect_ctrl_work()
1121 struct nvme_rdma_ctrl *ctrl = container_of(work, in nvme_rdma_error_recovery_work() local
1124 nvme_stop_keep_alive(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1125 flush_work(&ctrl->ctrl.async_event_work); in nvme_rdma_error_recovery_work()
1126 nvme_rdma_teardown_io_queues(ctrl, false); in nvme_rdma_error_recovery_work()
1127 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1128 nvme_rdma_teardown_admin_queue(ctrl, false); in nvme_rdma_error_recovery_work()
1129 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1130 nvme_auth_stop(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1132 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_error_recovery_work()
1133 /* state change failure is ok if we started ctrl delete */ in nvme_rdma_error_recovery_work()
1134 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1141 nvme_rdma_reconnect_or_remove(ctrl); in nvme_rdma_error_recovery_work()
1144 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_error_recovery() argument
1146 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) in nvme_rdma_error_recovery()
1149 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); in nvme_rdma_error_recovery()
1150 queue_work(nvme_reset_wq, &ctrl->err_work); in nvme_rdma_error_recovery()
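nvme_rdma_error_recovery() (lines 1144-1150) shows the lock-free teardown trigger used throughout the nvme fabrics drivers: the atomic state transition to NVME_CTRL_RESETTING is the gate, so concurrent callers collapse into a single recovery run. Reassembled from the fragments above:

    static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
    {
        /* only one caller wins the LIVE -> RESETTING transition */
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
            return;

        dev_warn(ctrl->ctrl.device, "starting error recovery\n");
        queue_work(nvme_reset_wq, &ctrl->err_work);
    }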
1167 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error() local
1169 if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE) in nvme_rdma_wr_error()
1170 dev_info(ctrl->ctrl.device, in nvme_rdma_wr_error()
1174 nvme_rdma_error_recovery(ctrl); in nvme_rdma_wr_error()
1278 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1310 * Align the MR to a 4K page size to match the ctrl page size and in nvme_rdma_map_sg_fr()
1545 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1608 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
1634 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1645 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1646 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
1657 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); in nvme_rdma_submit_async_event() local
1658 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event()
1660 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event()
1690 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1693 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1704 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1707 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1714 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1717 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1742 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1744 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1757 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1794 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1798 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1807 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved() local
1814 if (ctrl->opts->tos >= 0) in nvme_rdma_addr_resolved()
1815 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1818 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", in nvme_rdma_addr_resolved()
1832 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved() local
1863 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1868 dev_err(ctrl->ctrl.device, in nvme_rdma_route_resolved()
1882 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1905 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1912 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1914 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1920 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1922 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
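nvme_rdma_cm_handler() (lines 1882-1922) is the single callback multiplexing every RDMA CM event for a queue; cm_id->context carries the queue pointer. A heavily condensed sketch of its shape, not the file's full event coverage:

    static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                    struct rdma_cm_event *ev)
    {
        struct nvme_rdma_queue *queue = cm_id->context;

        switch (ev->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
            /* next step: resolve the route (error path at line 1818) */
            break;
        case RDMA_CM_EVENT_ESTABLISHED:
            /* connection up; wake whoever waits on the queue */
            break;
        case RDMA_CM_EVENT_DISCONNECTED:
            /* lines 1912-1914: kick error recovery */
            nvme_rdma_error_recovery(queue->ctrl);
            break;
        default:
            /* lines 1920-1922: unexpected event -> error recovery */
            nvme_rdma_error_recovery(queue->ctrl);
            break;
        }
        return 0;
    }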
1947 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout() local
1949 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", in nvme_rdma_timeout()
1952 if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) { in nvme_rdma_timeout()
1957 * - ctrl disable/shutdown fabrics requests in nvme_rdma_timeout()
1974 nvme_rdma_error_recovery(ctrl); in nvme_rdma_timeout()
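The timeout handler (lines 1947-1974) branches on controller state: if the controller is no longer LIVE, a teardown (error recovery, reset, shutdown, or a fabrics connect/disable request, line 1957) is already in flight, so the request is completed directly rather than blocking teardown; otherwise recovery is kicked off and the timer reset. A sketch of the decision, with the completion helper assumed to be the file's own:

    if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
        /*
         * Teardown is in progress: complete the request here so
         * teardown is not blocked waiting on it.
         */
        nvme_rdma_complete_timed_out(rq);   /* helper assumed */
        return BLK_EH_DONE;
    }

    /* LIVE: treat the timeout as a transport error */
    nvme_rdma_error_recovery(ctrl);
    return BLK_EH_RESET_TIMER;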
1994 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
1995 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
2026 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2114 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); in nvme_rdma_map_queues() local
2116 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); in nvme_rdma_map_queues()
2139 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) in nvme_rdma_shutdown_ctrl() argument
2141 nvme_rdma_teardown_io_queues(ctrl, shutdown); in nvme_rdma_shutdown_ctrl()
2142 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2143 nvme_disable_ctrl(&ctrl->ctrl, shutdown); in nvme_rdma_shutdown_ctrl()
2144 nvme_rdma_teardown_admin_queue(ctrl, shutdown); in nvme_rdma_shutdown_ctrl()
2147 static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_rdma_delete_ctrl() argument
2149 nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true); in nvme_rdma_delete_ctrl()
2154 struct nvme_rdma_ctrl *ctrl = in nvme_rdma_reset_ctrl_work() local
2155 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work); in nvme_rdma_reset_ctrl_work()
2157 nvme_stop_ctrl(&ctrl->ctrl); in nvme_rdma_reset_ctrl_work()
2158 nvme_rdma_shutdown_ctrl(ctrl, false); in nvme_rdma_reset_ctrl_work()
2160 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_reset_ctrl_work()
2166 if (nvme_rdma_setup_ctrl(ctrl, false)) in nvme_rdma_reset_ctrl_work()
2172 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reset_ctrl_work()
2173 nvme_rdma_reconnect_or_remove(ctrl); in nvme_rdma_reset_ctrl_work()
2205 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_existing_controller() local
2209 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { in nvme_rdma_existing_controller()
2210 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_rdma_existing_controller()
2222 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_create_ctrl() local
2226 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_rdma_create_ctrl()
2227 if (!ctrl) in nvme_rdma_create_ctrl()
2229 ctrl->ctrl.opts = opts; in nvme_rdma_create_ctrl()
2230 INIT_LIST_HEAD(&ctrl->list); in nvme_rdma_create_ctrl()
2243 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_rdma_create_ctrl()
2252 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_rdma_create_ctrl()
2265 INIT_DELAYED_WORK(&ctrl->reconnect_work, in nvme_rdma_create_ctrl()
2267 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); in nvme_rdma_create_ctrl()
2268 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); in nvme_rdma_create_ctrl()
2270 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_rdma_create_ctrl()
2272 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_rdma_create_ctrl()
2273 ctrl->ctrl.kato = opts->kato; in nvme_rdma_create_ctrl()
2276 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_rdma_create_ctrl()
2278 if (!ctrl->queues) in nvme_rdma_create_ctrl()
2281 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, in nvme_rdma_create_ctrl()
2286 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); in nvme_rdma_create_ctrl()
2289 ret = nvme_rdma_setup_ctrl(ctrl, true); in nvme_rdma_create_ctrl()
2293 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", in nvme_rdma_create_ctrl()
2294 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); in nvme_rdma_create_ctrl()
2297 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); in nvme_rdma_create_ctrl()
2300 return &ctrl->ctrl; in nvme_rdma_create_ctrl()
2303 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2304 nvme_put_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2309 kfree(ctrl->queues); in nvme_rdma_create_ctrl()
2311 kfree(ctrl); in nvme_rdma_create_ctrl()
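nvme_rdma_create_ctrl() (lines 2222-2311) stitches the above together: allocate and parse addresses, initialize the three work items, register with the core, move to CONNECTING, run the same nvme_rdma_setup_ctrl() path that reconnect and reset reuse (with new = true), and publish the controller on nvme_rdma_ctrl_list. Condensed from the fragments above, error paths elided:

    ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
    ctrl->ctrl.opts = opts;
    INIT_LIST_HEAD(&ctrl->list);
    /* parse opts->traddr/trsvcid into ctrl->addr, host_traddr into ctrl->src_addr */

    INIT_DELAYED_WORK(&ctrl->reconnect_work, nvme_rdma_reconnect_ctrl_work);
    INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
    INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);

    ctrl->ctrl.sqsize = opts->queue_size - 1;    /* sqsize is 0's based */
    ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                   GFP_KERNEL);

    ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, 0 /* quirks */);
    changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
    ret = nvme_rdma_setup_ctrl(ctrl, true);

    list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);   /* under the list mutex */
    return &ctrl->ctrl;

In practice this path is reached through the fabrics core when userspace connects, e.g. via nvme-cli's nvme connect -t rdma with a traddr, trsvcid and subsystem NQN.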
2328 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_remove_one() local
2346 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { in nvme_rdma_remove_one()
2347 if (ctrl->device->dev != ib_device) in nvme_rdma_remove_one()
2349 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_remove_one()
2382 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_cleanup_module() local
2388 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) in nvme_rdma_cleanup_module()
2389 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_cleanup_module()