Lines Matching full:ctrl

139 	struct nvme_tcp_ctrl	*ctrl;
167 struct nvme_ctrl ctrl;
182 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
184 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
189 return queue - queue->ctrl->queues;
197 return queue->ctrl->admin_tag_set.tags[queue_idx];
198 return queue->ctrl->tag_set.tags[queue_idx - 1];
232 return req == &req->queue->ctrl->async_req;
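
The helpers listed above (source lines 182-232) recover the TCP-specific controller from the generic struct nvme_ctrl embedded inside it, and derive a queue's index from its position in the ctrl->queues array. Below is a minimal standalone sketch of both patterns; the structures are simplified stand-ins, not the real driver types.

    #include <stddef.h>
    #include <stdio.h>

    /* simplified stand-ins for the driver structures */
    struct nvme_ctrl { int dummy; };
    struct nvme_tcp_queue { struct nvme_tcp_ctrl *ctrl; };
    struct nvme_tcp_ctrl {
        struct nvme_tcp_queue *queues;  /* queues[0] is the admin queue */
        struct nvme_ctrl ctrl;          /* generic controller embedded in the TCP one */
    };

    /* same idea as the kernel's container_of(): step back from the embedded member */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
    {
        return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
    }

    /* queue id = array offset within ctrl->queues; 0 is the admin queue */
    static int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
    {
        return (int)(queue - queue->ctrl->queues);
    }

    int main(void)
    {
        struct nvme_tcp_queue queues[4];
        struct nvme_tcp_ctrl tcp = { .queues = queues };
        int i;

        for (i = 0; i < 4; i++)
            queues[i].ctrl = &tcp;
        printf("round trip ok: %d\n", to_tcp_ctrl(&tcp.ctrl) == &tcp);
        printf("id of queues[2]: %d\n", nvme_tcp_queue_id(&queues[2]));
        return 0;
    }
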
427 dev_err(queue->ctrl->ctrl.device,
437 dev_err(queue->ctrl->ctrl.device,
456 dev_err(queue->ctrl->ctrl.device,
478 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
481 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
482 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
493 nvme_req(rq)->ctrl = &ctrl->ctrl;
502 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
503 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
512 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
513 struct nvme_tcp_queue *queue = &ctrl->queues[0];
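
Lines 478-513 (together with 197-198 above) show how block-layer indices translate to driver queues: for the I/O tag set, hardware context hctx_idx maps to ctrl->queues[hctx_idx + 1] because queue 0 is reserved for the admin queue, while the admin tag set always uses queue 0; correspondingly, the tags for I/O queue n live in tag_set.tags[n - 1]. A tiny standalone check of that off-by-one bookkeeping, with hypothetical index values:

    #include <assert.h>

    static int io_hctx_to_queue(int hctx_idx) { return hctx_idx + 1; }  /* I/O tag set */
    static int admin_hctx_to_queue(void)      { return 0; }             /* admin tag set */
    static int io_queue_to_tags_slot(int qid) { return qid - 1; }       /* tag_set.tags[] index */

    int main(void)
    {
        assert(io_hctx_to_queue(0) == 1);       /* first I/O hctx -> queue 1 */
        assert(admin_hctx_to_queue() == 0);     /* admin commands -> queue 0 */
        assert(io_queue_to_tags_slot(3) == 2);  /* queue 3's tags -> tags[2] */
        return 0;
    }
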
536 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
538 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
541 dev_warn(ctrl->device, "starting error recovery\n");
542 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
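
nvme_tcp_error_recovery() (line 536) is deliberately thin: it only queues err_work if the controller can legally transition to NVME_CTRL_RESETTING, so a reset or deletion already in flight wins and recovery is not scheduled twice. A rough standalone model of that "claim the state first, then schedule" guard; the state set and the stand-in for queue_work() are placeholders, not the driver's real state machine:

    #include <stdbool.h>
    #include <stdio.h>

    enum ctrl_state { LIVE, RESETTING, DELETING };

    static enum ctrl_state state = LIVE;

    /* placeholder for nvme_change_ctrl_state(): here RESETTING is only reachable from LIVE */
    static bool change_state(enum ctrl_state new_state)
    {
        if (new_state == RESETTING && state != LIVE)
            return false;
        state = new_state;
        return true;
    }

    static void error_recovery(void)
    {
        if (!change_state(RESETTING))
            return;                              /* someone else owns the controller */
        printf("starting error recovery\n");     /* stands in for queueing err_work */
    }

    int main(void)
    {
        error_recovery();   /* schedules recovery once */
        error_recovery();   /* second call is a no-op: already RESETTING */
        return 0;
    }
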
553 dev_err(queue->ctrl->ctrl.device,
556 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
578 dev_err(queue->ctrl->ctrl.device,
585 dev_err(queue->ctrl->ctrl.device,
595 dev_err(queue->ctrl->ctrl.device,
598 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
619 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
671 dev_err(queue->ctrl->ctrl.device,
679 dev_err(queue->ctrl->ctrl.device,
686 dev_err(queue->ctrl->ctrl.device,
693 dev_err(queue->ctrl->ctrl.device,
728 dev_err(queue->ctrl->ctrl.device,
740 dev_err(queue->ctrl->ctrl.device,
797 dev_err(queue->ctrl->ctrl.device,
834 dev_err(queue->ctrl->ctrl.device,
854 dev_err(queue->ctrl->ctrl.device,
908 dev_err(queue->ctrl->ctrl.device,
952 dev_err(queue->ctrl->ctrl.device,
955 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
1005 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
1008 dev_info(queue->ctrl->ctrl.device,
1028 nvme_complete_async_event(&req->queue->ctrl->ctrl,
1247 dev_err(queue->ctrl->ctrl.device,
1341 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1343 struct nvme_tcp_request *async = &ctrl->async_req;
1348 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1350 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1351 struct nvme_tcp_request *async = &ctrl->async_req;
1360 async->queue = &ctrl->queues[0];
1367 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1368 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1460 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1470 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1506 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1510 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1515 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1520 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1521 ctrl->io_queues[HCTX_TYPE_READ];
1526 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1532 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1533 ctrl->io_queues[HCTX_TYPE_READ] +
1534 ctrl->io_queues[HCTX_TYPE_POLL];
1539 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1546 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1548 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1549 ctrl->io_queues[HCTX_TYPE_READ] - 1;
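
Lines 1506-1549 partition the I/O queue ID space into contiguous ranges per hardware context type: queues [1, D] serve HCTX_TYPE_DEFAULT, (D, D+R] serve HCTX_TYPE_READ, and (D+R, D+R+P] serve HCTX_TYPE_POLL, where D/R/P are the counts in ctrl->io_queues[]. The index n computed at lines 1546-1549 is the queue's offset within its own range. A worked standalone example with made-up counts:

    #include <stdio.h>

    /* made-up per-type queue counts: ctrl->io_queues[HCTX_TYPE_*] */
    enum { DEFAULT_QUEUES = 4, READ_QUEUES = 2, POLL_QUEUES = 2 };

    static const char *queue_type(int qid)
    {
        if (qid == 0)
            return "admin";
        if (qid < 1 + DEFAULT_QUEUES)
            return "default";
        if (qid < 1 + DEFAULT_QUEUES + READ_QUEUES)
            return "read";
        if (qid < 1 + DEFAULT_QUEUES + READ_QUEUES + POLL_QUEUES)
            return "poll";
        return "out of range";
    }

    /* offset of the queue within its own type range (see source lines 1546-1549) */
    static int queue_offset(int qid)
    {
        if (qid < 1 + DEFAULT_QUEUES)
            return qid - 1;
        if (qid < 1 + DEFAULT_QUEUES + READ_QUEUES)
            return qid - DEFAULT_QUEUES - 1;
        return qid - DEFAULT_QUEUES - READ_QUEUES - 1;
    }

    int main(void)
    {
        int qid;

        for (qid = 0; qid <= DEFAULT_QUEUES + READ_QUEUES + POLL_QUEUES; qid++)
            printf("qid %d: %s, offset %d\n", qid, queue_type(qid),
                   qid ? queue_offset(qid) : 0);
        return 0;
    }
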
1555 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1556 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1560 queue->ctrl = ctrl;
1572 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1616 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1617 sizeof(ctrl->src_addr));
1662 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1663 sizeof(ctrl->addr), 0);
1715 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1716 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1745 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1746 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1769 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1771 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1772 cancel_work_sync(&ctrl->async_event_work);
1773 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1774 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1777 nvme_tcp_free_queue(ctrl, 0);
1780 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1784 for (i = 1; i < ctrl->queue_count; i++)
1785 nvme_tcp_free_queue(ctrl, i);
1788 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1792 for (i = 1; i < ctrl->queue_count; i++)
1793 nvme_tcp_stop_queue(ctrl, i);
1796 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
1802 ret = nvme_tcp_start_queue(ctrl, i);
1811 nvme_tcp_stop_queue(ctrl, i);
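
nvme_tcp_start_io_queues() (lines 1796-1811) uses the usual unwind idiom: start queues in order and, if one fails, stop every queue that was already started before returning the error. A compact sketch of that pattern with stubbed start/stop helpers; the real function takes the struct nvme_ctrl and a [first, last) range of queue IDs:

    #include <stdio.h>

    /* stubs: pretend queue 3 refuses to start */
    static int start_queue(int qid) { return qid == 3 ? -1 : 0; }
    static void stop_queue(int qid) { printf("stopped queue %d\n", qid); }

    static int start_io_queues(int first, int last)
    {
        int i, ret;

        for (i = first; i < last; i++) {
            ret = start_queue(i);
            if (ret)
                goto out_stop;
        }
        return 0;

    out_stop:
        /* unwind: stop only the queues that actually started */
        for (i--; i >= first; i--)
            stop_queue(i);
        return ret;
    }

    int main(void)
    {
        printf("start_io_queues: %d\n", start_io_queues(1, 5));
        return 0;
    }
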
1815 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1819 ret = nvme_tcp_alloc_queue(ctrl, 0);
1823 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1830 nvme_tcp_free_queue(ctrl, 0);
1834 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1838 for (i = 1; i < ctrl->queue_count; i++) {
1839 ret = nvme_tcp_alloc_queue(ctrl, i);
1848 nvme_tcp_free_queue(ctrl, i);
1853 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1858 nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
1859 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1864 dev_err(ctrl->device,
1869 ctrl->queue_count = nr_io_queues + 1;
1870 dev_info(ctrl->device,
1873 nvmf_set_io_queues(ctrl->opts, nr_io_queues,
1874 to_tcp_ctrl(ctrl)->io_queues);
1875 return __nvme_tcp_alloc_io_queues(ctrl);
1878 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1880 nvme_tcp_stop_io_queues(ctrl);
1882 nvme_remove_io_tag_set(ctrl);
1883 nvme_tcp_free_io_queues(ctrl);
1886 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1890 ret = nvme_tcp_alloc_io_queues(ctrl);
1895 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
1897 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
1908 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
1909 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
1914 nvme_start_freeze(ctrl);
1915 nvme_unquiesce_io_queues(ctrl);
1916 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1923 nvme_unfreeze(ctrl);
1926 blk_mq_update_nr_hw_queues(ctrl->tagset,
1927 ctrl->queue_count - 1);
1928 nvme_unfreeze(ctrl);
1935 ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
1936 ctrl->tagset->nr_hw_queues + 1);
1943 nvme_quiesce_io_queues(ctrl);
1944 nvme_sync_io_queues(ctrl);
1945 nvme_tcp_stop_io_queues(ctrl);
1947 nvme_cancel_tagset(ctrl);
1949 nvme_remove_io_tag_set(ctrl);
1951 nvme_tcp_free_io_queues(ctrl);
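
The configure path at lines 1886-1951 starts I/O queues in two steps: it first brings up at most the number of queues the current tag set already knows about (nr_queues = min(tagset->nr_hw_queues + 1, queue_count), line 1908), freezes I/O while blk_mq_update_nr_hw_queues() resizes the tag set to the new count, and only then starts any queues added by the reconnect. The min() keeps a reconnect that came back with more queues from touching hardware contexts the block layer has not been told about yet. A small worked model of that arithmetic with hypothetical counts:

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* hypothetical reconnect: the tag set still has old_hw_queues contexts,
     * the new connection negotiated new_queue_count (I/O queues + 1 admin) */
    static void plan(int old_hw_queues, int new_queue_count)
    {
        int first_batch = min_int(old_hw_queues + 1, new_queue_count);
        int resized_hw_queues = new_queue_count - 1;

        printf("start queues [1, %d) first, resize tag set to %d, then start [%d, %d)\n",
               first_batch, resized_hw_queues, first_batch, resized_hw_queues + 1);
    }

    int main(void)
    {
        plan(8, 5);   /* fewer queues after reconnect: second batch is empty */
        plan(8, 11);  /* more queues after reconnect: queues 9-10 start late */
        return 0;
    }
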
1955 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1957 nvme_tcp_stop_queue(ctrl, 0);
1959 nvme_remove_admin_tag_set(ctrl);
1960 nvme_tcp_free_admin_queue(ctrl);
1963 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1967 error = nvme_tcp_alloc_admin_queue(ctrl);
1972 error = nvme_alloc_admin_tag_set(ctrl,
1973 &to_tcp_ctrl(ctrl)->admin_tag_set,
1980 error = nvme_tcp_start_queue(ctrl, 0);
1984 error = nvme_enable_ctrl(ctrl);
1988 nvme_unquiesce_admin_queue(ctrl);
1990 error = nvme_init_ctrl_finish(ctrl, false);
1997 nvme_quiesce_admin_queue(ctrl);
1998 blk_sync_queue(ctrl->admin_q);
2000 nvme_tcp_stop_queue(ctrl, 0);
2001 nvme_cancel_admin_tagset(ctrl);
2004 nvme_remove_admin_tag_set(ctrl);
2006 nvme_tcp_free_admin_queue(ctrl);
2010 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2013 nvme_quiesce_admin_queue(ctrl);
2014 blk_sync_queue(ctrl->admin_q);
2015 nvme_tcp_stop_queue(ctrl, 0);
2016 nvme_cancel_admin_tagset(ctrl);
2018 nvme_unquiesce_admin_queue(ctrl);
2019 nvme_tcp_destroy_admin_queue(ctrl, remove);
2022 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2025 if (ctrl->queue_count <= 1)
2027 nvme_quiesce_admin_queue(ctrl);
2028 nvme_quiesce_io_queues(ctrl);
2029 nvme_sync_io_queues(ctrl);
2030 nvme_tcp_stop_io_queues(ctrl);
2031 nvme_cancel_tagset(ctrl);
2033 nvme_unquiesce_io_queues(ctrl);
2034 nvme_tcp_destroy_io_queues(ctrl, remove);
2037 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2039 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2047 if (nvmf_should_reconnect(ctrl)) {
2048 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2049 ctrl->opts->reconnect_delay);
2050 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2051 ctrl->opts->reconnect_delay * HZ);
2053 dev_info(ctrl->device, "Removing controller...\n");
2054 nvme_delete_ctrl(ctrl);
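
nvme_tcp_reconnect_or_remove() (lines 2037-2054) either re-arms the delayed connect_work after opts->reconnect_delay seconds (converted to jiffies with * HZ) or deletes the controller once nvmf_should_reconnect() reports the retry budget is spent. In the stock fabrics code that budget is max_reconnects, derived from ctrl_loss_tmo divided by reconnect_delay, rounded up. A simplified model of the budget with illustrative option values:

    #include <stdio.h>

    /* illustrative connect options: retry every 10s, give up after 600s */
    enum { RECONNECT_DELAY = 10, CTRL_LOSS_TMO = 600 };

    /* budget derived the same way the fabrics option parsing does:
     * DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay) */
    static int max_reconnects(void)
    {
        return (CTRL_LOSS_TMO + RECONNECT_DELAY - 1) / RECONNECT_DELAY;
    }

    static int should_reconnect(int nr_reconnects)
    {
        return nr_reconnects < max_reconnects();
    }

    int main(void)
    {
        printf("retry budget: %d attempts\n", max_reconnects());
        printf("attempt 59 allowed: %d\n", should_reconnect(59));
        printf("attempt 60 allowed: %d\n", should_reconnect(60));
        return 0;
    }
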
2058 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2060 struct nvmf_ctrl_options *opts = ctrl->opts;
2063 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2067 if (ctrl->icdoff) {
2069 dev_err(ctrl->device, "icdoff is not supported!\n");
2073 if (!nvme_ctrl_sgl_supported(ctrl)) {
2075 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2079 if (opts->queue_size > ctrl->sqsize + 1)
2080 dev_warn(ctrl->device,
2081 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2082 opts->queue_size, ctrl->sqsize + 1);
2084 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2085 dev_warn(ctrl->device,
2086 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2087 ctrl->sqsize + 1, ctrl->maxcmd);
2088 ctrl->sqsize = ctrl->maxcmd - 1;
2091 if (ctrl->queue_count > 1) {
2092 ret = nvme_tcp_configure_io_queues(ctrl, new);
2097 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2099 * state change failure is ok if we started ctrl delete,
2103 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2112 nvme_start_ctrl(ctrl);
2116 if (ctrl->queue_count > 1) {
2117 nvme_quiesce_io_queues(ctrl);
2118 nvme_sync_io_queues(ctrl);
2119 nvme_tcp_stop_io_queues(ctrl);
2120 nvme_cancel_tagset(ctrl);
2121 nvme_tcp_destroy_io_queues(ctrl, new);
2124 nvme_quiesce_admin_queue(ctrl);
2125 blk_sync_queue(ctrl->admin_q);
2126 nvme_tcp_stop_queue(ctrl, 0);
2127 nvme_cancel_admin_tagset(ctrl);
2128 nvme_tcp_destroy_admin_queue(ctrl, new);
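
nvme_tcp_setup_ctrl() (lines 2058 onward) brings up the admin queue, rejects controllers that require an ICDOFF or lack SGL support, and then reconciles the requested queue depth with what the controller reports: if the user's queue_size exceeds sqsize + 1 it only warns, but if sqsize + 1 exceeds the controller's MAXCMD it clamps sqsize down to maxcmd - 1 (lines 2084-2088). A worked example of that clamping with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical values for illustration only */
        unsigned int queue_size = 128;  /* what the user asked for */
        unsigned int sqsize = 63;       /* what the controller granted (0-based) */
        unsigned int maxcmd = 32;       /* MAXCMD from Identify Controller */

        if (queue_size > sqsize + 1)
            printf("queue_size %u > ctrl sqsize %u, clamping down\n",
                   queue_size, sqsize + 1);

        if (sqsize + 1 > maxcmd) {
            printf("sqsize %u > ctrl maxcmd %u, clamping down\n",
                   sqsize + 1, maxcmd);
            sqsize = maxcmd - 1;
        }
        printf("effective queue depth: %u\n", sqsize + 1);  /* 32 here */
        return 0;
    }
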
2136 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2138 ++ctrl->nr_reconnects;
2140 if (nvme_tcp_setup_ctrl(ctrl, false))
2143 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2144 ctrl->nr_reconnects);
2146 ctrl->nr_reconnects = 0;
2151 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2152 ctrl->nr_reconnects);
2153 nvme_tcp_reconnect_or_remove(ctrl);
2160 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2162 nvme_stop_keep_alive(ctrl);
2163 flush_work(&ctrl->async_event_work);
2164 nvme_tcp_teardown_io_queues(ctrl, false);
2166 nvme_unquiesce_io_queues(ctrl);
2167 nvme_tcp_teardown_admin_queue(ctrl, false);
2168 nvme_unquiesce_admin_queue(ctrl);
2169 nvme_auth_stop(ctrl);
2171 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2172 /* state change failure is ok if we started ctrl delete */
2173 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2180 nvme_tcp_reconnect_or_remove(ctrl);
2183 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2185 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2186 nvme_quiesce_admin_queue(ctrl);
2187 nvme_disable_ctrl(ctrl, shutdown);
2188 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2191 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2193 nvme_tcp_teardown_ctrl(ctrl, true);
2198 struct nvme_ctrl *ctrl =
2201 nvme_stop_ctrl(ctrl);
2202 nvme_tcp_teardown_ctrl(ctrl, false);
2204 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2205 /* state change failure is ok if we started ctrl delete */
2206 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
2213 if (nvme_tcp_setup_ctrl(ctrl, false))
2219 ++ctrl->nr_reconnects;
2220 nvme_tcp_reconnect_or_remove(ctrl);
2223 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2225 flush_work(&to_tcp_ctrl(ctrl)->err_work);
2226 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2231 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2233 if (list_empty(&ctrl->list))
2237 list_del(&ctrl->list);
2242 kfree(ctrl->queues);
2243 kfree(ctrl);
2261 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2279 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2280 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2281 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2297 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2298 ctrl->async_req.offset = 0;
2299 ctrl->async_req.curr_bio = NULL;
2300 ctrl->async_req.data_len = 0;
2302 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2308 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2310 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2317 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2322 dev_warn(ctrl->device,
2327 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
2332 * - ctrl disable/shutdown fabrics requests
2349 nvme_tcp_error_recovery(ctrl);
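
The timeout handler around lines 2308-2349 distinguishes two cases: if the controller is not LIVE (teardown or reconnect in progress), the timed-out request is completed directly after its queue is stopped so it cannot block the teardown or setup sequence; if the controller is LIVE, the handler kicks error recovery and asks the block layer to re-arm the timer, letting recovery cancel the request. A sketch of that decision, with the block-layer return values reduced to an enum of the same names and the driver helpers stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    enum blk_eh_timer_return { BLK_EH_DONE, BLK_EH_RESET_TIMER };

    /* stubs standing in for the driver's helpers */
    static void complete_timed_out(void) { printf("stop queue, complete request\n"); }
    static void error_recovery(void)     { printf("schedule error recovery\n"); }

    static enum blk_eh_timer_return timeout(bool ctrl_is_live)
    {
        if (!ctrl_is_live) {
            /* teardown/connect in progress: don't block it, finish the request now */
            complete_timed_out();
            return BLK_EH_DONE;
        }
        /* LIVE: let error recovery tear the queues down and fail the request */
        error_recovery();
        return BLK_EH_RESET_TIMER;
    }

    int main(void)
    {
        timeout(false);
        timeout(true);
        return 0;
    }
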
2419 dev_err(queue->ctrl->ctrl.device,
2445 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2446 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2461 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
2463 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
2482 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2484 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
2488 len = nvmf_get_address(ctrl, buf, size);
2546 struct nvme_tcp_ctrl *ctrl;
2550 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2551 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2563 struct nvme_tcp_ctrl *ctrl;
2566 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2567 if (!ctrl)
2570 INIT_LIST_HEAD(&ctrl->list);
2571 ctrl->ctrl.opts = opts;
2572 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2574 ctrl->ctrl.sqsize = opts->queue_size - 1;
2575 ctrl->ctrl.kato = opts->kato;
2577 INIT_DELAYED_WORK(&ctrl->connect_work,
2579 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2580 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2593 opts->traddr, opts->trsvcid, &ctrl->addr);
2602 opts->host_traddr, NULL, &ctrl->src_addr);
2624 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2626 if (!ctrl->queues) {
2631 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2635 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2641 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2645 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2646 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
2649 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2652 return &ctrl->ctrl;
2655 nvme_uninit_ctrl(&ctrl->ctrl);
2656 nvme_put_ctrl(&ctrl->ctrl);
2661 kfree(ctrl->queues);
2663 kfree(ctrl);
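
nvme_tcp_create_ctrl() (lines 2563 onward) sizes everything from the connect options before any connection is made: queue_count is the sum of the requested I/O, write and poll queues plus one for the admin queue (line 2572), sqsize is the 0-based queue_size - 1 (line 2574), and ctrl->queues is a kcalloc'd array of queue_count entries (line 2624). A quick worked example with illustrative option values:

    #include <stdio.h>

    int main(void)
    {
        /* illustrative connect options */
        unsigned int nr_io_queues = 8, nr_write_queues = 4, nr_poll_queues = 2;
        unsigned int queue_size = 128;

        unsigned int queue_count = nr_io_queues + nr_write_queues +
                                   nr_poll_queues + 1;   /* +1 admin queue */
        unsigned int sqsize = queue_size - 1;             /* 0-based depth */

        printf("queue_count = %u (queues[] entries to allocate)\n", queue_count);
        printf("sqsize = %u\n", sqsize);
        return 0;
    }
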
2701 struct nvme_tcp_ctrl *ctrl;
2706 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2707 nvme_delete_ctrl(&ctrl->ctrl);